Example #1
/*
 * Function:	set_bootfs
 * Description:	Sets the bootfs property on the boot pool to be the
 *		root dataset of the activated BE.
 * Parameters:
 *		boot_pool - The pool we're setting bootfs in.
 *		be_root_ds - The main dataset for the BE.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 * Scope:
 *		Private
 */
static int
set_bootfs(char *boot_rpool, char *be_root_ds)
{
	zpool_handle_t *zhp;
	int err = BE_SUCCESS;

	if ((zhp = zpool_open(g_zfs, boot_rpool)) == NULL) {
		be_print_err(gettext("set_bootfs: failed to open pool "
		    "(%s): %s\n"), boot_rpool, libzfs_error_description(g_zfs));
		err = zfs_err_to_be_err(g_zfs);
		return (err);
	}

	err = zpool_set_prop(zhp, "bootfs", be_root_ds);
	if (err) {
		be_print_err(gettext("set_bootfs: failed to set "
		    "bootfs property for pool %s: %s\n"), boot_rpool,
		    libzfs_error_description(g_zfs));
		err = zfs_err_to_be_err(g_zfs);
		zpool_close(zhp);
		return (err);
	}

	zpool_close(zhp);
	return (BE_SUCCESS);
}
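A minimal usage sketch of the idiom above — open a handle, attempt the operation, convert the libzfs error before closing. This is hypothetical and not part of libbe: it treats the static set_bootfs() as callable, and the pool and dataset names are illustrative.

#include <stdio.h>
#include <libzfs.h>

static int
activate_bootfs_example(void)
{
	int err;

	/* Initialize the global libzfs handle the helpers above rely on. */
	if ((g_zfs = libzfs_init()) == NULL)
		return (1);

	/* "rpool" and "rpool/ROOT/my-be" are illustrative names only. */
	err = set_bootfs("rpool", "rpool/ROOT/my-be");
	if (err != BE_SUCCESS)
		(void) fprintf(stderr, "set_bootfs failed: %d\n", err);

	libzfs_fini(g_zfs);
	g_zfs = NULL;
	return (err);
}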
Example #2
static void
zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
{
	(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
	    fmt, ap);
	hdl->libzfs_error = error;

	if (hdl->libzfs_desc_active)
		hdl->libzfs_desc_active = 0;
	else
		hdl->libzfs_desc[0] = '\0';

	if (hdl->libzfs_printerr) {
		if (error == EZFS_UNKNOWN) {
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
			    "error: %s\n"), libzfs_error_description(hdl));
			abort();
		}

		(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
		    libzfs_error_description(hdl));
		if (error == EZFS_NOMEM)
			exit(1);
	}
}
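zfs_verror() consumes a va_list, so its callers are thin variadic wrappers that capture their arguments with va_start(). A sketch of such a wrapper follows; the name zfs_error_fmt_example is hypothetical, though libzfs's real front ends follow the same shape.

#include <stdarg.h>

static int
zfs_error_fmt_example(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	zfs_verror(hdl, error, fmt, ap);
	va_end(ap);

	/* libzfs error paths conventionally return -1 to the caller. */
	return (-1);
}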
Example #3
/* Remove a home directory structure */
int
rm_homedir(char *dir, int flags)
{
	struct stat stbuf;
	char *nm, *rp;

	rp = realpath(dir, NULL);
	if (rp && (strcmp(rp, "/") == 0)) {
		return (0);
	}

	if ((stat(dir, &stbuf) != 0) || !S_ISDIR(stbuf.st_mode))
		return (0);

	if ((strcmp(stbuf.st_fstype, MNTTYPE_ZFS) == 0) &&
	    (flags & MANAGE_ZFS)) {
		if (g_zfs == NULL)
			g_zfs = libzfs_init();

		if (g_zfs == NULL) {
			errmsg(M_OOPS, "libzfs_init failure", strerror(errno));
			return (EX_HOMEDIR);
		}

		if ((nm = get_mnt_special(dir, stbuf.st_fstype)) != NULL) {
			zfs_handle_t *zhp;

			if ((zhp = zfs_open(g_zfs, nm, ZFS_TYPE_FILESYSTEM))
			    != NULL) {
				if ((zfs_unmount(zhp, NULL, 0) == 0) &&
				    (zfs_destroy(zhp, B_FALSE) == 0)) {
					zfs_close(zhp);
					libzfs_fini(g_zfs);
					g_zfs = NULL;
					return (0);
				}

				errmsg(M_OOPS, "destroy the home directory",
				    libzfs_error_description(g_zfs));

				(void) zfs_mount(zhp, NULL, 0);
				zfs_close(zhp);

				libzfs_fini(g_zfs);
				g_zfs = NULL;
				return (EX_HOMEDIR);
			}
		}
	}

	(void) sprintf(cmdbuf, "rm -rf %s", dir);

	if (g_zfs != NULL) {
		libzfs_fini(g_zfs);
		g_zfs = NULL;
	}

	return (system(cmdbuf));
}
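One fragility worth noting: the removal command is built with sprintf() into a shared cmdbuf and handed to system(), so a path containing shell metacharacters, or one longer than the buffer, would misbehave. A defensive sketch, assuming cmdbuf is a fixed-size array and that paths carry no embedded single quotes:

	/*
	 * Sketch only: bound the formatted command and single-quote the
	 * path. A complete fix would escape embedded quotes, or replace
	 * system(3C) with fork()/exec() and an argument vector.
	 */
	(void) snprintf(cmdbuf, sizeof (cmdbuf), "rm -rf '%s'", dir);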
Example #4
/*
 * Function:	be_get_zone_be_list
 * Description:	Finds all the BEs for this zone on the system.
 * Parameters:
 *		zone_be_name - The name of the BE to look up.
 *              zone_be_container_ds - The dataset for the zone.
 *		zbe_nodes - A reference pointer to the list of BEs. The list
 *			   structure will be allocated here and must
 *			   be freed by a call to be_free_list. If there are no
 *			   BEs found on the system this reference will be
 *			   set to NULL.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 * Scope:
 *		Semi-private (library wide use only)
 */
int
be_get_zone_be_list(
/* LINTED */
	char *zone_be_name,
	char *zone_be_container_ds,
	be_node_list_t **zbe_nodes)
{
	zfs_handle_t *zhp = NULL;
	list_callback_data_t cb = { 0 };
	int ret = BE_SUCCESS;

	if (zbe_nodes == NULL)
		return (BE_ERR_INVAL);

	if (!zfs_dataset_exists(g_zfs, zone_be_container_ds,
	    ZFS_TYPE_FILESYSTEM)) {
		return (BE_ERR_BE_NOENT);
	}

	zone_be = B_TRUE;

	if ((zhp = zfs_open(g_zfs, zone_be_container_ds,
	    ZFS_TYPE_FILESYSTEM)) == NULL) {
		be_print_err(gettext("be_get_zone_be_list: failed to open "
		    "the zone BE dataset %s: %s\n"), zone_be_container_ds,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto cleanup;
	}

	(void) strcpy(be_container_ds, zone_be_container_ds);

	if (cb.be_nodes_head == NULL) {
		if ((cb.be_nodes_head = be_list_alloc(&ret,
		    sizeof (be_node_list_t))) == NULL) {
			ZFS_CLOSE(zhp);
			goto cleanup;
		}
		cb.be_nodes = cb.be_nodes_head;
	}
	if (ret == 0)
		ret = zfs_iter_filesystems(zhp, be_add_children_callback, &cb);
	ZFS_CLOSE(zhp);

	*zbe_nodes = cb.be_nodes_head;

cleanup:
	zone_be = B_FALSE;

	return (ret);
}
Example #5
/*
 * Create a snapshot on the volume
 */
int
snapshot_create(char *volname, char *jname, boolean_t recursive,
    boolean_t hold)
{
	char snapname[ZFS_MAXNAMELEN];
	int rv;

	if (!volname || !*volname)
		return (-1);

	(void) snprintf(snapname, ZFS_MAXNAMELEN, "%s@%s", volname, jname);

	(void) mutex_lock(&zlib_mtx);
	if ((rv = zfs_snapshot(zlibh, snapname, recursive, NULL))
	    == -1) {
		if (errno == EEXIST) {
			(void) mutex_unlock(&zlib_mtx);
			return (0);
		}
		NDMP_LOG(LOG_DEBUG,
		    "snapshot_create: %s failed (err=%d): %s",
		    snapname, errno, libzfs_error_description(zlibh));
		(void) mutex_unlock(&zlib_mtx);
		return (rv);
	}
	if (hold && snapshot_hold(volname, snapname, jname, recursive) != 0) {
		NDMP_LOG(LOG_DEBUG,
		    "snapshot_create: %s hold failed (err=%d): %s",
		    snapname, errno, libzfs_error_description(zlibh));
		(void) mutex_unlock(&zlib_mtx);
		return (-1);
	}

	(void) mutex_unlock(&zlib_mtx);
	return (0);
}
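Because an EEXIST from zfs_snapshot() is mapped to success, the call is idempotent for retried jobs. A hypothetical caller — the volume and job names are illustrative:

	/* Illustrative only: take, or reuse, a held snapshot for a job. */
	if (snapshot_create("tank/vol", "ndmp-job-42", B_FALSE, B_TRUE) != 0)
		NDMP_LOG(LOG_ERR, "could not snapshot tank/vol");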
Example #6
void Destroy::find(const std::string &root)
{
    zfs_handle_t *hzfs = zfs_open(m_hlib, root.c_str(),
        ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);

    if (!hzfs) {
        std::cerr << root << ": " << libzfs_error_description(m_hlib)
            << std::endl;
        return;
    }

    zfs_iter_snapshots(hzfs, B_FALSE, iter_dataset, this);

    if (m_recursive) {
        zfs_iter_filesystems(hzfs, iter_dataset, this);
    }

    zfs_close(hzfs);
}
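The iter_dataset callback is not shown in this example. Both zfs_iter_snapshots() and zfs_iter_filesystems() take a zfs_iter_f function, so only its signature is certain; a plausible shape is sketched below, and the body dispatching back into the Destroy object passed through data is an assumption.

static int
iter_dataset(zfs_handle_t *hzfs, void *data)
{
	/* e.g. static_cast<Destroy *>(data)->visit(zfs_get_name(hzfs)); */
	zfs_close(hzfs);	/* the callback owns and must close the handle */
	return (0);		/* nonzero would abort the iteration */
}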
Example #7
/*
 * Target is the dataset whose pool we want to open.
 */
static void
zhack_import(char *target, boolean_t readonly)
{
	nvlist_t *config;
	nvlist_t *props;
	int error;

	kernel_init(readonly ? FREAD : (FREAD | FWRITE));
	g_zfs = libzfs_init();
	ASSERT(g_zfs != NULL);

	dmu_objset_register_type(DMU_OST_ZFS, space_delta_cb);

	g_readonly = readonly;
	g_importargs.unique = B_TRUE;
	g_importargs.can_be_active = readonly;
	g_pool = strdup(target);

	error = zpool_tryimport(g_zfs, target, &config, &g_importargs);
	if (error)
		fatal(NULL, FTAG, "cannot import '%s': %s", target,
		    libzfs_error_description(g_zfs));

	props = NULL;
	if (readonly) {
		VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
		VERIFY(nvlist_add_uint64(props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), 1) == 0);
	}

	zfeature_checks_disable = B_TRUE;
	error = spa_import(target, config, props,
	    (readonly ?  ZFS_IMPORT_SKIP_MMP : ZFS_IMPORT_NORMAL));
	zfeature_checks_disable = B_FALSE;
	if (error == EEXIST)
		error = 0;

	if (error)
		fatal(NULL, FTAG, "can't import '%s': %s", target,
		    strerror(error));
}
Example #8
void Destroy::destroy(const std::string &name)
{
    if (m_verbose)
        std::cout << "    "
            << (m_dry_run ? "would destroy : " : "destroying    : ") << name
            << "\n";

    if (m_dry_run)
        return;

    zfs_handle_t *hzfs = zfs_open(m_hlib, name.c_str(), ZFS_TYPE_SNAPSHOT);

    if (!hzfs) {
        std::cerr << name << ": " << libzfs_error_description(m_hlib)
            << std::endl;
        return;
    }

    zfs_destroy(hzfs, m_defer ? B_TRUE : B_FALSE);

    zfs_close(hzfs);
}
Example #9
/*
 * Function:	set_canmount
 * Description:	Sets the canmount property on the datasets of the
 *		activated BE.
 * Parameters:
 *		be_nodes - The be_node_t returned from be_list
 *		value - The value of canmount we're setting: on|off|noauto.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 * Scope:
 *		Private
 */
static int
set_canmount(be_node_list_t *be_nodes, char *value)
{
	char		ds_path[MAXPATHLEN];
	zfs_handle_t	*zhp = NULL;
	be_node_list_t	*list = be_nodes;
	int		err = BE_SUCCESS;

	while (list != NULL) {
		be_dataset_list_t *datasets = list->be_node_datasets;

		be_make_root_ds(list->be_rpool, list->be_node_name, ds_path,
		    sizeof (ds_path));

		if ((zhp = zfs_open(g_zfs, ds_path, ZFS_TYPE_DATASET)) ==
		    NULL) {
			be_print_err(gettext("set_canmount: failed to open "
			    "dataset (%s): %s\n"), ds_path,
			    libzfs_error_description(g_zfs));
			err = zfs_err_to_be_err(g_zfs);
			return (err);
		}
		if (zfs_prop_get_int(zhp, ZFS_PROP_MOUNTED)) {
			/*
			 * it's already mounted so we can't change the
			 * canmount property anyway.
			 */
			err = BE_SUCCESS;
		} else {
			err = zfs_prop_set(zhp,
			    zfs_prop_to_name(ZFS_PROP_CANMOUNT), value);
			if (err) {
				ZFS_CLOSE(zhp);
				be_print_err(gettext("set_canmount: failed to "
				    "set dataset property (%s): %s\n"),
				    ds_path, libzfs_error_description(g_zfs));
				err = zfs_err_to_be_err(g_zfs);
				return (err);
			}
		}
		ZFS_CLOSE(zhp);

		while (datasets != NULL) {
			be_make_root_ds(list->be_rpool,
			    datasets->be_dataset_name, ds_path,
			    sizeof (ds_path));

			if ((zhp = zfs_open(g_zfs, ds_path, ZFS_TYPE_DATASET))
			    == NULL) {
				be_print_err(gettext("set_canmount: failed to "
				    "open dataset %s: %s\n"), ds_path,
				    libzfs_error_description(g_zfs));
				err = zfs_err_to_be_err(g_zfs);
				return (err);
			}
			if (zfs_prop_get_int(zhp, ZFS_PROP_MOUNTED)) {
				/*
				 * it's already mounted so we can't change the
				 * canmount property anyway.
				 */
				err = BE_SUCCESS;
				ZFS_CLOSE(zhp);
				break;
			}
			err = zfs_prop_set(zhp,
			    zfs_prop_to_name(ZFS_PROP_CANMOUNT), value);
			if (err) {
				ZFS_CLOSE(zhp);
				be_print_err(gettext("set_canmount: "
				    "Failed to set property value %s "
				    "for dataset %s: %s\n"), value, ds_path,
				    libzfs_error_description(g_zfs));
				err = zfs_err_to_be_err(g_zfs);
				return (err);
			}
			ZFS_CLOSE(zhp);
			datasets = datasets->be_next_dataset;
		}
		list = list->be_next_node;
	}
	return (err);
}
Example #10
/*
 * Function:	_be_list
 * Description:	This does the actual work described in be_list.
 * Parameters:
 *		be_name - The name of the BE to look up.
 *			  If NULL a list of all BEs will be returned.
 *		be_nodes - A reference pointer to the list of BEs. The list
 *			   structure will be allocated here and must
 *			   be freed by a call to be_free_list. If there are no
 *			   BEs found on the system this reference will be
 *			   set to NULL.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 * Scope:
 *		Semi-private (library wide use only)
 */
int
_be_list(char *be_name, be_node_list_t **be_nodes)
{
	list_callback_data_t cb = { 0 };
	be_transaction_data_t bt = { 0 };
	int ret = BE_SUCCESS;
	zpool_handle_t *zphp;
	char *rpool = NULL;
	struct be_defaults be_defaults;

	if (be_nodes == NULL)
		return (BE_ERR_INVAL);

	be_get_defaults(&be_defaults);

	if (be_find_current_be(&bt) != BE_SUCCESS) {
		/*
		 * We were unable to find a currently booted BE, which
		 * probably means that we're not booted in a BE environment.
		 * None of the BE's will be marked as the active BE.
		 */
		(void) strcpy(cb.current_be, "-");
	} else {
		(void) strncpy(cb.current_be, bt.obe_name,
		    sizeof (cb.current_be));
		rpool = bt.obe_zpool;
	}

	/*
	 * If be_name is NULL we'll look for all BE's on the system.
	 * If not then we will only return data for the specified BE.
	 */
	if (be_name != NULL)
		cb.be_name = strdup(be_name);

	if (be_defaults.be_deflt_rpool_container && rpool != NULL) {
		if ((zphp = zpool_open(g_zfs, rpool)) == NULL) {
			be_print_err(gettext("be_list: failed to "
			    "open rpool (%s): %s\n"), rpool,
			    libzfs_error_description(g_zfs));
			free(cb.be_name);
			return (zfs_err_to_be_err(g_zfs));
		}

		ret = be_get_list_callback(zphp, &cb);
	} else {
		if ((zpool_iter(g_zfs, be_get_list_callback, &cb)) != 0) {
			if (cb.be_nodes_head != NULL) {
				be_free_list(cb.be_nodes_head);
				cb.be_nodes_head = NULL;
				cb.be_nodes = NULL;
			}
			ret = BE_ERR_BE_NOENT;
		}
	}

	if (cb.be_nodes_head == NULL) {
		if (be_name != NULL)
			be_print_err(gettext("be_list: BE (%s) does not "
			    "exist\n"), be_name);
		else
			be_print_err(gettext("be_list: No BE's found\n"));
		ret = BE_ERR_BE_NOENT;
	}

	*be_nodes = cb.be_nodes_head;

	free(cb.be_name);

	be_sort_list(be_nodes);

	return (ret);
}
Example #11
/*
 * Remove and release the backup snapshot
 */
int
snapshot_destroy(char *volname, char *jname, boolean_t recursive,
    boolean_t hold, int *zfs_err)
{
	char snapname[ZFS_MAXNAMELEN];
	zfs_handle_t *zhp;
	zfs_type_t ztype;
	char *namep;
	int err;

	if (zfs_err)
		*zfs_err = 0;

	if (!volname || !*volname)
		return (-1);

	if (recursive) {
		ztype = ZFS_TYPE_VOLUME | ZFS_TYPE_FILESYSTEM;
		namep = volname;
	} else {
		(void) snprintf(snapname, ZFS_MAXNAMELEN, "%s@%s", volname,
		    jname);
		namep = snapname;
		ztype = ZFS_TYPE_SNAPSHOT;
	}

	(void) mutex_lock(&zlib_mtx);
	if (hold &&
	    snapshot_release(volname, namep, jname, recursive) != 0) {
		NDMP_LOG(LOG_DEBUG,
		    "snapshot_destroy: %s release failed (err=%d): %s",
		    namep, errno, libzfs_error_description(zlibh));
		(void) mutex_unlock(&zlib_mtx);
		return (-1);
	}

	if ((zhp = zfs_open(zlibh, namep, ztype)) == NULL) {
		NDMP_LOG(LOG_DEBUG, "snapshot_destroy: open %s failed",
		    namep);
		(void) mutex_unlock(&zlib_mtx);
		return (-1);
	}

	if (recursive) {
		err = zfs_destroy_snaps(zhp, jname, B_TRUE);
	} else {
		err = zfs_destroy(zhp, B_TRUE);
	}

	if (err) {
		NDMP_LOG(LOG_ERR, "%s (recursive destroy: %d): %d; %s; %s",
		    namep,
		    recursive,
		    libzfs_errno(zlibh),
		    libzfs_error_action(zlibh),
		    libzfs_error_description(zlibh));

		if (zfs_err)
			*zfs_err = err;
	}

	zfs_close(zhp);
	(void) mutex_unlock(&zlib_mtx);

	return (0);
}
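Note that snapshot_destroy() returns 0 even when the destroy itself fails; the libzfs error is surfaced only through the optional zfs_err out-parameter, so callers that care must check it explicitly. A hypothetical caller, with illustrative names:

	int zfs_err = 0;

	/* Illustrative only: release and destroy the job's snapshot. */
	(void) snapshot_destroy("tank/vol", "ndmp-job-42", B_FALSE, B_TRUE,
	    &zfs_err);
	if (zfs_err != 0)
		NDMP_LOG(LOG_ERR, "snapshot cleanup failed (zfs_err=%d)",
		    zfs_err);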
Example #12
/*
	Create a home directory and populate with files from skeleton
	directory.
*/
int
create_home(char *homedir, char *skeldir, uid_t uid, gid_t gid, int newfs)
		/* home directory to create */
		/* skel directory to copy if indicated */
		/* uid of new user */
		/* group id of new user */
		/* allow filesystem creation */
{
	struct stat stbuf;
	char *dname, *bname;
	char *dataset;

	if (g_zfs == NULL)
		g_zfs = libzfs_init();

	(void) strcpy(dhome, homedir);
	(void) strcpy(bhome, homedir);
	dname = dirname(dhome);
	bname = basename(bhome);

	(void) strcpy(pdir, dname);
	if ((stat(pdir, &stbuf) != 0) || !S_ISDIR(stbuf.st_mode)) {
		errmsg(M_OOPS, "access the parent directory", strerror(errno));
		return (EX_HOMEDIR);
	}

	if (strcmp(stbuf.st_fstype, MNTTYPE_AUTOFS) == 0) {
		(void) strcpy(pdir, EXPORTDIR);
		(void) strlcat(pdir, dname, PATH_MAX + 1);
		(void) snprintf(homedir, PATH_MAX + 1, "%s/%s", pdir, bname);
		if (stat(pdir, &stbuf) == 0)
			(void) edit_autofs_home(bname, bname, pdir);
	}

	if ((strcmp(stbuf.st_fstype, MNTTYPE_ZFS) == 0) &&
	    (g_zfs != NULL) && newfs &&
	    ((dataset = get_mnt_special(pdir, stbuf.st_fstype)) != NULL)) {
		char nm[ZFS_MAXNAMELEN];
		zfs_handle_t *zhp;

		(void) snprintf(nm, ZFS_MAXNAMELEN, "%s/%s", dataset, bname);

		if ((zfs_create(g_zfs, nm, ZFS_TYPE_FILESYSTEM, NULL) != 0) ||
		    ((zhp = zfs_open(g_zfs, nm, ZFS_TYPE_FILESYSTEM)) ==
		    NULL)) {
			errmsg(M_OOPS, "create the home directory",
			    libzfs_error_description(g_zfs));
			return (EX_HOMEDIR);
		}

		if (zfs_mount(zhp, NULL, 0) != 0) {
			errmsg(M_OOPS, "mount the home directory",
			    libzfs_error_description(g_zfs));
			(void) zfs_destroy(zhp, B_FALSE);
			return (EX_HOMEDIR);
		}

		zfs_close(zhp);

		if (chmod(homedir, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) != 0) {
			errmsg(M_OOPS, "change permissions of home directory",
			    strerror(errno));
			return (EX_HOMEDIR);
		}
	} else {
		if (mkdir(homedir, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) != 0) {
			errmsg(M_OOPS, "create the home directory",
			    strerror(errno));
			return (EX_HOMEDIR);
		}
	}

	if( chown(homedir, uid, gid) != 0 ) {
		errmsg(M_OOPS, "change ownership of home directory", 
		    strerror(errno));
		return( EX_HOMEDIR );
	}

	if(skeldir) {
		/* copy the skel_dir into the home directory */
		(void) sprintf( cmdbuf, "cd %s && find . -print | cpio -pd %s",
			skeldir, homedir);

		if( system( cmdbuf ) != 0 ) {
			errmsg(M_OOPS, "copy skeleton directory into home "
			    "directory", strerror(errno));
			(void) rm_homedir( homedir );
			return( EX_HOMEDIR );
		}

		/* make sure contents in the home directory have the correct owner */
		(void) sprintf( cmdbuf,"cd %s && find . -exec chown %ld {} \\;",
			homedir, uid );
		if( system( cmdbuf ) != 0) {
			errmsg(M_OOPS, "change owner of files home directory",
			    strerror(errno));

			(void) rm_homedir( homedir );
			return( EX_HOMEDIR );
		}

		/* and group....... */
		(void) sprintf( cmdbuf, "cd %s && find . -exec chgrp %ld {} \\;",
			homedir, gid );
		if( system( cmdbuf ) != 0) {
			errmsg(M_OOPS, "change group of files home directory",
			    strerror(errno));
			(void) rm_homedir( homedir );
			return( EX_HOMEDIR );
		}
	}
	return( EX_SUCCESS );
}
Example #13
/*
 * Create a home directory and populate with files from skeleton
 * directory.
 */
int
create_home(char *homedir, char *skeldir, uid_t uid, gid_t gid, int flags)
		/* home directory to create */
		/* skel directory to copy if indicated */
		/* uid of new user */
		/* group id of new user */
		/* miscellaneous flags */
{
	struct stat stbuf;
	char *dataset;
	char *dname, *bname, *rp;
	int created_fs = 0;

	rp = realpath(homedir, NULL);
	if (rp && (strcmp(rp, "/") == 0)) {
		return (EX_HOMEDIR);
	}

	(void) strcpy(dhome, homedir);
	(void) strcpy(bhome, homedir);
	dname = dirname(dhome);
	bname = basename(bhome);
	(void) strcpy(pdir, dname);

	if ((stat(pdir, &stbuf) != 0) || !S_ISDIR(stbuf.st_mode)) {
		errmsg(M_OOPS, "access the parent directory", strerror(errno));
		return (EX_HOMEDIR);
	}

	if ((strcmp(stbuf.st_fstype, MNTTYPE_ZFS) == 0) &&
	    (flags & MANAGE_ZFS)) {
		if (g_zfs == NULL)
			g_zfs = libzfs_init();
		if (g_zfs == NULL) {
			errmsg(M_OOPS, "libzfs_init failure", strerror(errno));
			return (EX_HOMEDIR);
		}
		if ((dataset = get_mnt_special(pdir, stbuf.st_fstype))
		    != NULL) {
			char nm[ZFS_MAX_DATASET_NAME_LEN];
			zfs_handle_t *zhp;

			(void) snprintf(nm, sizeof (nm), "%s/%s",
			    dataset, bname);

			if ((zfs_create(g_zfs, nm, ZFS_TYPE_FILESYSTEM, NULL)
				!= 0) ||
			    ((zhp = zfs_open(g_zfs, nm, ZFS_TYPE_FILESYSTEM)) ==
			    NULL)) {
				errmsg(M_OOPS, "create the home directory",
				    libzfs_error_description(g_zfs));
				libzfs_fini(g_zfs);
				g_zfs = NULL;
				return (EX_HOMEDIR);
			}

			if (zfs_mount(zhp, NULL, 0) != 0) {
				errmsg(M_OOPS, "mount the home directory",
				    libzfs_error_description(g_zfs));
				(void) zfs_destroy(zhp, B_FALSE);
				zfs_close(zhp);
				libzfs_fini(g_zfs);
				g_zfs = NULL;
				return (EX_HOMEDIR);
			}

			zfs_close(zhp);

			if (chmod(homedir,
				S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) != 0) {
				errmsg(M_OOPS,
				    "change permissions of home directory",
				    strerror(errno));
				libzfs_fini(g_zfs);
				g_zfs = NULL;
				return (EX_HOMEDIR);
			}

			created_fs = 1;
		} else {
			errmsg(M_NO_ZFS_MOUNTPOINT, pdir);
		}
	}

	if (!created_fs) {
		if (mkdir(homedir, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
			!= 0) {
			errmsg(M_OOPS, "create the home directory",
			    strerror(errno));
			if (g_zfs != NULL) {
				libzfs_fini(g_zfs);
				g_zfs = NULL;
			}
			return (EX_HOMEDIR);
		}
	}

	if (chown(homedir, uid, gid) != 0) {
		errmsg(M_OOPS, "change ownership of home directory",
		    strerror(errno));
		if (g_zfs != NULL) {
			libzfs_fini(g_zfs);
			g_zfs = NULL;
		}
		return (EX_HOMEDIR);
	}

	if (skeldir != NULL) {
		/* copy the skel_dir into the home directory */
		(void) sprintf(cmdbuf, "cd %s && find . -print | cpio -pd %s",
			skeldir, homedir);

		if (system(cmdbuf) != 0) {
			errmsg(M_OOPS, "copy skeleton directory into home "
			    "directory", strerror(errno));
			(void) rm_homedir(homedir, flags);
			if (g_zfs != NULL) {
				libzfs_fini(g_zfs);
				g_zfs = NULL;
			}
			return (EX_HOMEDIR);
		}

		/* make sure contents in the home directory have the correct owner */
		(void) sprintf(cmdbuf,
		    "cd %s && find . -exec chown %ld:%ld {} \\;",
		    homedir, uid, gid);
		if (system(cmdbuf) != 0) {
			errmsg(M_OOPS,
			    "change owner and group of files home directory",
			    strerror(errno));
			(void) rm_homedir(homedir, flags);
			if (g_zfs != NULL) {
				libzfs_fini(g_zfs);
				g_zfs = NULL;
			}
			return (EX_HOMEDIR);
		}

	}
	if (g_zfs != NULL) {
		libzfs_fini(g_zfs);
		g_zfs = NULL;
	}
	return (EX_SUCCESS);
}
Example #14
/*
 * Function:	be_get_node_data
 * Description:	Helper function used to collect all the information to fill
 *		in the be_node_list structure to be returned by be_list.
 * Parameters:
 *		zhp - Handle to the root dataset for the BE whose information
 *		      we're collecting.
 *		be_node - a pointer to the node structure we're filling in.
 *		be_name - The BE name of the node whose information we're
 *		          collecting.
 *		current_be - the name of the currently active BE.
 *		be_ds - The dataset name for the BE.
 *
 * Returns:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 * Scope:
 *		Private
 */
static int
be_get_node_data(
	zfs_handle_t *zhp,
	be_node_list_t *be_node,
	char *be_name,
	const char *rpool,
	char *current_be,
	char *be_ds)
{
	char prop_buf[MAXPATHLEN];
	nvlist_t *userprops = NULL;
	nvlist_t *propval = NULL;
	nvlist_t *zone_propval = NULL;
	char *prop_str = NULL;
	char *zone_prop_str = NULL;
	char *grub_default_bootfs = NULL;
	zpool_handle_t *zphp = NULL;
	int err = 0;

	if (be_node == NULL || be_name == NULL || current_be == NULL ||
	    be_ds == NULL) {
		be_print_err(gettext("be_get_node_data: invalid arguments, "
		    "can not be NULL\n"));
		return (BE_ERR_INVAL);
	}

	errno = 0;

	be_node->be_root_ds = strdup(be_ds);
	if ((err = errno) != 0 || be_node->be_root_ds == NULL) {
		be_print_err(gettext("be_get_node_data: failed to "
		    "copy root dataset name\n"));
		return (errno_to_be_err(err));
	}

	be_node->be_node_name = strdup(be_name);
	if ((err = errno) != 0 || be_node->be_node_name == NULL) {
		be_print_err(gettext("be_get_node_data: failed to "
		    "copy BE name\n"));
		return (errno_to_be_err(err));
	}
	if (strncmp(be_name, current_be, MAXPATHLEN) == 0)
		be_node->be_active = B_TRUE;
	else
		be_node->be_active = B_FALSE;

	be_node->be_rpool = strdup(rpool);
	if (be_node->be_rpool == NULL || (err = errno) != 0) {
		be_print_err(gettext("be_get_node_data: failed to "
		    "copy root pool name\n"));
		return (errno_to_be_err(err));
	}

	be_node->be_space_used = zfs_prop_get_int(zhp, ZFS_PROP_USED);

	if (getzoneid() == GLOBAL_ZONEID) {
		if ((zphp = zpool_open(g_zfs, rpool)) == NULL) {
			be_print_err(gettext("be_get_node_data: failed to open "
			    "pool (%s): %s\n"), rpool,
			    libzfs_error_description(g_zfs));
			return (zfs_err_to_be_err(g_zfs));
		}

		(void) zpool_get_prop(zphp, ZPOOL_PROP_BOOTFS, prop_buf,
		    ZFS_MAXPROPLEN, NULL, B_FALSE);
		if (be_has_grub() && (be_default_grub_bootfs(rpool,
		    &grub_default_bootfs) == BE_SUCCESS) &&
		    grub_default_bootfs != NULL) {
			if (strcmp(grub_default_bootfs, be_ds) == 0)
				be_node->be_active_on_boot = B_TRUE;
			else
				be_node->be_active_on_boot = B_FALSE;
		} else if (prop_buf != NULL &&
		    strcmp(prop_buf, be_ds) == 0) {
			be_node->be_active_on_boot = B_TRUE;
		} else {
			be_node->be_active_on_boot = B_FALSE;
		}

		be_node->be_global_active = B_TRUE;

		free(grub_default_bootfs);
		zpool_close(zphp);
	} else {
		if (be_zone_compare_uuids(be_node->be_root_ds))
			be_node->be_global_active = B_TRUE;
		else
			be_node->be_global_active = B_FALSE;
	}

	/*
	 * If the dataset is mounted use the mount point
	 * returned from the zfs_is_mounted call. If the
	 * dataset is not mounted then pull the mount
	 * point information out of the zfs properties.
	 */
	be_node->be_mounted = zfs_is_mounted(zhp,
	    &(be_node->be_mntpt));
	if (!be_node->be_mounted) {
		if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, prop_buf,
		    ZFS_MAXPROPLEN, NULL, NULL, 0, B_FALSE) == 0)
			be_node->be_mntpt = strdup(prop_buf);
		else
			return (zfs_err_to_be_err(g_zfs));
	}

	be_node->be_node_creation = (time_t)zfs_prop_get_int(zhp,
	    ZFS_PROP_CREATION);

	/* Get all user properties used for libbe */
	if ((userprops = zfs_get_user_props(zhp)) == NULL) {
		be_node->be_policy_type = strdup(be_default_policy());
	} else {
		if (getzoneid() != GLOBAL_ZONEID) {
			if (nvlist_lookup_nvlist(userprops,
			    BE_ZONE_ACTIVE_PROPERTY, &zone_propval) != 0 ||
			    zone_propval == NULL) {
				be_node->be_active_on_boot = B_FALSE;
			} else {
				verify(nvlist_lookup_string(zone_propval,
				    ZPROP_VALUE, &zone_prop_str) == 0);
				if (strcmp(zone_prop_str, "on") == 0) {
					be_node->be_active_on_boot = B_TRUE;
				} else {
					be_node->be_active_on_boot = B_FALSE;
				}
			}
		}

		if (nvlist_lookup_nvlist(userprops, BE_POLICY_PROPERTY,
		    &propval) != 0 || propval == NULL) {
			be_node->be_policy_type =
			    strdup(be_default_policy());
		} else {
			verify(nvlist_lookup_string(propval, ZPROP_VALUE,
			    &prop_str) == 0);
			if (prop_str == NULL || strcmp(prop_str, "-") == 0 ||
			    strcmp(prop_str, "") == 0)
				be_node->be_policy_type =
				    strdup(be_default_policy());
			else
				be_node->be_policy_type = strdup(prop_str);
		}
		if (getzoneid() != GLOBAL_ZONEID) {
			if (nvlist_lookup_nvlist(userprops,
			    BE_ZONE_PARENTBE_PROPERTY, &propval) == 0 &&
			    nvlist_lookup_string(propval, ZPROP_VALUE,
			    &prop_str) == 0) {
				be_node->be_uuid_str = strdup(prop_str);
			}
		} else {
			if (nvlist_lookup_nvlist(userprops, BE_UUID_PROPERTY,
			    &propval) == 0 && nvlist_lookup_string(propval,
			    ZPROP_VALUE, &prop_str) == 0) {
				be_node->be_uuid_str = strdup(prop_str);
			}
		}
	}

	/*
	 * Increment the dataset counter to include the root dataset
	 * of the BE.
	 */
	be_node->be_node_num_datasets++;

	return (BE_SUCCESS);
}
Example #15
/*
 * Function:	be_get_list_callback
 * Description:	Callback function used by zfs_iter to look through all
 *		the pools on the system looking for BEs. If a BE name was
 *		specified only that BE's information will be collected and
 *		returned.
 * Parameters:
 *		zlp - handle to the first zfs dataset. (provided by the
 *		      zfs_iter_* call)
 *		data - pointer to the callback data and where we'll pass
 *		       the BE information back.
 * Returns:
 *		0 - Success
 *		be_errno_t - Failure
 * Scope:
 *		Private
 */
static int
be_get_list_callback(zpool_handle_t *zlp, void *data)
{
	list_callback_data_t *cb = (list_callback_data_t *)data;
	char be_ds[MAXPATHLEN];
	char *open_ds = NULL;
	char *rpool = NULL;
	zfs_handle_t *zhp = NULL;
	int ret = 0;

	cb->zpool_name = rpool = (char *)zpool_get_name(zlp);

	/*
	 * Generate string for the BE container dataset
	 */
	be_make_container_ds(rpool, be_container_ds,
	    sizeof (be_container_ds));

	/*
	 * If a BE name was specified we use its root dataset in place of
	 * the container dataset. This is because we only want to collect
	 * the information for the specified BE.
	 */
	if (cb->be_name != NULL) {
		if (!be_valid_be_name(cb->be_name))
			return (BE_ERR_INVAL);
		/*
		 * Generate string for the BE root dataset
		 */
		be_make_root_ds(rpool, cb->be_name, be_ds, sizeof (be_ds));
		open_ds = be_ds;
	} else {
		open_ds = be_container_ds;
	}

	/*
	 * Check if the dataset exists
	 */
	if (!zfs_dataset_exists(g_zfs, open_ds,
	    ZFS_TYPE_FILESYSTEM)) {
		/*
		 * The specified dataset does not exist in this pool or
		 * there are no valid BE's in this pool. Try the next zpool.
		 */
		zpool_close(zlp);
		return (0);
	}

	if ((zhp = zfs_open(g_zfs, open_ds, ZFS_TYPE_FILESYSTEM)) == NULL) {
		be_print_err(gettext("be_get_list_callback: failed to open "
		    "the BE dataset %s: %s\n"), open_ds,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		zpool_close(zlp);
		return (ret);
	}

	/*
	 * If a BE name was specified we iterate through the datasets
	 * and snapshots for this BE only. Otherwise we will iterate
	 * through the next level of datasets to find all the BE's
	 * within the pool.
	 */
	if (cb->be_name != NULL) {
		if (cb->be_nodes_head == NULL) {
			if ((cb->be_nodes_head = be_list_alloc(&ret,
			    sizeof (be_node_list_t))) == NULL) {
				ZFS_CLOSE(zhp);
				zpool_close(zlp);
				return (ret);
			}
			cb->be_nodes = cb->be_nodes_head;
		}

		if ((ret = be_get_node_data(zhp, cb->be_nodes, cb->be_name,
		    rpool, cb->current_be, be_ds)) != BE_SUCCESS) {
			ZFS_CLOSE(zhp);
			zpool_close(zlp);
			return (ret);
		}
		ret = zfs_iter_snapshots(zhp, be_add_children_callback, cb);
	}

	if (ret == 0)
		ret = zfs_iter_filesystems(zhp, be_add_children_callback, cb);
	ZFS_CLOSE(zhp);

	zpool_close(zlp);
	return (ret);
}
Example #16
/*
 * Function:	_be_activate
 * Description:	This does the actual work described in be_activate.
 * Parameters:
 *		be_name - pointer to the name of BE to activate.
 *
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 * Scope:
 *		Public
 */
int
_be_activate(char *be_name)
{
	be_transaction_data_t cb = { 0 };
	zfs_handle_t	*zhp = NULL;
	char		root_ds[MAXPATHLEN];
	char		*cur_vers = NULL, *new_vers = NULL;
	be_node_list_t	*be_nodes = NULL;
	uuid_t		uu = {0};
	int		entry, ret = BE_SUCCESS;
	int		zret = 0;

	/*
	 * TODO: The BE needs to be validated to make sure that it is actually
	 * a bootable BE.
	 */

	if (be_name == NULL)
		return (BE_ERR_INVAL);

	/* Set obe_name to be_name in the cb structure */
	cb.obe_name = be_name;

	/* find which zpool the be is in */
	if ((zret = zpool_iter(g_zfs, be_find_zpool_callback, &cb)) == 0) {
		be_print_err(gettext("be_activate: failed to "
		    "find zpool for BE (%s)\n"), cb.obe_name);
		return (BE_ERR_BE_NOENT);
	} else if (zret < 0) {
		be_print_err(gettext("be_activate: "
		    "zpool_iter failed: %s\n"),
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		return (ret);
	}

	be_make_root_ds(cb.obe_zpool, cb.obe_name, root_ds, sizeof (root_ds));
	cb.obe_root_ds = strdup(root_ds);

	if (getzoneid() == GLOBAL_ZONEID) {
		if (be_has_grub() && (ret = be_get_grub_vers(&cb, &cur_vers,
		    &new_vers)) != BE_SUCCESS) {
			be_print_err(gettext("be_activate: failed to get grub "
			    "versions from capability files.\n"));
			return (ret);
		}
		if (cur_vers != NULL) {
			/*
			 * We need to check to see if the version number from
			 * the BE being activated is greater than the current
			 * one.
			 */
			if (new_vers != NULL &&
			    atof(cur_vers) < atof(new_vers)) {
				if ((ret = be_do_installgrub(&cb))
				    != BE_SUCCESS) {
					free(new_vers);
					free(cur_vers);
					return (ret);
				}
				free(new_vers);
			}
			free(cur_vers);
		} else if (new_vers != NULL) {
			if ((ret = be_do_installgrub(&cb)) != BE_SUCCESS) {
				free(new_vers);
				return (ret);
			}
			free(new_vers);
		}
		if (!be_has_menu_entry(root_ds, cb.obe_zpool, &entry)) {
			if ((ret = be_append_menu(cb.obe_name, cb.obe_zpool,
			    NULL, NULL, NULL)) != BE_SUCCESS) {
				be_print_err(gettext("be_activate: Failed to "
				    "add BE (%s) to the GRUB menu\n"),
				    cb.obe_name);
				goto done;
			}
		}
		if (be_has_grub()) {
			if ((ret = be_change_grub_default(cb.obe_name,
			    cb.obe_zpool)) != BE_SUCCESS) {
				be_print_err(gettext("be_activate: failed to "
				    "change the default entry in menu.lst\n"));
				goto done;
			}
		}
	}

	if ((ret = _be_list(cb.obe_name, &be_nodes)) != BE_SUCCESS) {
		return (ret);
	}

	if ((ret = set_canmount(be_nodes, "noauto")) != BE_SUCCESS) {
		be_print_err(gettext("be_activate: failed to set "
		    "canmount dataset property\n"));
		goto done;
	}

	if ((ret = set_bootfs(be_nodes->be_rpool, root_ds)) != BE_SUCCESS) {
		be_print_err(gettext("be_activate: failed to set "
		    "bootfs pool property for %s\n"), root_ds);
		goto done;
	}

	if ((zhp = zfs_open(g_zfs, root_ds, ZFS_TYPE_FILESYSTEM)) != NULL) {
		/*
		 * We don't need to close the zfs handle at this
		 * point because the callback function
		 * be_promote_ds_callback() will close it for us.
		 */
		if (be_promote_ds_callback(zhp, NULL) != 0) {
			be_print_err(gettext("be_activate: "
			    "failed to activate the "
			    "datasets for %s: %s\n"),
			    root_ds,
			    libzfs_error_description(g_zfs));
			ret = BE_ERR_PROMOTE;
			goto done;
		}
	} else {
		be_print_err(gettext("be_activate:: failed to open "
		    "dataset (%s): %s\n"), root_ds,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	if (getzoneid() == GLOBAL_ZONEID &&
	    be_get_uuid(cb.obe_root_ds, &uu) == BE_SUCCESS &&
	    (ret = be_promote_zone_ds(cb.obe_name, cb.obe_root_ds))
	    != BE_SUCCESS) {
		be_print_err(gettext("be_activate: failed to promote "
		    "the active zonepath datasets for zones in BE %s\n"),
		    cb.obe_name);
	}

done:
	be_free_list(be_nodes);
	return (ret);
}
Example #17
/*
 * Function:	be_promote_ds_callback
 * Description:	This function is used to promote the datasets for the BE
 *		being activated as well as the datasets for the zones BE
 *		being activated.
 *
 * Parameters:
 *              zhp - the zfs handle for zone BE being activated.
 *		data - not used.
 * Return:
 *		0 - Success
 *		be_errno_t - Failure
 *
 * Scope:
 *		Private
 */
static int
/* LINTED */
be_promote_ds_callback(zfs_handle_t *zhp, void *data)
{
	char	origin[MAXPATHLEN];
	char	*sub_dataset = NULL;
	int	ret = 0;

	if (zhp != NULL) {
		sub_dataset = strdup(zfs_get_name(zhp));
		if (sub_dataset == NULL) {
			ret = BE_ERR_NOMEM;
			goto done;
		}
	} else {
		be_print_err(gettext("be_promote_ds_callback: "
		    "Invalid zfs handle passed into function\n"));
		ret = BE_ERR_INVAL;
		goto done;
	}

	/*
	 * This loop makes sure that we promote the dataset to the
	 * top of the tree so that it is no longer a descendant of any
	 * dataset. The ZFS close-and-reopen is used to make sure that
	 * the promotion is updated before we move on.
	 */
	while (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, origin,
	    sizeof (origin), NULL, NULL, 0, B_FALSE) == 0) {
		if (zfs_promote(zhp) != 0) {
			if (libzfs_errno(g_zfs) != EZFS_EXISTS) {
				be_print_err(gettext("be_promote_ds_callback: "
				    "promote of %s failed: %s\n"),
				    zfs_get_name(zhp),
				    libzfs_error_description(g_zfs));
				ret = zfs_err_to_be_err(g_zfs);
				goto done;
			} else {
				/*
				 * If the call to zfs_promote returns the
				 * error EZFS_EXISTS we've hit a snapshot name
				 * collision. This means we're probably
				 * attempting to promote a zone dataset above a
				 * parent dataset that belongs to another zone
				 * which this zone was cloned from.
				 *
				 * TODO: If this is a zone dataset at some
				 * point we should skip this if the zone
				 * paths for the dataset and the snapshot
				 * don't match.
				 */
				be_print_err(gettext("be_promote_ds_callback: "
				    "promote of %s failed due to snapshot "
				    "name collision: %s\n"), zfs_get_name(zhp),
				    libzfs_error_description(g_zfs));
				ret = zfs_err_to_be_err(g_zfs);
				goto done;
			}
		}
		ZFS_CLOSE(zhp);
		if ((zhp = zfs_open(g_zfs, sub_dataset,
		    ZFS_TYPE_FILESYSTEM)) == NULL) {
			be_print_err(gettext("be_promote_ds_callback: "
			    "Failed to open dataset (%s): %s\n"), sub_dataset,
			    libzfs_error_description(g_zfs));
			ret = zfs_err_to_be_err(g_zfs);
			goto done;
		}
	}

	/* Iterate down this dataset's children and promote them */
	ret = zfs_iter_filesystems(zhp, be_promote_ds_callback, NULL);

done:
	free(sub_dataset);
	ZFS_CLOSE(zhp);
	return (ret);
}
Example #18
int
be_rename(nvlist_t *be_attrs)
{
	be_transaction_data_t	bt = { 0 };
	be_transaction_data_t	cbt = { 0 };
	be_fs_list_data_t	fld = { 0 };
	zfs_handle_t	*zhp = NULL;
	char		root_ds[MAXPATHLEN];
	char		*mp = NULL;
	int		zret = 0, ret = BE_SUCCESS;

	/* Initialize libzfs handle */
	if (!be_zfs_init())
		return (BE_ERR_INIT);

	/* Get original BE name to rename from */
	if (nvlist_lookup_string(be_attrs, BE_ATTR_ORIG_BE_NAME, &bt.obe_name)
	    != 0) {
		be_print_err(gettext("be_rename: failed to "
		    "lookup BE_ATTR_ORIG_BE_NAME attribute\n"));
		be_zfs_fini();
		return (BE_ERR_INVAL);
	}

	/* Get new BE name to rename to */
	if (nvlist_lookup_string(be_attrs, BE_ATTR_NEW_BE_NAME, &bt.nbe_name)
	    != 0) {
		be_print_err(gettext("be_rename: failed to "
		    "lookup BE_ATTR_NEW_BE_NAME attribute\n"));
		be_zfs_fini();
		return (BE_ERR_INVAL);
	}

	/*
	 * Get the currently active BE and check to see if this
	 * is an attempt to rename the currently active BE.
	 */
	if (be_find_current_be(&cbt) != BE_SUCCESS) {
		be_print_err(gettext("be_rename: failed to find the currently "
		    "active BE\n"));
		be_zfs_fini();
		return (BE_ERR_CURR_BE_NOT_FOUND);
	}

	if (strncmp(bt.obe_name, cbt.obe_name,
	    MAX(strlen(bt.obe_name), strlen(cbt.obe_name))) == 0) {
		be_print_err(gettext("be_rename: This is an attempt to rename "
		    "the currently active BE, which is not supported\n"));
		be_zfs_fini();
		free(cbt.obe_name);
		return (BE_ERR_RENAME_ACTIVE);
	}

	/* Validate original BE name */
	if (!be_valid_be_name(bt.obe_name)) {
		be_print_err(gettext("be_rename: "
		    "invalid BE name %s\n"), bt.obe_name);
		be_zfs_fini();
		return (BE_ERR_INVAL);
	}

	/* Validate new BE name */
	if (!be_valid_be_name(bt.nbe_name)) {
		be_print_err(gettext("be_rename: invalid BE name %s\n"),
		    bt.nbe_name);
		be_zfs_fini();
		return (BE_ERR_INVAL);
	}

	/* Find which zpool the BE is in */
	if ((zret = zpool_iter(g_zfs, be_find_zpool_callback, &bt)) == 0) {
		be_print_err(gettext("be_rename: failed to "
		    "find zpool for BE (%s)\n"), bt.obe_name);
		be_zfs_fini();
		return (BE_ERR_BE_NOENT);
	} else if (zret < 0) {
		be_print_err(gettext("be_rename: zpool_iter failed: %s\n"),
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		be_zfs_fini();
		return (ret);
	}

	/* New BE will reside in the same zpool as orig BE */
	bt.nbe_zpool = bt.obe_zpool;

	be_make_root_ds(bt.obe_zpool, bt.obe_name, root_ds, sizeof (root_ds));
	bt.obe_root_ds = strdup(root_ds);
	be_make_root_ds(bt.nbe_zpool, bt.nbe_name, root_ds, sizeof (root_ds));
	bt.nbe_root_ds = strdup(root_ds);

	/*
	 * Generate a list of file systems from the BE that are legacy
	 * mounted before renaming.  We use this list to determine which
	 * entries in the vfstab we need to update after we've renamed the BE.
	 */
	if ((ret = be_get_legacy_fs(bt.obe_name, bt.obe_root_ds, NULL, NULL,
	    &fld)) != BE_SUCCESS) {
		be_print_err(gettext("be_rename: failed to "
		    "get legacy mounted file system list for %s\n"),
		    bt.obe_name);
		goto done;
	}

	/* Get handle to BE's root dataset */
	if ((zhp = zfs_open(g_zfs, bt.obe_root_ds, ZFS_TYPE_FILESYSTEM))
	    == NULL) {
		be_print_err(gettext("be_rename: failed to "
		    "open BE root dataset (%s): %s\n"),
		    bt.obe_root_ds, libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/* Rename of BE's root dataset. */
	if (zfs_rename(zhp, bt.nbe_root_ds, B_FALSE, B_FALSE) != 0) {
		be_print_err(gettext("be_rename: failed to "
		    "rename dataset (%s): %s\n"), bt.obe_root_ds,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/* Refresh handle to BE's root dataset after the rename */
	ZFS_CLOSE(zhp);
	if ((zhp = zfs_open(g_zfs, bt.nbe_root_ds, ZFS_TYPE_FILESYSTEM))
	    == NULL) {
		be_print_err(gettext("be_rename: failed to "
		    "open BE root dataset (%s): %s\n"),
		    bt.obe_root_ds, libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/* If BE is already mounted, get its mountpoint */
	if (zfs_is_mounted(zhp, &mp) && mp == NULL) {
		be_print_err(gettext("be_rename: failed to "
		    "get altroot of mounted BE %s: %s\n"),
		    bt.nbe_name, libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/* Update BE's vfstab */
	if ((ret = be_update_vfstab(bt.nbe_name, bt.obe_zpool, bt.nbe_zpool,
	    &fld, mp)) != BE_SUCCESS) {
		be_print_err(gettext("be_rename: "
		    "failed to update new BE's vfstab (%s)\n"), bt.nbe_name);
		goto done;
	}

	/* Update this BE's GRUB menu entry */
	if (getzoneid() == GLOBAL_ZONEID && (ret = be_update_menu(bt.obe_name,
	    bt.nbe_name, bt.obe_zpool, NULL)) != BE_SUCCESS) {
		be_print_err(gettext("be_rename: "
		    "failed to update grub menu entry from %s to %s\n"),
		    bt.obe_name, bt.nbe_name);
	}

done:
	be_free_fs_list(&fld);

	ZFS_CLOSE(zhp);

	be_zfs_fini();

	free(bt.obe_root_ds);
	free(bt.nbe_root_ds);
	return (ret);
}
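A hypothetical caller of be_rename(), built from the two attributes the function looks up above; the BE names are illustrative only.

static int
rename_be_example(void)
{
	nvlist_t *attrs = NULL;
	int ret;

	/* Sketch only: rename BE "old-be" to "new-be". */
	if (nvlist_alloc(&attrs, NV_UNIQUE_NAME, 0) != 0)
		return (BE_ERR_NOMEM);
	if (nvlist_add_string(attrs, BE_ATTR_ORIG_BE_NAME, "old-be") != 0 ||
	    nvlist_add_string(attrs, BE_ATTR_NEW_BE_NAME, "new-be") != 0) {
		nvlist_free(attrs);
		return (BE_ERR_NOMEM);
	}
	ret = be_rename(attrs);	/* BE_SUCCESS or a be_errno_t value */
	nvlist_free(attrs);
	return (ret);
}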
Example #19
/*
 * Function:	be_add_children_callback
 * Description:	Callback function used by zfs_iter to look through all
 *		the datasets and snapshots for each BE and add them to
 *		the lists of information to be passed back.
 * Parameters:
 *		zhp - handle to the first zfs dataset. (provided by the
 *		      zfs_iter_* call)
 *		data - pointer to the callback data and where we'll pass
 *		       the BE information back.
 * Returns:
 *		0 - Success
 *		be_errno_t - Failure
 * Scope:
 *		Private
 */
static int
be_add_children_callback(zfs_handle_t *zhp, void *data)
{
	list_callback_data_t	*cb = (list_callback_data_t *)data;
	char			*str = NULL, *ds_path = NULL;
	int			ret = 0;
	struct be_defaults be_defaults;

	be_get_defaults(&be_defaults);

	ds_path = str = strdup(zfs_get_name(zhp));

	/*
	 * get past the end of the container dataset plus the trailing "/"
	 */
	str = str + (strlen(be_container_ds) + 1);
	if (be_defaults.be_deflt_rpool_container) {
		/* just skip if invalid */
		if (!be_valid_be_name(str))
			return (BE_SUCCESS);
	}

	if (cb->be_nodes_head == NULL) {
		if ((cb->be_nodes_head = be_list_alloc(&ret,
		    sizeof (be_node_list_t))) == NULL) {
			ZFS_CLOSE(zhp);
			return (ret);
		}
		cb->be_nodes = cb->be_nodes_head;
	}

	if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT && !zone_be) {
		be_snapshot_list_t *snapshots = NULL;
		if (cb->be_nodes->be_node_snapshots == NULL) {
			if ((cb->be_nodes->be_node_snapshots =
			    be_list_alloc(&ret, sizeof (be_snapshot_list_t)))
			    == NULL || ret != BE_SUCCESS) {
				ZFS_CLOSE(zhp);
				return (ret);
			}
			cb->be_nodes->be_node_snapshots->be_next_snapshot =
			    NULL;
			snapshots = cb->be_nodes->be_node_snapshots;
		} else {
			for (snapshots = cb->be_nodes->be_node_snapshots;
			    snapshots != NULL;
			    snapshots = snapshots->be_next_snapshot) {
				if (snapshots->be_next_snapshot != NULL)
					continue;
				/*
				 * We're at the end of the list add the
				 * new snapshot.
				 */
				if ((snapshots->be_next_snapshot =
				    be_list_alloc(&ret,
				    sizeof (be_snapshot_list_t))) == NULL ||
				    ret != BE_SUCCESS) {
					ZFS_CLOSE(zhp);
					return (ret);
				}
				snapshots = snapshots->be_next_snapshot;
				snapshots->be_next_snapshot = NULL;
				break;
			}
		}
		if ((ret = be_get_ss_data(zhp, str, snapshots,
		    cb->be_nodes)) != BE_SUCCESS) {
			ZFS_CLOSE(zhp);
			return (ret);
		}
	} else if (strchr(str, '/') == NULL) {
		if (cb->be_nodes->be_node_name != NULL) {
			if ((cb->be_nodes->be_next_node =
			    be_list_alloc(&ret, sizeof (be_node_list_t))) ==
			    NULL || ret != BE_SUCCESS) {
				ZFS_CLOSE(zhp);
				return (ret);
			}
			cb->be_nodes = cb->be_nodes->be_next_node;
			cb->be_nodes->be_next_node = NULL;
		}

		/*
		 * If this is a zone root dataset then we only need
		 * the name of the zone BE at this point. We grab that
		 * and return.
		 */
		if (zone_be) {
			ret = be_get_zone_node_data(cb->be_nodes, str);
			ZFS_CLOSE(zhp);
			return (ret);
		}

		if ((ret = be_get_node_data(zhp, cb->be_nodes, str,
		    cb->zpool_name, cb->current_be, ds_path)) != BE_SUCCESS) {
			ZFS_CLOSE(zhp);
			return (ret);
		}
	} else if (strchr(str, '/') != NULL && !zone_be) {
		be_dataset_list_t *datasets = NULL;
		if (cb->be_nodes->be_node_datasets == NULL) {
			if ((cb->be_nodes->be_node_datasets =
			    be_list_alloc(&ret, sizeof (be_dataset_list_t)))
			    == NULL || ret != BE_SUCCESS) {
				ZFS_CLOSE(zhp);
				return (ret);
			}
			cb->be_nodes->be_node_datasets->be_next_dataset = NULL;
			datasets = cb->be_nodes->be_node_datasets;
		} else {
			for (datasets = cb->be_nodes->be_node_datasets;
			    datasets != NULL;
			    datasets = datasets->be_next_dataset) {
				if (datasets->be_next_dataset != NULL)
					continue;
				/*
				 * We're at the end of the list add
				 * the new dataset.
				 */
				if ((datasets->be_next_dataset =
				    be_list_alloc(&ret,
				    sizeof (be_dataset_list_t)))
				    == NULL || ret != BE_SUCCESS) {
					ZFS_CLOSE(zhp);
					return (ret);
				}
				datasets = datasets->be_next_dataset;
				datasets->be_next_dataset = NULL;
				break;
			}
		}

		if ((ret = be_get_ds_data(zhp, str,
		    datasets, cb->be_nodes)) != BE_SUCCESS) {
			ZFS_CLOSE(zhp);
			return (ret);
		}
	}
	ret = zfs_iter_children(zhp, be_add_children_callback, cb);
	if (ret != 0) {
		be_print_err(gettext("be_add_children_callback: "
		    "encountered error: %s\n"),
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
	}
	ZFS_CLOSE(zhp);
	return (ret);
}
Example #20
/*
 * Function:	be_get_grub_vers
 * Description:	Gets the grub version number from /boot/grub/capability. If
 *              the capability file doesn't exist, NULL is returned.
 * Parameters:
 *              bt - The transaction data for the BE we're getting the grub
 *                   version for.
 *              cur_vers - used to return the current version of grub from
 *                         the root pool.
 *              new_vers - used to return the grub version of the BE we're
 *                         activating.
 * Return:
 *              BE_SUCCESS - Success
 *              be_errno_t - Failed to find version
 * Scope:
 *		Private
 */
static int
be_get_grub_vers(be_transaction_data_t *bt, char **cur_vers, char **new_vers)
{
	zfs_handle_t	*zhp = NULL;
	zfs_handle_t	*pool_zhp = NULL;
	int ret = BE_SUCCESS;
	char cap_file[MAXPATHLEN];
	char *temp_mntpnt = NULL;
	char *zpool_mntpt = NULL;
	char *ptmp_mntpnt = NULL;
	char *orig_mntpnt = NULL;
	boolean_t be_mounted = B_FALSE;
	boolean_t pool_mounted = B_FALSE;

	if (!be_has_grub()) {
		be_print_err(gettext("be_get_grub_vers: Not supported on "
		    "this architecture\n"));
		return (BE_ERR_NOTSUP);
	}

	if (bt == NULL || bt->obe_name == NULL || bt->obe_zpool == NULL ||
	    bt->obe_root_ds == NULL) {
		be_print_err(gettext("be_get_grub_vers: Invalid BE\n"));
		return (BE_ERR_INVAL);
	}

	if ((pool_zhp = zfs_open(g_zfs, bt->obe_zpool, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_get_grub_vers: zfs_open failed: %s\n"),
		    libzfs_error_description(g_zfs));
		return (zfs_err_to_be_err(g_zfs));
	}

	/*
	 * Check to see if the pool's dataset is mounted. If it isn't we'll
	 * attempt to mount it.
	 */
	if ((ret = be_mount_pool(pool_zhp, &ptmp_mntpnt,
	    &orig_mntpnt, &pool_mounted)) != BE_SUCCESS) {
		be_print_err(gettext("be_get_grub_vers: pool dataset "
		    "(%s) could not be mounted\n"), bt->obe_zpool);
		ZFS_CLOSE(pool_zhp);
		return (ret);
	}

	/*
	 * Get the mountpoint for the root pool dataset.
	 */
	if (!zfs_is_mounted(pool_zhp, &zpool_mntpt)) {
		be_print_err(gettext("be_get_grub_vers: pool "
		    "dataset (%s) is not mounted. Can't set the "
		    "default BE in the grub menu.\n"), bt->obe_zpool);
		ret = BE_ERR_NO_MENU;
		goto cleanup;
	}

	/*
	 * get the version of the most recent grub update.
	 */
	(void) snprintf(cap_file, sizeof (cap_file), "%s%s",
	    zpool_mntpt, BE_CAP_FILE);
	free(zpool_mntpt);
	zpool_mntpt = NULL;

	if ((ret = get_ver_from_capfile(cap_file, cur_vers)) != BE_SUCCESS)
		goto cleanup;

	if ((zhp = zfs_open(g_zfs, bt->obe_root_ds, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_get_grub_vers: failed to "
		    "open BE root dataset (%s): %s\n"), bt->obe_root_ds,
		    libzfs_error_description(g_zfs));
		free(*cur_vers);
		*cur_vers = NULL;
		ret = zfs_err_to_be_err(g_zfs);
		goto cleanup;
	}
	if (!zfs_is_mounted(zhp, &temp_mntpnt)) {
		if ((ret = _be_mount(bt->obe_name, &temp_mntpnt,
		    BE_MOUNT_FLAG_NO_ZONES)) != BE_SUCCESS) {
			be_print_err(gettext("be_get_grub_vers: failed to "
			    "mount BE (%s)\n"), bt->obe_name);
			free(*cur_vers);
			*cur_vers = NULL;
			ZFS_CLOSE(zhp);
			goto cleanup;
		}
		be_mounted = B_TRUE;
	}
	ZFS_CLOSE(zhp);

	/*
	 * Now get the grub version for the BE being activated.
	 */
	(void) snprintf(cap_file, sizeof (cap_file), "%s%s", temp_mntpnt,
	    BE_CAP_FILE);
	ret = get_ver_from_capfile(cap_file, new_vers);
	if (ret != BE_SUCCESS) {
		free(*cur_vers);
		*cur_vers = NULL;
	}
	if (be_mounted)
		(void) _be_unmount(bt->obe_name, 0);

cleanup:
	if (pool_mounted) {
		int iret = BE_SUCCESS;
		iret = be_unmount_pool(pool_zhp, ptmp_mntpnt, orig_mntpnt);
		if (ret == BE_SUCCESS)
			ret = iret;
		free(orig_mntpnt);
		free(ptmp_mntpnt);
	}
	ZFS_CLOSE(pool_zhp);

	free(temp_mntpnt);
	return (ret);
}
Example #21
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is not set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * Also can arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
    char *path;
    vdev_state_t newstate;
    nvlist_t *nvroot, *newvd;
    pendingdev_t *device;
    uint64_t wholedisk = 0ULL;
    uint64_t offline = 0ULL;
    uint64_t guid = 0ULL;
    char *physpath = NULL, *new_devid = NULL;
    char rawpath[PATH_MAX], fullpath[PATH_MAX];
    char devpath[PATH_MAX];
    int ret;

    if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
        return;

    (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

    if (offline)
        return;  /* don't intervene if it was taken offline */

    zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s' (%llu)",
                zpool_get_name(zhp), path, (long long unsigned int)guid);

    /*
     * The VDEV guid is preferred for identification (gets passed in path)
     */
    if (guid != 0) {
        (void) snprintf(fullpath, sizeof (fullpath), "%llu",
                        (long long unsigned int)guid);
    } else {
        /*
         * otherwise use path sans partition suffix for whole disks
         */
        (void) strlcpy(fullpath, path, sizeof (fullpath));
        if (wholedisk) {
            char *spath = zfs_strip_partition(g_zfshdl, fullpath);

            (void) strlcpy(fullpath, spath, sizeof (fullpath));
            free(spath);
        }
    }

    /*
     * Attempt to online the device.
     */
    if (zpool_vdev_online(zhp, fullpath,
                          ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
            (newstate == VDEV_STATE_HEALTHY ||
             newstate == VDEV_STATE_DEGRADED)) {
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
                    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
                    "HEALTHY" : "DEGRADED");
        return;
    }

    /*
     * If the pool doesn't have the autoreplace property set, then attempt
     * a true online (without the unspare flag), which will trigger a FMA
     * fault.
     */
    if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
            !wholedisk || physpath == NULL) {
        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * convert physical path into its current device node
     */
    (void) snprintf(rawpath, sizeof (rawpath), "%s%s", DEV_BYPATH_PATH,
                    physpath);
    if (realpath(rawpath, devpath) == NULL) {
        zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
                    rawpath, strerror(errno));

        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);

        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * we're auto-replacing a raw disk, so label it first
     */
    if (!labeled) {
        char *leafname;

        /*
         * If this is a request to label a whole disk, then attempt to
         * write out the label.  Before we can label the disk, we need
         * to map the physical string that was matched on to the
         * underlying device node.
         *
         * If any part of this process fails, then do a force online
         * to trigger a ZFS fault for the device (and any hot spare
         * replacement).
         */
        leafname = strrchr(devpath, '/') + 1;

        if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
            zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
                        "label '%s' (%s)", leafname,
                        libzfs_error_description(g_zfshdl));

            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        /*
         * The disk labeling is asynchronous on Linux. Just record
         * this label request and return as there will be another
         * disk add event for the partition after the labeling is
         * completed.
         */
        device = malloc(sizeof (pendingdev_t));
        (void) strlcpy(device->pd_physpath, physpath,
                       sizeof (device->pd_physpath));
        list_insert_tail(&g_device_list, device);

        zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
                    leafname, (long long unsigned int)guid);

        return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

    } else { /* labeled */
        boolean_t found = B_FALSE;
        /*
         * match up with request above to label the disk
         */
        for (device = list_head(&g_device_list); device != NULL;
                device = list_next(&g_device_list, device)) {
            if (strcmp(physpath, device->pd_physpath) == 0) {
                list_remove(&g_device_list, device);
                free(device);
                found = B_TRUE;
                break;
            }
        }
        if (!found) {
            /* unexpected partition slice encountered */
            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
                    physpath, (long long unsigned int)guid);

        if (nvlist_lookup_string(vdev, "new_devid", &new_devid) != 0) {
            zed_log_msg(LOG_INFO, "  auto replace: missing devid!");
            return;
        }

        (void) snprintf(devpath, sizeof (devpath), "%s%s",
                        DEV_BYID_PATH, new_devid);
        path = devpath;
    }

    /*
     * Construct the root vdev to pass to zpool_vdev_attach().  While adding
     * the entire vdev structure is harmless, we construct a reduced set of
     * path/physpath/wholedisk to keep it simple.
     */
    if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        return;
    }
    if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        nvlist_free(nvroot);
        return;
    }

    if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
            (physpath != NULL && nvlist_add_string(newvd,
                    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
            nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
            nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
            nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
                                    1) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
        nvlist_free(newvd);
        nvlist_free(nvroot);
        return;
    }

    nvlist_free(newvd);

    /*
     * Auto-replace the leaf disk at the same physical location.
     */
    ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

    zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
                fullpath, path, (ret == 0) ? "no errors" :
                libzfs_error_description(g_zfshdl));

    nvlist_free(nvroot);
}
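All of the examples on this page share one error-handling idiom: a failed libzfs call records a description on the library handle, which libzfs_error_description() then retrieves. A minimal, self-contained sketch of that idiom (the pool name "rpool" is a placeholder, not taken from the examples):

#include <stdio.h>
#include <libzfs.h>

int
main(void)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;

	if ((hdl = libzfs_init()) == NULL)
		return (1);

	if ((zhp = zpool_open(hdl, "rpool")) == NULL) {
		/* The handle carries the description of the last error. */
		(void) fprintf(stderr, "zpool_open: %s\n",
		    libzfs_error_description(hdl));
		libzfs_fini(hdl);
		return (1);
	}

	zpool_close(zhp);
	libzfs_fini(hdl);
	return (0);
}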
Example #22
/*
 * Function:	be_do_installgrub
 * Description:	This function runs installgrub, using the grub loader
 *              files from the BE we're activating, and installs them
 *              on the pool the BE lives in.
 *
 * Parameters:
 *              bt - The transaction data for the BE we're activating.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 *
 * Scope:
 *		Private
 */
static int
be_do_installgrub(be_transaction_data_t *bt)
{
	zpool_handle_t  *zphp = NULL;
	zfs_handle_t	*zhp = NULL;
	nvlist_t **child, *nv, *config;
	uint_t c, children = 0;
	char *tmp_mntpt = NULL;
	char *pool_mntpnt = NULL;
	char *ptmp_mntpnt = NULL;
	char *orig_mntpnt = NULL;
	FILE *cap_fp = NULL;
	FILE *zpool_cap_fp = NULL;
	char line[BUFSIZ];
	char cap_file[MAXPATHLEN];
	char zpool_cap_file[MAXPATHLEN];
	char stage1[MAXPATHLEN];
	char stage2[MAXPATHLEN];
	char installgrub_cmd[MAXPATHLEN];
	char *vname;
	char be_run_cmd_errbuf[BUFSIZ];
	int ret = BE_SUCCESS;
	int err = 0;
	boolean_t be_mounted = B_FALSE;
	boolean_t pool_mounted = B_FALSE;

	if (!be_has_grub()) {
		be_print_err(gettext("be_do_installgrub: Not supported "
		    "on this architecture\n"));
		return (BE_ERR_NOTSUP);
	}

	if ((zhp = zfs_open(g_zfs, bt->obe_root_ds, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_do_installgrub: failed to "
		    "open BE root dataset (%s): %s\n"), bt->obe_root_ds,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		return (ret);
	}
	if (!zfs_is_mounted(zhp, &tmp_mntpt)) {
		if ((ret = _be_mount(bt->obe_name, &tmp_mntpt,
		    BE_MOUNT_FLAG_NO_ZONES)) != BE_SUCCESS) {
			be_print_err(gettext("be_do_installgrub: failed to "
			    "mount BE (%s)\n"), bt->obe_name);
			ZFS_CLOSE(zhp);
			return (ret);
		}
		be_mounted = B_TRUE;
	}
	ZFS_CLOSE(zhp);

	(void) snprintf(stage1, sizeof (stage1), "%s%s", tmp_mntpt, BE_STAGE_1);
	(void) snprintf(stage2, sizeof (stage2), "%s%s", tmp_mntpt, BE_STAGE_2);

	if ((zphp = zpool_open(g_zfs, bt->obe_zpool)) == NULL) {
		be_print_err(gettext("be_do_installgrub: failed to open "
		    "pool (%s): %s\n"), bt->obe_zpool,
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		if (be_mounted)
			(void) _be_unmount(bt->obe_name, 0);
		free(tmp_mntpt);
		return (ret);
	}

	if ((config = zpool_get_config(zphp, NULL)) == NULL) {
		be_print_err(gettext("be_do_installgrub: failed to get zpool "
		    "configuration information. %s\n"),
		    libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	/*
	 * Get the vdev tree
	 */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) != 0) {
		be_print_err(gettext("be_do_installgrub: failed to get vdev "
		    "tree: %s\n"), libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		be_print_err(gettext("be_do_installgrub: failed to traverse "
		    "the vdev tree: %s\n"), libzfs_error_description(g_zfs));
		ret = zfs_err_to_be_err(g_zfs);
		goto done;
	}
	for (c = 0; c < children; c++) {
		uint_t i, nchildren = 0;
		nvlist_t **nvchild;
		vname = zpool_vdev_name(g_zfs, zphp, child[c], B_FALSE);
		if (vname == NULL) {
			be_print_err(gettext(
			    "be_do_installgrub: "
			    "failed to get device name: %s\n"),
			    libzfs_error_description(g_zfs));
			ret = zfs_err_to_be_err(g_zfs);
			goto done;
		}
		if (strcmp(vname, "mirror") == 0 || vname[0] != 'c') {
			/* Not a plain disk; free this name and descend. */
			free(vname);

			if (nvlist_lookup_nvlist_array(child[c],
			    ZPOOL_CONFIG_CHILDREN, &nvchild, &nchildren) != 0) {
				be_print_err(gettext("be_do_installgrub: "
				    "failed to traverse the vdev tree: %s\n"),
				    libzfs_error_description(g_zfs));
				ret = zfs_err_to_be_err(g_zfs);
				goto done;
			}

			for (i = 0; i < nchildren; i++) {
				vname = zpool_vdev_name(g_zfs, zphp,
				    nvchild[i], B_FALSE);
				if (vname == NULL) {
					be_print_err(gettext(
					    "be_do_installgrub: "
					    "failed to get device name: %s\n"),
					    libzfs_error_description(g_zfs));
					ret = zfs_err_to_be_err(g_zfs);
					goto done;
				}

				(void) snprintf(installgrub_cmd,
				    sizeof (installgrub_cmd),
				    "%s %s %s /dev/rdsk/%s",
				    BE_INSTALL_GRUB, stage1, stage2, vname);
				if (be_run_cmd(installgrub_cmd,
				    be_run_cmd_errbuf, BUFSIZ, NULL, 0) !=
				    BE_SUCCESS) {
					be_print_err(gettext(
					    "be_do_installgrub: installgrub "
					    "failed for device %s.\n"), vname);
					/* Assume localized cmd err output. */
					be_print_err(gettext(
					    "  Command: \"%s\"\n"),
					    installgrub_cmd);
					be_print_err("%s", be_run_cmd_errbuf);
					free(vname);
					ret = BE_ERR_BOOTFILE_INST;
					goto done;
				}
				free(vname);
			}
		} else {
			(void) snprintf(installgrub_cmd,
			    sizeof (installgrub_cmd), "%s %s %s /dev/rdsk/%s",
			    BE_INSTALL_GRUB, stage1, stage2, vname);
			if (be_run_cmd(installgrub_cmd, be_run_cmd_errbuf,
			    BUFSIZ, NULL, 0) != BE_SUCCESS) {
				be_print_err(gettext(
				    "be_do_installgrub: installgrub "
				    "failed for device %s.\n"), vname);
				/* Assume localized cmd err output. */
				be_print_err(gettext("  Command: \"%s\"\n"),
				    installgrub_cmd);
				be_print_err("%s", be_run_cmd_errbuf);
				free(vname);
				ret = BE_ERR_BOOTFILE_INST;
				goto done;
			}
			free(vname);
		}
	}

	/*
	 * Copy the grub capability file from the BE we're activating into
	 * the root pool.
	 */
	(void) snprintf(cap_file, sizeof (cap_file), "%s%s", tmp_mntpt,
	    BE_CAP_FILE);

	if ((zhp = zfs_open(g_zfs, bt->obe_zpool, ZFS_TYPE_FILESYSTEM)) ==
	    NULL) {
		be_print_err(gettext("be_do_installgrub: zfs_open "
		    "failed: %s\n"), libzfs_error_description(g_zfs));
		zpool_close(zphp);
		return (zfs_err_to_be_err(g_zfs));
	}

	/*
	 * Check to see if the pool's dataset is mounted. If it isn't we'll
	 * attempt to mount it.
	 */
	if ((ret = be_mount_pool(zhp, &ptmp_mntpnt,
	    &orig_mntpnt, &pool_mounted)) != BE_SUCCESS) {
		be_print_err(gettext("be_do_installgrub: pool dataset "
		    "(%s) could not be mounted\n"), bt->obe_zpool);
		ZFS_CLOSE(zhp);
		zpool_close(zphp);
		return (ret);
	}

	/*
	 * Get the mountpoint for the root pool dataset.
	 */
	if (!zfs_is_mounted(zhp, &pool_mntpnt)) {
		be_print_err(gettext("be_do_installgrub: pool "
		    "dataset (%s) is not mounted. Can't check the grub "
		    "version from the grub capability file.\n"), bt->obe_zpool);
		ret = BE_ERR_NO_MENU;
		goto done;
	}

	(void) snprintf(zpool_cap_file, sizeof (zpool_cap_file), "%s%s",
	    pool_mntpnt, BE_CAP_FILE);

	free(pool_mntpnt);
	pool_mntpnt = NULL;

	if ((cap_fp = fopen(cap_file, "r")) == NULL) {
		err = errno;
		be_print_err(gettext("be_do_installgrub: failed to open grub "
		    "capability file\n"));
		ret = errno_to_be_err(err);
		goto done;
	}
	if ((zpool_cap_fp = fopen(zpool_cap_file, "w")) == NULL) {
		err = errno;
		be_print_err(gettext("be_do_installgrub: failed to open new "
		    "grub capability file\n"));
		ret = errno_to_be_err(err);
		(void) fclose(cap_fp);
		goto done;
	}

	while (fgets(line, BUFSIZ, cap_fp)) {
		(void) fputs(line, zpool_cap_fp);
	}

	(void) fclose(zpool_cap_fp);
	(void) fclose(cap_fp);

done:
	if (pool_mounted) {
		int iret = 0;
		iret = be_unmount_pool(zhp, ptmp_mntpnt, orig_mntpnt);
		if (ret == BE_SUCCESS)
			ret = iret;
		free(orig_mntpnt);
		free(ptmp_mntpnt);
	}
	ZFS_CLOSE(zhp);
	if (be_mounted)
		(void) _be_unmount(bt->obe_name, 0);
	zpool_close(zphp);
	free(tmp_mntpt);
	return (ret);
}
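The vdev-tree traversal in be_do_installgrub() is a reusable libzfs idiom on its own. Below is a minimal sketch under the assumption that the handles are already open; the helper name print_top_level_vdevs is ours, not part of libbe:

#include <stdio.h>
#include <stdlib.h>
#include <libzfs.h>

static void
print_top_level_vdevs(libzfs_handle_t *hdl, zpool_handle_t *zphp)
{
	nvlist_t *config, *nv, **child;
	uint_t c, children = 0;

	/* The cached pool config holds the root of the vdev tree. */
	if ((config = zpool_get_config(zphp, NULL)) == NULL)
		return;
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) != 0)
		return;
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		/* zpool_vdev_name() returns an allocated string. */
		char *vname = zpool_vdev_name(hdl, zphp, child[c], B_FALSE);

		if (vname != NULL) {
			(void) printf("top-level vdev: %s\n", vname);
			free(vname);
		}
	}
}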
Example #23
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	int is_dm = 0;
	int is_sd = 0;
	uint_t c;
	vdev_stat_t *vs;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;  /* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = 1;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		if (device == NULL) {
			zed_log_msg(LOG_WARNING, "zfs_mod: malloc failed");
			return;
		}
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * Match up with the label request recorded above.
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at the same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}
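zfs_process_add() operates on a single pool handle; an agent like ZED typically reaches it by iterating every imported pool. A hedged sketch of that caller side (pool_cb is illustrative; zpool_iter() hands the callback an open handle that the callback is expected to close):

#include <stdio.h>
#include <libzfs.h>

/* Visit one pool; we must close the handle zpool_iter() opened. */
static int
pool_cb(zpool_handle_t *zhp, void *data)
{
	(void) data;
	(void) printf("checking pool %s\n", zpool_get_name(zhp));
	/* ... locate the matching vdev and call zfs_process_add() ... */
	zpool_close(zhp);
	return (0);
}

int
main(void)
{
	libzfs_handle_t *g_zfshdl;

	if ((g_zfshdl = libzfs_init()) == NULL)
		return (1);

	(void) zpool_iter(g_zfshdl, pool_cb, NULL);
	libzfs_fini(g_zfshdl);
	return (0);
}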
Example #24
/*
 * Function:	be_promote_zone_ds
 * Description:	This function finds the zones for the BE being activated
 *              and the active zonepath dataset for each zone. Then each
 *              active zonepath dataset is promoted.
 *
 * Parameters:
 *              be_name - the name of the global zone BE that we need to
 *                       find the zones for.
 *              be_root_ds - the root dataset for be_name.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 *
 * Scope:
 *		Private
 */
static int
be_promote_zone_ds(char *be_name, char *be_root_ds)
{
	char		*zone_ds = NULL;
	char		*temp_mntpt = NULL;
	char		origin[MAXPATHLEN];
	char		zoneroot_ds[MAXPATHLEN];
	zfs_handle_t	*zhp = NULL;
	zfs_handle_t	*z_zhp = NULL;
	zoneList_t	zone_list = NULL;
	zoneBrandList_t *brands = NULL;
	boolean_t	be_mounted = B_FALSE;
	int		zone_index = 0;
	int		err = BE_SUCCESS;

	/*
	 * Get the supported zone brands so we can pass that
	 * to z_get_nonglobal_zone_list_by_brand. Currently
	 * only the ipkg and labeled brand zones are supported.
	 */
	if ((brands = be_get_supported_brandlist()) == NULL) {
		be_print_err(gettext("be_promote_zone_ds: no supported "
		    "brands\n"));
		return (BE_SUCCESS);
	}

	if ((zhp = zfs_open(g_zfs, be_root_ds,
	    ZFS_TYPE_FILESYSTEM)) == NULL) {
		be_print_err(gettext("be_promote_zone_ds: Failed to open "
		    "dataset (%s): %s\n"), be_root_ds,
		    libzfs_error_description(g_zfs));
		err = zfs_err_to_be_err(g_zfs);
		z_free_brand_list(brands);
		return (err);
	}

	if (!zfs_is_mounted(zhp, &temp_mntpt)) {
		if ((err = _be_mount(be_name, &temp_mntpt,
		    BE_MOUNT_FLAG_NO_ZONES)) != BE_SUCCESS) {
			be_print_err(gettext("be_promote_zone_ds: failed to "
			    "mount the BE for zones procesing.\n"));
			ZFS_CLOSE(zhp);
			z_free_brand_list(brands);
			return (err);
		}
		be_mounted = B_TRUE;
	}

	/*
	 * Set the zone root to the temp mount point for the BE we
	 * just mounted.
	 */
	z_set_zone_root(temp_mntpt);

	/*
	 * Get all the zones based on the brands we're looking for. If none
	 * of the zones we're interested in are found, unmount the BE and
	 * move on.
	 */
	if ((zone_list = z_get_nonglobal_zone_list_by_brand(brands)) == NULL) {
		if (be_mounted)
			(void) _be_unmount(be_name, 0);
		ZFS_CLOSE(zhp);
		z_free_brand_list(brands);
		free(temp_mntpt);
		return (BE_SUCCESS);
	}
	for (zone_index = 0; z_zlist_get_zonename(zone_list, zone_index)
	    != NULL; zone_index++) {
		char *zone_path = NULL;

		/* Skip zones that aren't at least installed */
		if (z_zlist_get_current_state(zone_list, zone_index) <
		    ZONE_STATE_INSTALLED)
			continue;

		if (((zone_path =
		    z_zlist_get_zonepath(zone_list, zone_index)) == NULL) ||
		    ((zone_ds = be_get_ds_from_dir(zone_path)) == NULL) ||
		    !be_zone_supported(zone_ds))
			continue;

		if (be_find_active_zone_root(zhp, zone_ds,
		    zoneroot_ds, sizeof (zoneroot_ds)) != 0) {
			be_print_err(gettext("be_promote_zone_ds: "
			    "Zone does not have an active root "
			    "dataset, skipping this zone.\n"));
			continue;
		}

		if ((z_zhp = zfs_open(g_zfs, zoneroot_ds,
		    ZFS_TYPE_FILESYSTEM)) == NULL) {
			be_print_err(gettext("be_promote_zone_ds: "
			    "Failed to open dataset "
			    "(%s): %s\n"), zoneroot_ds,
			    libzfs_error_description(g_zfs));
			err = zfs_err_to_be_err(g_zfs);
			goto done;
		}

		if (zfs_prop_get(z_zhp, ZFS_PROP_ORIGIN, origin,
		    sizeof (origin), NULL, NULL, 0, B_FALSE) != 0) {
			ZFS_CLOSE(z_zhp);
			continue;
		}

		/*
		 * We don't need to close the zfs handle at this
		 * point because the callback function
		 * be_promote_ds_callback() will close it for us.
		 */
		if (be_promote_ds_callback(z_zhp, NULL) != 0) {
			be_print_err(gettext("be_promote_zone_ds: "
			    "failed to activate the "
			    "datasets for %s: %s\n"),
			    zoneroot_ds,
			    libzfs_error_description(g_zfs));
			err = BE_ERR_PROMOTE;
			goto done;
		}
	}
done:
	if (be_mounted)
		(void) _be_unmount(be_name, 0);
	ZFS_CLOSE(zhp);
	free(temp_mntpt);
	z_free_brand_list(brands);
	z_free_zone_list(zone_list);
	return (err);
}
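The promotion step that be_promote_ds_callback() is assumed to perform reduces, for each active zone root, to zfs_promote() on the clone dataset. A minimal sketch under that assumption (promote_clone and its dataset argument are illustrative placeholders):

#include <stdio.h>
#include <sys/param.h>
#include <libzfs.h>

static int
promote_clone(libzfs_handle_t *hdl, const char *ds)
{
	zfs_handle_t *zhp;
	char origin[MAXPATHLEN];

	if ((zhp = zfs_open(hdl, ds, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	/* Only clones have an origin; promote swaps clone and origin. */
	if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, origin, sizeof (origin),
	    NULL, NULL, 0, B_FALSE) == 0 && zfs_promote(zhp) != 0) {
		(void) fprintf(stderr, "zfs_promote: %s\n",
		    libzfs_error_description(hdl));
		zfs_close(zhp);
		return (-1);
	}

	zfs_close(zhp);
	return (0);
}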