Example #1
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
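A minimal caller sketch (not part of the original source) showing how the returned nvlist might be walked; libzfs_init(), the cache-file path handling, and the reduced error handling here are assumptions made purely for illustration.

#include <libzfs.h>
#include <libnvpair.h>
#include <stdio.h>

/*
 * Hedged usage sketch: list every importable pool recorded in a cache file.
 * Passing a NULL poolname and a guid of 0 asks for all pools.
 */
static void
list_cached_pools(const char *cachefile)
{
	libzfs_handle_t *hdl;
	nvlist_t *pools;
	nvpair_t *elem = NULL;

	if ((hdl = libzfs_init()) == NULL)
		return;

	pools = zpool_find_import_cached(hdl, cachefile, NULL, 0);
	if (pools != NULL) {
		while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
			(void) printf("importable pool: %s\n",
			    nvpair_name(elem));
		nvlist_free(pools);
	}
	libzfs_fini(hdl);
}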
Example #2
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 * 	pool guid
				 * 	name
				 * 	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
Example #3
/*
 * The full semantics of this function are described in the comment above
 * lzc_release().
 *
 * To summarize:
 * Releases the holds specified in the nvlist 'holds'.
 *
 * holds is an nvlist of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 *
 * If tmpdp is not NULL, the keys in 'holds' should be the dsobjs of the
 * snapshots; otherwise they should be the snapshot names.
 *
 * As a release may cause snapshots to be destroyed, this tries to ensure
 * they aren't mounted.
 *
 * Releases of non-existent holds are skipped.
 *
 * At least one hold must have been released for this function to succeed
 * and return 0.
 */
static int
dsl_dataset_user_release_impl(nvlist_t *holds, nvlist_t *errlist,
    dsl_pool_t *tmpdp)
{
	dsl_dataset_user_release_arg_t ddura;
	nvpair_t *pair;
	char *pool;
	int error;

	pair = nvlist_next_nvpair(holds, NULL);
	if (pair == NULL)
		return (0);

	/*
	 * The release may cause snapshots to be destroyed; make sure they
	 * are not mounted.
	 */
	if (tmpdp != NULL) {
		/* Temporary holds are specified by dsobj string. */
		ddura.ddura_holdfunc = dsl_dataset_hold_obj_string;
		pool = spa_name(tmpdp->dp_spa);
#ifdef _KERNEL
		dsl_pool_config_enter(tmpdp, FTAG);
		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(holds, pair)) {
			dsl_dataset_t *ds;

			error = dsl_dataset_hold_obj_string(tmpdp,
			    nvpair_name(pair), FTAG, &ds);
			if (error == 0) {
				char name[MAXNAMELEN];
				dsl_dataset_name(ds, name);
				dsl_dataset_rele(ds, FTAG);
				(void) zfs_unmount_snap(name);
			}
		}
		dsl_pool_config_exit(tmpdp, FTAG);
#endif
	} else {
		/* Non-temporary holds are specified by name. */
		ddura.ddura_holdfunc = dsl_dataset_hold;
		pool = nvpair_name(pair);
#ifdef _KERNEL
		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(holds, pair)) {
			(void) zfs_unmount_snap(nvpair_name(pair));
		}
#endif
	}

	ddura.ddura_holds = holds;
	ddura.ddura_errlist = errlist;
	VERIFY0(nvlist_alloc(&ddura.ddura_todelete, NV_UNIQUE_NAME,
	    KM_PUSHPAGE));
	VERIFY0(nvlist_alloc(&ddura.ddura_chkholds, NV_UNIQUE_NAME,
	    KM_PUSHPAGE));

	error = dsl_sync_task(pool, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, &ddura,
	    fnvlist_num_pairs(holds));
	fnvlist_free(ddura.ddura_todelete);
	fnvlist_free(ddura.ddura_chkholds);

	return (error);
}
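The holds nvlist layout described in the comment above (snapname -> { holdname, ... }) can be assembled with the fnvlist helpers. A hedged sketch with invented snapshot and hold names:

#include <sys/nvpair.h>

/*
 * Hedged sketch of the holds nvlist shape consumed above; the snapshot and
 * hold names are made up for illustration.
 */
static nvlist_t *
build_release_holds(void)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *tags = fnvlist_alloc();

	/* One inner list of hold tags per snapshot name. */
	fnvlist_add_boolean(tags, "backup-tag");
	fnvlist_add_nvlist(holds, "pool/fs@snap1", tags);
	fnvlist_free(tags);

	return (holds);
}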
Example #4
static cfga_sata_ret_t
setup_for_devctl_cmd(
    const char *ap_id,
    devctl_hdl_t *devctl_hdl,
    nvlist_t **user_nvlistp,
    uint_t oflag)
{

	uint_t	port;
	cfga_sata_ret_t	rv = CFGA_SATA_OK;
	char *lap_id, *pdyn;

	lap_id = strdup(ap_id);
	if (lap_id == NULL)
		return (CFGA_SATA_ALLOC_FAIL);
	if ((pdyn = GET_DYN(lap_id)) != NULL) {
		*pdyn = '\0';
	}

	/* Get a devctl handle to pass to the devctl_ap_XXX functions */
	if ((*devctl_hdl = devctl_ap_acquire((char *)lap_id, oflag)) == NULL) {
		(void) fprintf(stderr, "[libcfgadm:sata] "
		    "setup_for_devctl_cmd: devctl_ap_acquire failed: %s\n",
		    strerror(errno));
		rv = CFGA_SATA_DEVCTL;
		goto bailout;
	}

	/* Set up nvlist to pass the port number down to the driver */
	if (nvlist_alloc(user_nvlistp, NV_UNIQUE_NAME_TYPE, NULL) != 0) {
		*user_nvlistp = NULL;
		rv = CFGA_SATA_NVLIST;
		(void) printf("nvlist_alloc failed\n");
		goto bailout;
	}

	/*
	 * Get the port number.  For a port multiplier port this is slightly
	 * more involved because the ap_id uses a "port.port" format, so the
	 * port number is encoded as a 32-bit int with the most significant
	 * 16 bits holding the SATA channel number and the least significant
	 * 16 bits holding the port number on the port multiplier.
	 */
	if ((rv = get_port_num(lap_id, &port)) != CFGA_SATA_OK) {
		(void) printf(
		    "setup_for_devctl_cmd: get_port_num, errno: %d\n",
		    errno);
		goto bailout;
	}

	/* Creates an int32_t entry */
	if (nvlist_add_int32(*user_nvlistp, PORT, port) == -1) {
		(void) printf("nvlist_add_int32 failed\n");
		rv = CFGA_SATA_NVLIST;
		goto bailout;
	}

	free(lap_id);
	return (rv);

bailout:
	free(lap_id);
	(void) cleanup_after_devctl_cmd(*devctl_hdl, *user_nvlistp);

	return (rv);
}
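A hedged caller sketch (assumed to live in the same plugin source file) pairing setup_for_devctl_cmd() with one devctl operation and the cleanup_after_devctl_cmd() helper referenced above; the choice of devctl_ap_configure() and the zero oflag are assumptions made for illustration.

#include <libdevice.h>

/*
 * Hedged usage sketch: acquire the devctl handle and port nvlist for an
 * attachment point, issue a configure request, then release both.  The use
 * of devctl_ap_configure() here is an assumed choice of operation.
 */
static cfga_sata_ret_t
do_configure(const char *ap_id)
{
	devctl_hdl_t hdl = NULL;
	nvlist_t *nvl = NULL;
	cfga_sata_ret_t rv;

	rv = setup_for_devctl_cmd(ap_id, &hdl, &nvl, 0);
	if (rv != CFGA_SATA_OK)
		return (rv);

	if (devctl_ap_configure(hdl, nvl) != 0)
		rv = CFGA_SATA_DEVCTL;

	(void) cleanup_after_devctl_cmd(hdl, nvl);
	return (rv);
}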
Example #5
static int
ippctl_mod_list_actions(
	char		*modname)
{
	ipp_mod_id_t	mid;
	nvlist_t	*nvlp;
	int		ipp_rc;
	int		rc = 0;
	ipp_action_id_t	*aid_array;
	char		**aname_array = NULL;
	int		nelt;
	int		length;
	int		i;

	/*
	 * Get the module id.
	 */

	mid = ipp_mod_lookup(modname);
	FREE_TEXT(modname);

	/*
	 * Get a list of all the action ids for the module. If that succeeds,
	 * translate the ids into names.
	 *
	 * NOTE: This translation may fail if an action is
	 * destroyed during this operation. If this occurs, EAGAIN
	 * will be passed back to libipp to note that a transient
	 * problem occurred.
	 */

	if ((ipp_rc = ipp_mod_list_actions(mid, &aid_array, &nelt)) == 0) {

		/*
		 * It is possible that there are no actions defined.
		 * (This is unlikely though as the module would normally
		 * be auto-unloaded fairly quickly)
		 */

		if (nelt > 0) {
			length = nelt * sizeof (char *);
			aname_array = kmem_zalloc(length, KM_SLEEP);

			for (i = 0; i < nelt; i++) {
				if (ipp_action_name(aid_array[i],
				    &aname_array[i]) != 0) {
					kmem_free(aid_array, nelt *
					    sizeof (ipp_action_id_t));
					FREE_TEXT_ARRAY(aname_array, nelt);
					ipp_rc = EAGAIN;
					goto done;
				}
			}

			kmem_free(aid_array, nelt * sizeof (ipp_action_id_t));

			if ((rc = nvlist_alloc(&nvlp, NV_UNIQUE_NAME,
			    KM_SLEEP)) != 0) {
				FREE_TEXT_ARRAY(aname_array, nelt);
				return (rc);
			}

			if ((rc = ippctl_attach_aname_array(nvlp, aname_array,
			    nelt)) != 0) {
				FREE_TEXT_ARRAY(aname_array, nelt);
				nvlist_free(nvlp);
				return (rc);
			}

			FREE_TEXT_ARRAY(aname_array, nelt);

			if ((rc = ippctl_callback(nvlp, NULL)) != 0) {
				nvlist_free(nvlp);
				return (rc);
			}

			nvlist_free(nvlp);
		}
	}

done:
	/*
	 * Add an nvlist containing the kernel return code to the
	 * set of nvlists to pass back to libipp.
	 */

	if ((rc = ippctl_set_rc(ipp_rc)) != 0)
		return (rc);

	return (0);
}
Example #6
static int
dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
    dsl_dataset_t *ds, nvlist_t *holds, const char *snapname)
{
	uint64_t zapobj;
	nvlist_t *holds_found;
	nvpair_t *pair;
	objset_t *mos;
	int numholds;

	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (nvlist_empty(holds))
		return (0);

	numholds = 0;
	mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zapobj = ds->ds_phys->ds_userrefs_obj;
	VERIFY0(nvlist_alloc(&holds_found, NV_UNIQUE_NAME, KM_PUSHPAGE));

	for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		uint64_t tmp;
		int error;
		const char *holdname = nvpair_name(pair);

		if (zapobj != 0)
			error = zap_lookup(mos, zapobj, holdname, 8, 1, &tmp);
		else
			error = SET_ERROR(ENOENT);

		/*
		 * Non-existent holds are put on the errlist, but don't
		 * cause an overall failure.
		 */
		if (error == ENOENT) {
			if (ddura->ddura_errlist != NULL) {
				char *errtag = kmem_asprintf("%s#%s",
				    snapname, holdname);
				fnvlist_add_int32(ddura->ddura_errlist, errtag,
				    ENOENT);
				strfree(errtag);
			}
			continue;
		}

		if (error != 0) {
			fnvlist_free(holds_found);
			return (error);
		}

		fnvlist_add_boolean(holds_found, holdname);
		numholds++;
	}

	if (DS_IS_DEFER_DESTROY(ds) && ds->ds_phys->ds_num_children == 1 &&
	    ds->ds_userrefs == numholds) {
		/* we need to destroy the snapshot as well */
		if (dsl_dataset_long_held(ds)) {
			fnvlist_free(holds_found);
			return (SET_ERROR(EBUSY));
		}
		fnvlist_add_boolean(ddura->ddura_todelete, snapname);
	}

	if (numholds != 0) {
		fnvlist_add_nvlist(ddura->ddura_chkholds, snapname,
		    holds_found);
	}
	fnvlist_free(holds_found);

	return (0);
}
Example #7
static int
ippctl_list_mods(
	void)
{
	nvlist_t	*nvlp;
	int		ipp_rc;
	int		rc = 0;
	ipp_mod_id_t	*mid_array;
	char		**modname_array = NULL;
	int		nelt;
	int		length;
	int		i;

	/*
	 * Get a list of all the module ids. If that succeeds,
	 * translate the ids into names.
	 *
	 * NOTE: This translation may fail if a module is
	 * unloaded during this operation. If this occurs, EAGAIN
	 * will be passed back to libipp to note that a transient
	 * problem occurred.
	 */

	if ((ipp_rc = ipp_list_mods(&mid_array, &nelt)) == 0) {

		/*
		 * It is possible that there are no modules
		 * registered.
		 */

		if (nelt > 0) {
			length = nelt * sizeof (char *);
			modname_array = kmem_zalloc(length, KM_SLEEP);

			for (i = 0; i < nelt; i++) {
				if (ipp_mod_name(mid_array[i],
				    &modname_array[i]) != 0) {
					kmem_free(mid_array, nelt *
					    sizeof (ipp_mod_id_t));
					FREE_TEXT_ARRAY(modname_array, nelt);
					ipp_rc = EAGAIN;
					goto done;
				}
			}

			kmem_free(mid_array, nelt * sizeof (ipp_mod_id_t));

			if ((rc = nvlist_alloc(&nvlp, NV_UNIQUE_NAME,
			    KM_SLEEP)) != 0) {
				FREE_TEXT_ARRAY(modname_array, nelt);
				return (rc);
			}

			if ((rc = ippctl_attach_modname_array(nvlp,
			    modname_array, nelt)) != 0) {
				FREE_TEXT_ARRAY(modname_array, nelt);
				nvlist_free(nvlp);
				return (rc);
			}

			FREE_TEXT_ARRAY(modname_array, nelt);

			if ((rc = ippctl_callback(nvlp, NULL)) != 0) {
				nvlist_free(nvlp);
				return (rc);
			}

			nvlist_free(nvlp);
		}
	}

done:
	/*
	 * Add an nvlist containing the kernel return code to the
	 * set of nvlists to pass back to libipp.
	 */

	if ((rc = ippctl_set_rc(ipp_rc)) != 0)
		return (rc);

	return (0);
}
Example #8
/*ARGSUSED*/
static void
spa_history_log_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	spa_t		*spa = arg1;
	history_arg_t	*hap = arg2;
	const char	*history_str = hap->ha_history_str;
	objset_t	*mos = spa->spa_meta_objset;
	dmu_buf_t	*dbp;
	spa_history_phys_t *shpp;
	size_t		reclen;
	uint64_t	le_len;
	nvlist_t	*nvrecord;
	char		*record_packed = NULL;
	int		ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	VERIFY(nvlist_alloc(&nvrecord, NV_UNIQUE_NAME, KM_PUSHPAGE) == 0);
	VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TIME,
	    gethrestime_sec()) == 0);
	VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_WHO, hap->ha_uid) == 0);
	if (hap->ha_zone != NULL)
		VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_ZONE,
		    hap->ha_zone) == 0);
#ifdef _KERNEL
	VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_HOST,
	    utsname.nodename) == 0);
#endif
	if (hap->ha_log_type == LOG_CMD_POOL_CREATE ||
	    hap->ha_log_type == LOG_CMD_NORMAL) {
		VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_CMD,
		    history_str) == 0);

		zfs_dbgmsg("command: %s", history_str);
	} else {
		VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_INT_EVENT,
		    hap->ha_event) == 0);
		VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TXG,
		    tx->tx_txg) == 0);
		VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_INT_STR,
		    history_str) == 0);

		zfs_dbgmsg("internal %s pool:%s txg:%llu %s",
		    zfs_history_event_names[hap->ha_event], spa_name(spa),
		    (longlong_t)tx->tx_txg, history_str);

	}

	VERIFY(nvlist_size(nvrecord, &reclen, NV_ENCODE_XDR) == 0);
	record_packed = kmem_alloc(reclen, KM_PUSHPAGE);

	VERIFY(nvlist_pack(nvrecord, &record_packed, &reclen,
	    NV_ENCODE_XDR, KM_PUSHPAGE) == 0);

	mutex_enter(&spa->spa_history_lock);
	if (hap->ha_log_type == LOG_CMD_POOL_CREATE)
		VERIFY(shpp->sh_eof == shpp->sh_pool_create_len);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (!ret)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	if (!ret && hap->ha_log_type == LOG_CMD_POOL_CREATE) {
		shpp->sh_pool_create_len += sizeof (le_len) + reclen;
		shpp->sh_bof = shpp->sh_pool_create_len;
	}

	mutex_exit(&spa->spa_history_lock);
	nvlist_free(nvrecord);
	kmem_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);

	strfree(hap->ha_history_str);
	if (hap->ha_zone != NULL)
		strfree(hap->ha_zone);
	kmem_free(hap, sizeof (history_arg_t));
}
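The size-then-pack sequence used above (nvlist_size() followed by nvlist_pack() into a caller-supplied buffer) works the same way in userland. A hedged, self-contained sketch with an invented two-field record:

#include <libnvpair.h>
#include <stdlib.h>

/*
 * Hedged userland sketch of the size-then-pack pattern; the "when" and
 * "cmd" record fields are invented for illustration.  Returns a malloc'd
 * XDR-encoded buffer and its length, or NULL on failure.
 */
static char *
pack_record(uint64_t when, const char *cmd, size_t *lenp)
{
	nvlist_t *nvl;
	char *buf;
	size_t len;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (NULL);
	if (nvlist_add_uint64(nvl, "when", when) != 0 ||
	    nvlist_add_string(nvl, "cmd", cmd) != 0 ||
	    nvlist_size(nvl, &len, NV_ENCODE_XDR) != 0) {
		nvlist_free(nvl);
		return (NULL);
	}
	buf = malloc(len);
	if (buf == NULL ||
	    nvlist_pack(nvl, &buf, &len, NV_ENCODE_XDR, 0) != 0) {
		free(buf);
		nvlist_free(nvl);
		return (NULL);
	}
	nvlist_free(nvl);
	*lenp = len;
	return (buf);
}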
Example #9
nvlist_t *
drive_get_stats(descriptor_t *dp, int stat_type, int *errp)
{
	disk_t		*diskp;
	nvlist_t	*stats;

	diskp = dp->p.disk;

	if (nvlist_alloc(&stats, NVATTRS, 0) != 0) {
	    *errp = ENOMEM;
	    return (NULL);
	}

	if (stat_type == DM_DRV_STAT_PERFORMANCE ||
	    stat_type == DM_DRV_STAT_DIAGNOSTIC) {

	    alias_t	*ap;
	    kstat_ctl_t	*kc;

	    ap = diskp->aliases;
	    if (ap == NULL || ap->kstat_name == NULL) {
		nvlist_free(stats);
		*errp = EACCES;
		return (NULL);
	    }

	    if ((kc = kstat_open()) == NULL) {
		nvlist_free(stats);
		*errp = EACCES;
		return (NULL);
	    }

	    while (ap != NULL) {
		int	status;

		if (ap->kstat_name == NULL) {
		    ap = ap->next;
		    continue;
		}

		if (stat_type == DM_DRV_STAT_PERFORMANCE) {
		    status = get_io_kstats(kc, ap->kstat_name, stats);
		} else {
		    status = get_err_kstats(kc, ap->kstat_name, stats);
		}

		if (status != 0) {
		    nvlist_free(stats);
		    (void) kstat_close(kc);
		    *errp = ENOMEM;
		    return (NULL);
		}

		ap = ap->next;
	    }

	    (void) kstat_close(kc);

	    *errp = 0;
	    return (stats);
	}

	if (stat_type == DM_DRV_STAT_TEMPERATURE) {
	    int		fd;

	    if ((fd = drive_open_disk(diskp, NULL, 0)) >= 0) {
		struct dk_temperature	temp;

		if (ioctl(fd, DKIOCGTEMPERATURE, &temp) >= 0) {
		    if (nvlist_add_uint32(stats, DM_TEMPERATURE,
			temp.dkt_cur_temp) != 0) {
			*errp = ENOMEM;
			nvlist_free(stats);
			return (NULL);
		    }
		} else {
		    *errp = errno;
		    nvlist_free(stats);
		    return (NULL);
		}
		(void) close(fd);
	    } else {
		*errp = errno;
		nvlist_free(stats);
		return (NULL);
	    }

	    *errp = 0;
	    return (stats);
	}

	nvlist_free(stats);
	*errp = EINVAL;
	return (NULL);
}
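A hedged sketch of reading the temperature statistic back out through libdiskmgt's public interface; it assumes the drive descriptor was obtained elsewhere and that dm_get_stats() is the exported wrapper around the function above.

#include <libdiskmgt.h>
#include <libnvpair.h>
#include <stdio.h>

/*
 * Hedged caller sketch: fetch temperature stats for a drive descriptor and
 * pull out the DM_TEMPERATURE attribute added above.
 */
static void
print_drive_temp(dm_descriptor_t drive)
{
	int error;
	uint32_t temp;
	nvlist_t *stats;

	stats = dm_get_stats(drive, DM_DRV_STAT_TEMPERATURE, &error);
	if (stats == NULL) {
		(void) fprintf(stderr, "stats unavailable: %d\n", error);
		return;
	}
	if (nvlist_lookup_uint32(stats, DM_TEMPERATURE, &temp) == 0)
		(void) printf("current temperature: %u\n", temp);
	nvlist_free(stats);
}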
Example #10
/*
 * sync out AVL trees to persistent storage.
 */
void
zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx)
{
	nvlist_t *nvp;
	nvlist_t **fuids;
	size_t nvsize = 0;
	char *packed;
	dmu_buf_t *db;
	fuid_domain_t *domnode;
	int numnodes;
	int i;

	if (!zsb->z_fuid_dirty) {
		return;
	}

	rw_enter(&zsb->z_fuid_lock, RW_WRITER);

	/*
	 * First, see if the table needs to be created.
	 */
	if (zsb->z_fuid_obj == 0) {
		zsb->z_fuid_obj = dmu_object_alloc(zsb->z_os,
		    DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_add(zsb->z_os, MASTER_NODE_OBJ,
		    ZFS_FUID_TABLES, sizeof (uint64_t), 1,
		    &zsb->z_fuid_obj, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	numnodes = avl_numnodes(&zsb->z_fuid_idx);
	fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
	for (i = 0, domnode = avl_first(&zsb->z_fuid_domain); domnode; i++,
	    domnode = AVL_NEXT(&zsb->z_fuid_domain, domnode)) {
		VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
		    domnode->f_idx) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
		VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
		    domnode->f_ksid->kd_name) == 0);
	}
	VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
	    fuids, numnodes) == 0);
	for (i = 0; i != numnodes; i++)
		nvlist_free(fuids[i]);
	kmem_free(fuids, numnodes * sizeof (void *));
	VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
	packed = kmem_alloc(nvsize, KM_SLEEP);
	VERIFY(nvlist_pack(nvp, &packed, &nvsize,
	    NV_ENCODE_XDR, KM_SLEEP) == 0);
	nvlist_free(nvp);
	zsb->z_fuid_size = nvsize;
	dmu_write(zsb->z_os, zsb->z_fuid_obj, 0, zsb->z_fuid_size, packed, tx);
	kmem_free(packed, zsb->z_fuid_size);
	VERIFY(0 == dmu_bonus_hold(zsb->z_os, zsb->z_fuid_obj,
	    FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = zsb->z_fuid_size;
	dmu_buf_rele(db, FTAG);

	zsb->z_fuid_dirty = B_FALSE;
	rw_exit(&zsb->z_fuid_lock);
}
Example #11
/*
 * Create the storage dirs, and pass the path list to the kernel.
 * This requires the nfssrv module to be loaded; the _nfssys() syscall
 * will fail with ENOTSUP if it is not.
 * Use libnvpair(3LIB) to pass the data to the kernel.
 */
static int
dss_init(uint_t npaths, char **pathnames)
{
	int i, j, nskipped, error;
	char *bufp;
	uint32_t bufsize;
	size_t buflen;
	nvlist_t *nvl;

	if (npaths > 1) {
		/*
		 * We need to remove duplicate paths; this might be user error
		 * in the general case, but HA-NFSv4 can also cause this.
		 * Sort the pathnames array, and NULL out duplicates,
		 * then write the non-NULL entries to a new array.
		 * Sorting will also allow the kernel to optimise its searches.
		 */

		qsort(pathnames, npaths, sizeof (char *), qstrcmp);

		/* now NULL out any duplicates */
		i = 0; j = 1; nskipped = 0;
		while (j < npaths) {
			if (strcmp(pathnames[i], pathnames[j]) == 0) {
				pathnames[j] = NULL;
				j++;
				nskipped++;
				continue;
			}

			/* skip i over any of its NULLed duplicates */
			i = j++;
		}

		/* finally, write the non-NULL entries to a new array */
		if (nskipped > 0) {
			int nreal;
			size_t sz;
			char **tmp_pathnames;

			nreal = npaths - nskipped;

			sz = nreal * sizeof (char *);
			tmp_pathnames = (char **)malloc(sz);
			if (tmp_pathnames == NULL) {
				fprintf(stderr, "tmp_pathnames malloc "
				    "failed\n");
				exit(1);
			}

			for (i = 0, j = 0; i < npaths; i++)
				if (pathnames[i] != NULL)
					tmp_pathnames[j++] = pathnames[i];
			free(pathnames);
			pathnames = tmp_pathnames;
			npaths = nreal;
		}

	}

	/* Create directories to store the distributed state files */
	dss_mkleafdirs(npaths, pathnames);

	/* Create the name-value pair list */
	error = nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0);
	if (error) {
		fprintf(stderr, "nvlist_alloc failed: %s\n", strerror(errno));
		return (1);
	}

	/* Add the pathnames array as a single name-value pair */
	error = nvlist_add_string_array(nvl, NFS4_DSS_NVPAIR_NAME,
	    pathnames, npaths);
	if (error) {
		fprintf(stderr, "nvlist_add_string_array failed: %s\n",
		    strerror(errno));
		nvlist_free(nvl);
		return (1);
	}

	/*
	 * Pack list into contiguous memory, for passing to kernel.
	 * nvlist_pack() will allocate the memory for the buffer,
	 * which we should free() when no longer needed.
	 * NV_ENCODE_XDR for safety across ILP32/LP64 kernel boundary.
	 */
	bufp = NULL;
	error = nvlist_pack(nvl, &bufp, &buflen, NV_ENCODE_XDR, 0);
	if (error) {
		fprintf(stderr, "nvlist_pack failed: %s\n", strerror(errno));
		nvlist_free(nvl);
		return (1);
	}

	/* Now we have the packed buffer, we no longer need the list */
	nvlist_free(nvl);

	/*
	 * Let the kernel know in advance how big the buffer is.
	 * NOTE: we cannot just pass buflen, since size_t is a long, and
	 * thus a different size between ILP32 userland and LP64 kernel.
	 * Use an int for the transfer, since that should be big enough;
	 * this is a no-op at the moment, here, since nfsd is 32-bit, but
	 * that could change.
	 */
	bufsize = (uint32_t)buflen;
	error = _nfssys(NFS4_DSS_SETPATHS_SIZE, &bufsize);
	if (error) {
		fprintf(stderr,
		    "_nfssys(NFS4_DSS_SETPATHS_SIZE) failed: %s\n",
		    strerror(errno));
		free(bufp);
		return (1);
	}

	/* Pass the packed buffer to the kernel */
	error = _nfssys(NFS4_DSS_SETPATHS, bufp);
	if (error) {
		fprintf(stderr,
		    "_nfssys(NFS4_DSS_SETPATHS) failed: %s\n", strerror(errno));
		free(bufp);
		return (1);
	}

	/*
	 * The kernel has now unpacked the buffer and extracted the
	 * pathnames array, we no longer need the buffer.
	 */
	free(bufp);

	return (0);
}
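For reference, the receiving side conceptually reverses the steps above with nvlist_unpack() and nvlist_lookup_string_array(). A hedged userland sketch of that unpack side follows (the actual kernel code is not shown here; NFS4_DSS_NVPAIR_NAME is assumed to come from the same NFS headers used above).

#include <libnvpair.h>
#include <stdio.h>

/*
 * Hedged sketch of unpacking the buffer built by dss_init() and walking
 * the recovered pathnames array.
 */
static int
dss_print_paths(char *buf, size_t buflen)
{
	nvlist_t *nvl;
	char **paths;
	uint_t npaths, i;

	if (nvlist_unpack(buf, buflen, &nvl, 0) != 0)
		return (1);
	if (nvlist_lookup_string_array(nvl, NFS4_DSS_NVPAIR_NAME,
	    &paths, &npaths) != 0) {
		nvlist_free(nvl);
		return (1);
	}
	for (i = 0; i < npaths; i++)
		(void) printf("storage dir: %s\n", paths[i]);
	nvlist_free(nvl);
	return (0);
}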