Example #1
int
fmd_fmri_replaced(nvlist_t *nvl)
{
	uint64_t pool_guid, vdev_guid;
	cbdata_t cb;
	int ret;

	(void) nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_POOL, &pool_guid);

	cb.cb_guid = pool_guid;
	cb.cb_pool = NULL;

	if (zpool_iter(g_zfs, find_pool, &cb) != 1)
		return (FMD_OBJ_STATE_NOT_PRESENT);

	if (nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_VDEV, &vdev_guid) != 0) {
		zpool_close(cb.cb_pool);
		return (FMD_OBJ_STATE_STILL_PRESENT);
	}

	ret = (find_vdev(cb.cb_pool, vdev_guid) != NULL) ?
	    FMD_OBJ_STATE_STILL_PRESENT : FMD_OBJ_STATE_NOT_PRESENT;

	zpool_close(cb.cb_pool);

	return (ret);
}
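
The fmd_fmri_* examples (#1, #2, #14 and #16) rely on a cbdata_t structure and a find_pool() callback that are not shown in these snippets. The following is a minimal sketch, not the scheme module's actual implementation, assuming the standard zpool_iter() contract in which a nonzero callback return stops iteration and becomes zpool_iter()'s return value:

typedef struct cbdata {
	uint64_t	cb_guid;	/* pool GUID to search for */
	zpool_handle_t	*cb_pool;	/* matching handle, if any */
} cbdata_t;

static int
find_pool(zpool_handle_t *zhp, void *data)
{
	cbdata_t *cbp = data;

	/*
	 * Keep the handle open and stop iterating when the pool GUID
	 * matches; otherwise close the handle and keep going.
	 */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL) == cbp->cb_guid) {
		cbp->cb_pool = zhp;
		return (1);
	}

	zpool_close(zhp);
	return (0);
}

With a callback shaped like this, the zpool_iter(...) != 1 checks in the examples double as "pool found" tests, and cb.cb_pool is the handle the caller must later close.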
Example #2
int
fmd_fmri_present(nvlist_t *nvl)
{
	uint64_t pool_guid, vdev_guid;
	cbdata_t cb;
	int ret;

	(void) nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_POOL, &pool_guid);

	cb.cb_guid = pool_guid;
	cb.cb_pool = NULL;

	if (zpool_iter(g_zfs, find_pool, &cb) != 1)
		return (0);

	if (nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_VDEV, &vdev_guid) != 0) {
		zpool_close(cb.cb_pool);
		return (1);
	}

	ret = (find_vdev(cb.cb_pool, vdev_guid) != NULL);

	zpool_close(cb.cb_pool);

	return (ret);
}
Example #3
int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname;
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
		syseventd_print(9, "zfs_deliver_event: no physpath\n");
		return (-1);
	}
	if (strncmp(devname, DEVICE_PREFIX, strlen(DEVICE_PREFIX)) != 0) {
		syseventd_print(9, "zfs_deliver_event: invalid "
		    "device '%s'", devname);
		return (-1);
	}

	/*
	 * We try to find the device using the physical
	 * path that has been supplied. We need to strip off
	 * the /devices prefix before starting our search.
	 */
	devname += strlen(DEVICE_PREFIX);
	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
		syseventd_print(9, "zfs_deliver_event: device '%s' not"
		    " found\n", devname);
		return (1);
	}
	return (0);
}
Example #4
/*
 * Given a /devices path, lookup the corresponding devid for each minor node,
 * and find any vdevs with matching devids.  Doing this straight up would be
 * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
 * the fact that each devid ends with "/<minornode>".  Once we find any valid
 * minor node, we chop off the portion after the last slash, and then search for
 * matching vdevs, which is O(vdevs in system).
 */
static boolean_t
devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	size_t len = strlen(devpath) + sizeof ("/devices") +
	    sizeof (PHYS_PATH) - 1;
	char *fullpath;
	int fd;
	ddi_devid_t devid;
	char *devidstr, *fulldevid;
	dev_data_t data = { 0 };

	/*
	 * Try to open a known minor node.
	 */
	fullpath = alloca(len);
	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
	if ((fd = open(fullpath, O_RDONLY)) < 0)
		return (B_FALSE);

	/*
	 * Determine the devid as a string, with no trailing slash for the minor
	 * node.
	 */
	if (devid_get(fd, &devid) != 0) {
		(void) close(fd);
		return (B_FALSE);
	}
	(void) close(fd);

	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
		devid_free(devid);
		return (B_FALSE);
	}

	len = strlen(devidstr) + 2;
	fulldevid = alloca(len);
	(void) snprintf(fulldevid, len, "%s/", devidstr);

	data.dd_compare = fulldevid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	devid_str_free(devidstr);
	devid_free(devid);

	return (data.dd_found);
}
Example #5
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}
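
The name_or_guid_exists() callback used above is not shown. A hedged sketch, assuming importargs_t carries the poolname and guid fields referenced by the verify() call, could look like this:

static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		/* Match on pool name. */
		if (strcmp(zpool_get_name(zhp), import->poolname) == 0)
			found = 1;
	} else {
		/* Match on pool GUID taken from the config nvlist. */
		uint64_t pool_guid;
		nvlist_t *config = zpool_get_config(zhp, NULL);

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}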
Example #6
/*
 * Given a physical device path, iterate over all (pool, vdev) pairs which
 * correspond to the given path.
 */
static boolean_t
devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
{
	dev_data_t data = { 0 };

	data.dd_compare = devpath;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_isdisk = wholedisk;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
Example #7
/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match the devid directly, which always refers to a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
Example #8
/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
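
Examples #4 and #6 through #8 hand a dev_data_t to a zfs_iter_pool() callback that is not shown. Below is a simplified sketch, not the actual syseventd/zed code: it assumes dd_prop holds the name of the nvlist string property to compare, that dd_func takes (zpool_handle_t *, nvlist_t *, boolean_t), and it walks the vdev tree recursively (the real implementation also handles spares, caches and GUID matching).

static void
zfs_iter_vdev_sketch(zpool_handle_t *zhp, nvlist_t *nvl, dev_data_t *dp)
{
	char *value;
	nvlist_t **child;
	uint_t c, children;

	/* Recurse into any child vdevs (mirrors, raidz, and so on). */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev_sketch(zhp, child[c], dp);
	}

	/* Compare the requested property (devid or physical path). */
	if (dp->dd_compare != NULL &&
	    nvlist_lookup_string(nvl, dp->dd_prop, &value) == 0 &&
	    strcmp(dp->dd_compare, value) == 0) {
		dp->dd_found = B_TRUE;
		(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
	}
}

static int
zfs_iter_pool_sketch(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvroot;
	dev_data_t *dp = data;

	/* Walk this pool's vdev tree, then move on to the next pool. */
	if ((config = zpool_get_config(zhp, NULL)) != NULL &&
	    nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0)
		zfs_iter_vdev_sketch(zhp, nvroot, dp);

	zpool_close(zhp);
	return (0);
}

Returning 0 from every invocation keeps zpool_iter() going over all pools, which is why these callers ignore its return value and test data.dd_found instead.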
Example #9
static void
zfs_purge_cases(fmd_hdl_t *hdl)
{
	zfs_case_t *zcp;
	uu_list_walk_t *walk;
	libzfs_handle_t *zhdl = fmd_hdl_getspecific(hdl);

	/*
	 * There is no way to open a pool by GUID, or lookup a vdev by GUID.  No
	 * matter what we do, we're going to have to stomach an O(vdevs * cases)
	 * algorithm.  In reality, both quantities are likely so small that
	 * neither will matter. Given that iterating over pools is more
	 * expensive than iterating over the in-memory case list, we opt for a
	 * 'present' flag in each case that starts off cleared.  We then iterate
	 * over all pools, marking those that are still present, and removing
	 * those that aren't found.
	 *
	 * Note that we could also construct an FMRI and rely on
	 * fmd_nvl_fmri_present(), but this would end up doing the same search.
	 */

	/*
	 * Mark the cases as not present.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp))
		zcp->zc_present = B_FALSE;

	/*
	 * Iterate over all pools and mark the pools and vdevs found.  If this
	 * fails (most probably because we're out of memory), don't close any
	 * of the cases, since we cannot be sure their state is accurate.
	 */
	if (zpool_iter(zhdl, zfs_mark_pool, NULL) != 0)
		return;

	/*
	 * Remove those cases which were not found.
	 */
	walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);
	while ((zcp = uu_list_walk_next(walk)) != NULL) {
		if (!zcp->zc_present)
			fmd_case_close(hdl, zcp->zc_case);
	}
	uu_list_walk_end(walk);
}
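
The zfs_mark_pool() callback is not shown. A minimal sketch, assuming each zfs_case_t records the GUID of the pool it concerns in a zc_pool_guid field (the real diagnosis engine also marks individual vdevs), might be:

/* ARGSUSED */
static int
zfs_mark_pool_sketch(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);

	/* Mark every case that refers to this pool as still present. */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_pool_guid == pool_guid)	/* assumed field */
			zcp->zc_present = B_TRUE;
	}

	zpool_close(zhp);
	return (0);
}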
Example #10
/*
 * This function handles the ESC_DEV_DLE event.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname;

	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
		zed_log_msg(LOG_INFO, "zfs_deliver_event: no physpath");
		return (-1);
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_event: device '%s' not "
		    "found", devname);
		return (1);
	}
	return (0);
}
Example #11
/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	data.dd_isdisk = B_TRUE;
	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}
Example #12
/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}
Example #13
/*
 * Create a list of pools based on the given arguments.  If we're given no
 * arguments, then iterate over all pools in the system and add them to the AVL
 * tree.  Otherwise, add only those pools explicitly specified on the command
 * line.
 */
zpool_list_t *
pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
{
	zpool_list_t *zlp;

	zlp = safe_malloc(sizeof (zpool_list_t));

	zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
	    offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);

	if (zlp->zl_pool == NULL)
		zpool_no_memory();

	if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
	    UU_DEFAULT)) == NULL)
		zpool_no_memory();

	zlp->zl_proplist = proplist;

	if (argc == 0) {
		(void) zpool_iter(g_zfs, add_pool, zlp);
		zlp->zl_findall = B_TRUE;
	} else {
		int i;

		for (i = 0; i < argc; i++) {
			zpool_handle_t *zhp;

			if ((zhp = zpool_open_canfail(g_zfs, argv[i])) !=
			    NULL) {
				if (add_pool(zhp, zlp) != 0)
					*err = B_TRUE;
			} else {
				*err = B_TRUE;
			}
		}
	}

	return (zlp);
}
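
The add_pool() callback referenced above is not shown. A hedged sketch, assuming zpool_node_t stores the handle in a zn_handle field next to the zn_avlnode used in the uu_avl_pool_create() call, and skipping the property-list expansion the real code performs:

static int
add_pool(zpool_handle_t *zhp, void *data)
{
	zpool_list_t *zlp = data;
	zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
	uu_avl_index_t idx;

	node->zn_handle = zhp;
	uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);

	if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) != NULL) {
		/* Already in the list; drop the duplicate handle. */
		zpool_close(zhp);
		free(node);
		return (-1);
	}

	uu_avl_insert(zlp->zl_avl, node, idx);
	return (0);
}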
Example #14
int
fmd_fmri_unusable(nvlist_t *nvl)
{
	uint64_t pool_guid, vdev_guid;
	cbdata_t cb;
	nvlist_t *vd;
	int ret;

	(void) nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_POOL, &pool_guid);

	cb.cb_guid = pool_guid;
	cb.cb_pool = NULL;

	if (zpool_iter(g_zfs, find_pool, &cb) != 1)
		return (1);

	if (nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_VDEV, &vdev_guid) != 0) {
		ret = (zpool_get_state(cb.cb_pool) == POOL_STATE_UNAVAIL);
		zpool_close(cb.cb_pool);
		return (ret);
	}

	vd = find_vdev(cb.cb_pool, vdev_guid);
	if (vd == NULL) {
		ret = 1;
	} else {
		vdev_stat_t *vs;
		uint_t c;

		(void) nvlist_lookup_uint64_array(vd, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &c);

		ret = (vs->vs_state < VDEV_STATE_DEGRADED);
	}

	zpool_close(cb.cb_pool);

	return (ret);
}
Example #15
/*
 * This function handles the ESC_DEV_DLE device change event.  Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS, append the
 * expected -part1 partition information, and then look up by physical path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname, name[MAXPATHLEN];
	uint64_t guid;

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(name, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
		return (-1);
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, name) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", name);
		return (1);
	}

	return (0);
}
Example #16
ssize_t
fmd_fmri_nvl2str(nvlist_t *nvl, char *buf, size_t buflen)
{
	uint64_t pool_guid, vdev_guid;
	cbdata_t cb;
	ssize_t len;
	const char *name;
	char guidbuf[64];

	(void) nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_POOL, &pool_guid);

	/*
	 * Attempt to convert the pool guid to a name.
	 */
	cb.cb_guid = pool_guid;
	cb.cb_pool = NULL;

	if (zpool_iter(g_zfs, find_pool, &cb) == 1) {
		name = zpool_get_name(cb.cb_pool);
	} else {
		(void) snprintf(guidbuf, sizeof (guidbuf), "%llx", pool_guid);
		name = guidbuf;
	}

	if (nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_VDEV, &vdev_guid) == 0)
		len = snprintf(buf, buflen, "%s://pool=%s/vdev=%llx",
		    FM_FMRI_SCHEME_ZFS, name, vdev_guid);
	else
		len = snprintf(buf, buflen, "%s://pool=%s",
		    FM_FMRI_SCHEME_ZFS, name);

	if (cb.cb_pool)
		zpool_close(cb.cb_pool);

	return (len);
}
Example #17
/*
 * Search for any new pools, adding them to the list.  We only add pools when no
 * options were given on the command line.  Otherwise, we keep the list fixed as
 * those that were explicitly specified.
 */
void
pool_list_update(zpool_list_t *zlp)
{
	if (zlp->zl_findall)
		(void) zpool_iter(g_zfs, add_pool, zlp);
}
Example #18
/*
 * Determines if the pool is in use.  If so, it returns true along with the
 * state of the pool and the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0 && errno == ENOMEM) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}