Example #1
static int
zpool_find_load_time(zpool_handle_t *zhp, void *arg)
{
	struct load_time_arg *lta = arg;
	uint64_t pool_guid;
	uint64_t *tod;
	nvlist_t *config;
	uint_t nelem;

	if (lta->lt_found)
		return (0);

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	if (pool_guid != lta->lt_guid)
		return (0);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem) == 0 && nelem == 2) {
		lta->lt_found = B_TRUE;
		lta->lt_time->ertv_sec = tod[0];
		lta->lt_time->ertv_nsec = tod[1];
	}

	return (0);
}
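
For context, here is a minimal driving sketch for the callback above. The struct definitions are reconstructed from the field accesses in zpool_find_load_time (lt_guid, lt_time, lt_found, ertv_sec, ertv_nsec) and may not match the exact declarations in the original source file; the helper example_lookup_load_time is purely illustrative and assumes the usual libzfs headers plus <stdio.h>.

/* Assumed shapes, inferred from the usage in the callback above. */
typedef struct er_timeval {
	uint64_t	ertv_sec;
	uint64_t	ertv_nsec;
} er_timeval_t;

struct load_time_arg {
	uint64_t	lt_guid;	/* pool GUID to search for */
	er_timeval_t	*lt_time;	/* out: time the pool was loaded */
	boolean_t	lt_found;	/* set once a match was recorded */
};

/* Hypothetical caller: look up the load time of the pool with 'guid'. */
static void
example_lookup_load_time(libzfs_handle_t *hdl, uint64_t guid)
{
	er_timeval_t when = { 0 };
	struct load_time_arg lta;

	lta.lt_guid = guid;
	lta.lt_time = &when;
	lta.lt_found = B_FALSE;

	/* zpool_iter() opens a handle for each pool and passes it in. */
	(void) zpool_iter(hdl, zpool_find_load_time, &lta);

	if (lta.lt_found)
		(void) printf("pool loaded at %llu.%09llu\n",
		    (u_longlong_t)when.ertv_sec,
		    (u_longlong_t)when.ertv_nsec);
}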
Example #2
/*ARGSUSED*/
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid;
	nvlist_t *config, *vd;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	assert(ret == 0);

	zfs_mark_vdev(pool_guid, vd);

	zpool_close(zhp);

	return (0);
}
Example #3
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
    char *devname = data;
    boolean_t avail_spare, l2cache;
    vdev_state_t newstate;
    nvlist_t *tgt;

    zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
                devname, zpool_get_name(zhp));

    if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
                                           &avail_spare, &l2cache, NULL)) != NULL) {
        char *path, fullpath[MAXPATHLEN];
        uint64_t wholedisk = 0ULL;

        verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
                                    &path) == 0);
        verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
                                    &wholedisk) == 0);

        (void) strlcpy(fullpath, path, sizeof (fullpath));
        if (wholedisk) {
            char *spath = zfs_strip_partition(fullpath);
            if (!spath) {
                zed_log_msg(LOG_INFO, "%s: Can't alloc",
                            __func__);
                return (0);
            }

            (void) strlcpy(fullpath, spath, sizeof (fullpath));
            free(spath);

            /*
             * We need to reopen the pool associated with this
             * device so that the kernel can update the size
             * of the expanded device.
             */
            (void) zpool_reopen(zhp);
        }

        if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
            zed_log_msg(LOG_INFO, "zfsdle_vdev_online: setting "
                        "device '%s' to ONLINE state in pool '%s'",
                        fullpath, zpool_get_name(zhp));
            if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
                (void) zpool_vdev_online(zhp, fullpath, 0,
                                         &newstate);
        }
        zpool_close(zhp);
        return (1);
    }
    zpool_close(zhp);
    return (0);
}
Example #4
/*
 * Include snaps if they were requested or if this a zfs list where types
 * were not specified and the "listsnapshots" property is set on this pool.
 */
static int
zfs_include_snapshots(zfs_handle_t *zhp, callback_data_t *cb)
{
	zpool_handle_t *zph;

	if ((cb->cb_flags & ZFS_ITER_PROP_LISTSNAPS) == 0)
		return (cb->cb_types & ZFS_TYPE_SNAPSHOT);

	zph = zfs_get_pool_handle(zhp);
	return (zpool_get_prop_int(zph, ZPOOL_PROP_LISTSNAPS, NULL));
}
Example #5
/*ARGSUSED*/
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid;
	uint64_t *tod;
	er_timeval_t loaded = { 0 };
	nvlist_t *config, *vd;
	uint_t nelem = 0;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem);
	if (nelem == 2) {
		loaded.ertv_sec = tod[0];
		loaded.ertv_nsec = tod[1];
		for (zcp = uu_list_first(zfs_cases); zcp != NULL;
		    zcp = uu_list_next(zfs_cases, zcp)) {
			if (zcp->zc_data.zc_pool_guid == pool_guid &&
			    zcp->zc_data.zc_vdev_guid == 0) {
				zcp->zc_when = loaded;
			}
		}
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	if (ret) {
		zpool_close(zhp);
		return (-1);
	}

	zfs_mark_vdev(pool_guid, vd, &loaded);

	zpool_close(zhp);

	return (0);
}
Example #6
static int
find_pool(zpool_handle_t *zhp, void *data)
{
	cbdata_t *cbp = data;

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL) == cbp->cb_guid) {
		cbp->cb_pool = zhp;
		return (1);
	}

	zpool_close(zhp);

	return (0);
}
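
A hedged usage sketch for find_pool: the cbdata_t layout below is only inferred from the cb_guid/cb_pool accesses above, and example_find_pool_by_guid is an illustrative helper, not part of the original source. The point worth noting is the ownership convention: the callback closes every non-matching handle itself, while a match is returned open (the return value 1 stops zpool_iter) and must be closed by the caller.

/* Assumed layout, mirroring the cb_guid/cb_pool accesses above. */
typedef struct cbdata {
	uint64_t	cb_guid;	/* pool GUID to search for */
	zpool_handle_t	*cb_pool;	/* set to the matching open handle */
} cbdata_t;

/* Hypothetical caller: find a pool by GUID and print its name. */
static void
example_find_pool_by_guid(libzfs_handle_t *hdl, uint64_t guid)
{
	cbdata_t cb = { 0 };

	cb.cb_guid = guid;

	/* zpool_iter() returns 1 here because the callback returned 1. */
	if (zpool_iter(hdl, find_pool, &cb) == 1 && cb.cb_pool != NULL) {
		(void) printf("found pool '%s'\n",
		    zpool_get_name(cb.cb_pool));
		zpool_close(cb.cb_pool);	/* caller owns the match */
	}
}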
Example #7
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	vdev_state_t newstate;
	nvlist_t *tgt;

	syseventd_print(9, "zfsdle_vdev_online: searching for %s in pool %s\n",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0ULL;

		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &path) == 0);
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0);

		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			fullpath[strlen(fullpath) - 2] = '\0';

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size
			 * of the expanded device.
			 */
			(void) zpool_reopen(zhp);
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			syseventd_print(9, "zfsdle_vdev_online: setting device"
			    " device %s to ONLINE state in pool %s.\n",
			    fullpath, zpool_get_name(zhp));
			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
				(void) zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}
Example #8
static boolean_t
encryption_feature_is_enabled(zpool_handle_t *zph)
{
	nvlist_t *features;
	uint64_t feat_refcount;

	/* check that features can be enabled */
	if (zpool_get_prop_int(zph, ZPOOL_PROP_VERSION, NULL)
	    < SPA_VERSION_FEATURES)
		return (B_FALSE);

	/* check for crypto feature */
	features = zpool_get_features(zph);
	if (!features || nvlist_lookup_uint64(features,
	    spa_feature_table[SPA_FEATURE_ENCRYPTION].fi_guid,
	    &feat_refcount) != 0)
		return (B_FALSE);

	return (B_TRUE);
}
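
A short, hedged caller sketch: code that wants to set up dataset encryption would typically gate on this check first. The function name and the error message below are illustrative only; they are not taken from the original source.

/* Hypothetical caller: refuse encryption setup when the pool lacks support. */
static int
example_require_encryption_support(zpool_handle_t *zph)
{
	if (!encryption_feature_is_enabled(zph)) {
		(void) fprintf(stderr, "encryption is not supported or not "
		    "enabled on pool '%s'\n", zpool_get_name(zph));
		return (-1);
	}
	return (0);
}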
Example #9
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	nvlist_t *tgt;
	int error;

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
		if (error) {
			zpool_close(zhp);
			return (0);
		}

		error = nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (error)
			wholedisk = 0;

		if (wholedisk) {
			path = strrchr(path, '/');
			if (path != NULL) {
				path = zfs_strip_partition(path + 1);
				if (path == NULL) {
					zpool_close(zhp);
					return (0);
				}
			} else {
				zpool_close(zhp);
				return (0);
			}

			(void) strlcpy(fullpath, path, sizeof (fullpath));
			free(path);

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size of
			 * the expanded device.  When expanding there is no
			 * need to restart the scrub from the beginning.
			 */
			boolean_t scrub_restart = B_FALSE;
			(void) zpool_reopen_one(zhp, &scrub_restart);
		} else {
			(void) strlcpy(fullpath, path, sizeof (fullpath));
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				error = zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
				zed_log_msg(LOG_INFO, "zfsdle_vdev_online: "
				    "setting device '%s' to ONLINE state "
				    "in pool '%s': %d", fullpath,
				    zpool_get_name(zhp), error);
			}
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}
Example #10
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	int is_dm = 0;
	int is_sd = 0;
	uint_t c;
	vdev_stat_t *vs;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;  /* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = 1;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.
		 */
		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}
Example #11
/*
 * Determines if the pool is in use.  If so, it returns true along with the
 * state of the pool and the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0 && errno == ENOMEM) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is currently
			 * in use within another pool.  Since we return B_TRUE,
			 * libdiskmgt will continue to prevent generic consumers
			 * from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}


	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
Example #12
/*
 * Mount the given filesystem.
 */
int
zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
{
	struct stat buf;
	char mountpoint[ZFS_MAXPROPLEN];
	char mntopts[MNT_LINE_MAX];
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	int remount = 0, rc;

	if (options == NULL) {
		(void) strlcpy(mntopts, MNTOPT_DEFAULTS, sizeof (mntopts));
	} else {
		(void) strlcpy(mntopts, options, sizeof (mntopts));
	}

	if (strstr(mntopts, MNTOPT_REMOUNT) != NULL)
		remount = 1;

	/*
	 * If the pool is imported read-only then all mounts must be read-only
	 */
	if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
		(void) strlcat(mntopts, "," MNTOPT_RO, sizeof (mntopts));

	/*
	 * Load encryption key if required and not already present.
	 * Don't need to check ZFS_PROP_ENCRYPTION because unencrypted
	 * datasets have a keystatus of ZFS_CRYPT_KEY_NONE.
	 */
	fprintf(stderr, "zfs_mount: mount, keystatus is %d\r\n",
	    zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS));
	if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
	    ZFS_CRYPT_KEY_UNAVAILABLE) {
		fprintf(stderr, "loading KEY\r\n");
		(void) zfs_key_load(zhp, B_FALSE, B_FALSE, B_FALSE);
	}

	/*
	 * Append default mount options which apply to the mount point.
	 * This is done because under Linux (unlike Solaris) multiple mount
	 * points may reference a single super block.  This means that just
	 * given a super block there is no back reference to update the per
	 * mount point options.
	 */
	rc = zfs_add_options(zhp, mntopts, sizeof (mntopts));
	if (rc) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "default options unavailable"));
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
		    mountpoint));
	}

	/*
	 * Append the zfsutil option so the mount helper allows the mount.
	 */
	strlcat(mntopts, "," MNTOPT_ZFSUTIL, sizeof (mntopts));

	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
		return (0);

	/* Create the directory if it doesn't already exist */
	if (lstat(mountpoint, &buf) != 0) {
		if (mkdirp(mountpoint, 0755) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "failed to create mountpoint"));
			return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
			    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
			    mountpoint));
		}
	}

	/*
	 * Determine if the mountpoint is empty.  If it is not, refuse to
	 * perform the mount.  We don't perform this check if 'remount' is
	 * specified or if the overlay option (-O) is given.
	 */
	if ((flags & MS_OVERLAY) == 0 && !remount &&
	    !dir_is_empty(mountpoint)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "directory is not empty"));
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
	}

	/* perform the mount */
	rc = do_mount(zfs_get_name(zhp), mountpoint, mntopts);
	if (rc) {
		/*
		 * Generic errors are nasty, but there are just way too many
		 * from mount(), and they're well-understood.  We pick a few
		 * common ones to improve upon.
		 */
		if (rc == EBUSY) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "mountpoint or dataset is busy"));
		} else if (rc == EPERM) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Insufficient privileges"));
		} else if (rc == ENOTSUP) {
			char buf[256];
			int spa_version;

			VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
			(void) snprintf(buf, sizeof (buf),
			    dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
			    "file system on a version %d pool. Pool must be"
			    " upgraded to mount this file system."),
			    (u_longlong_t)zfs_prop_get_int(zhp,
			    ZFS_PROP_VERSION), spa_version);
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf));
		} else {
			zfs_error_aux(hdl, strerror(rc));
		}
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
		    zhp->zfs_name));
	}

	/* remove the mounted entry before re-adding on remount */
	if (remount)
		libzfs_mnttab_remove(hdl, zhp->zfs_name);

	/* add the mounted entry into our cache */
	libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, mntopts);
	return (0);
}
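
A brief, hedged usage sketch for zfs_mount(): open a filesystem handle by name and mount it with default options (NULL options, zero flags). The helper name and the minimal error handling are illustrative; it assumes the standard libzfs entry points zfs_open() and zfs_close().

/* Hypothetical caller: mount one filesystem with default options. */
static int
example_mount_fs(libzfs_handle_t *hdl, const char *fsname)
{
	zfs_handle_t *zhp;
	int err;

	if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	/* NULL options and zero flags request a default mount. */
	err = zfs_mount(zhp, NULL, 0);

	zfs_close(zhp);
	return (err);
}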
Example #13
/*
 * Mount the given filesystem.
 */
int
zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
{
    struct stat buf;
    char mountpoint[ZFS_MAXPROPLEN];
    char mntopts[MNT_LINE_MAX];
    libzfs_handle_t *hdl = zhp->zfs_hdl;

    if (options == NULL)
        mntopts[0] = '\0';
    else
        (void) strlcpy(mntopts, options, sizeof (mntopts));

    /*
     * If the pool is imported read-only then all mounts must be read-only
     */
    if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
        flags |= MS_RDONLY;

    if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
        return (0);

    /* Create the directory if it doesn't already exist */
    if (lstat(mountpoint, &buf) != 0) {
        if (mkdirp(mountpoint, 0755) != 0) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                        "failed to create mountpoint"));
            return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
                                  dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
                                  mountpoint));
        }
    }

    /*
     * Determine if the mountpoint is empty.  If it is not, refuse to perform
     * the mount.  We don't perform this check if MS_OVERLAY is specified,
     * which would defeat the point.  We also avoid this check if 'remount'
     * is specified.
     */
    if ((flags & MS_OVERLAY) == 0 &&
            strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
            !dir_is_empty(mountpoint)) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "directory is not empty"));
        return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
                              dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
    }

    /* perform the mount */
    if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
              MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
        /*
         * Generic errors are nasty, but there are just way too many
         * from mount(), and they're well-understood.  We pick a few
         * common ones to improve upon.
         */
        if (errno == EBUSY) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                        "mountpoint or dataset is busy"));
        } else if (errno == EPERM) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                        "Insufficient privileges"));
        } else if (errno == ENOTSUP) {
            char buf[256];
            int spa_version;

            VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
            (void) snprintf(buf, sizeof (buf),
                            dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
                                     "file system on a version %d pool. Pool must be"
                                     " upgraded to mount this file system."),
                            (u_longlong_t)zfs_prop_get_int(zhp,
                                    ZFS_PROP_VERSION), spa_version);
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf));
        } else {
            zfs_error_aux(hdl, strerror(errno));
        }
        return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
                              dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
                              zhp->zfs_name));
    }

    /* add the mounted entry into our cache */
    libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint,
                      mntopts);
    return (0);
}
Example #14
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	char *physpath = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	size_t len;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (offline ||
	    (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)))
		return;

	/*
	 * If the pool doesn't have the autoreplace property set, then attempt a
	 * true online (without the unspare flag), which will trigger a FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 *
		 * If any part of this process fails, then do a force online to
		 * trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		if (strncmp(path, "/dev/dsk/", 9) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);

}
Example #15
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
    char *path;
    vdev_state_t newstate;
    nvlist_t *nvroot, *newvd;
    pendingdev_t *device;
    uint64_t wholedisk = 0ULL;
    uint64_t offline = 0ULL;
    uint64_t guid = 0ULL;
    char *physpath = NULL, *new_devid = NULL;
    char rawpath[PATH_MAX], fullpath[PATH_MAX];
    char devpath[PATH_MAX];
    int ret;

    if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
        return;

    (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

    if (offline)
        return;  /* don't intervene if it was taken offline */

    zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s' (%llu)",
                zpool_get_name(zhp), path, (long long unsigned int)guid);

    /*
     * The VDEV guid is preferred for identification (gets passed in path)
     */
    if (guid != 0) {
        (void) snprintf(fullpath, sizeof (fullpath), "%llu",
                        (long long unsigned int)guid);
    } else {
        /*
         * otherwise use path sans partition suffix for whole disks
         */
        (void) strlcpy(fullpath, path, sizeof (fullpath));
        if (wholedisk) {
            char *spath = zfs_strip_partition(g_zfshdl, fullpath);

            (void) strlcpy(fullpath, spath, sizeof (fullpath));
            free(spath);
        }
    }

    /*
     * Attempt to online the device.
     */
    if (zpool_vdev_online(zhp, fullpath,
                          ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
            (newstate == VDEV_STATE_HEALTHY ||
             newstate == VDEV_STATE_DEGRADED)) {
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
                    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
                    "HEALTHY" : "DEGRADED");
        return;
    }

    /*
     * If the pool doesn't have the autoreplace property set, then attempt
     * a true online (without the unspare flag), which will trigger a FMA
     * fault.
     */
    if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
            !wholedisk || physpath == NULL) {
        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * convert physical path into its current device node
     */
    (void) snprintf(rawpath, sizeof (rawpath), "%s%s", DEV_BYPATH_PATH,
                    physpath);
    if (realpath(rawpath, devpath) == NULL) {
        zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
                    rawpath, strerror(errno));

        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);

        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * we're auto-replacing a raw disk, so label it first
     */
    if (!labeled) {
        char *leafname;

        /*
         * If this is a request to label a whole disk, then attempt to
         * write out the label.  Before we can label the disk, we need
         * to map the physical string that was matched on to the
         * underlying device node.
         *
         * If any part of this process fails, then do a force online
         * to trigger a ZFS fault for the device (and any hot spare
         * replacement).
         */
        leafname = strrchr(devpath, '/') + 1;

        /*
         * If this is a request to label a whole disk, then attempt to
         * write out the label.
         */
        if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
            zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
                        "label '%s' (%s)", leafname,
                        libzfs_error_description(g_zfshdl));

            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        /*
         * The disk labeling is asynchronous on Linux. Just record
         * this label request and return as there will be another
         * disk add event for the partition after the labeling is
         * completed.
         */
        device = malloc(sizeof (pendingdev_t));
        (void) strlcpy(device->pd_physpath, physpath,
                       sizeof (device->pd_physpath));
        list_insert_tail(&g_device_list, device);

        zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
                    leafname, (long long unsigned int)guid);

        return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

    } else { /* labeled */
        boolean_t found = B_FALSE;
        /*
         * match up with request above to label the disk
         */
        for (device = list_head(&g_device_list); device != NULL;
                device = list_next(&g_device_list, device)) {
            if (strcmp(physpath, device->pd_physpath) == 0) {
                list_remove(&g_device_list, device);
                free(device);
                found = B_TRUE;
                break;
            }
        }
        if (!found) {
            /* unexpected partition slice encountered */
            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
                    physpath, (long long unsigned int)guid);

        if (nvlist_lookup_string(vdev, "new_devid", &new_devid) != 0) {
            zed_log_msg(LOG_INFO, "  auto replace: missing devid!");
            return;
        }

        (void) snprintf(devpath, sizeof (devpath), "%s%s",
                        DEV_BYID_PATH, new_devid);
        path = devpath;
    }

    /*
     * Construct the root vdev to pass to zpool_vdev_attach().  While adding
     * the entire vdev structure is harmless, we construct a reduced set of
     * path/physpath/wholedisk to keep it simple.
     */
    if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        return;
    }
    if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        nvlist_free(nvroot);
        return;
    }

    if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
            (physpath != NULL && nvlist_add_string(newvd,
                    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
            nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
            nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
            nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
                                    1) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
        nvlist_free(newvd);
        nvlist_free(nvroot);
        return;
    }

    nvlist_free(newvd);

    /*
     * auto replace a leaf disk at same physical location
     */
    ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

    zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
                fullpath, path, (ret == 0) ? "no errors" :
                libzfs_error_description(g_zfshdl));

    nvlist_free(nvroot);
}
Example #16
/*
 * Mount the given filesystem.
 *
 * 'flags' appears pretty much always 0 here.
 */
int
zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
{
	struct stat buf;
	char mountpoint[ZFS_MAXPROPLEN];
	char mntopts[MNT_LINE_MAX];
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	int remount = 0;

	if (options == NULL) {
		mntopts[0] = '\0';
	} else {
		(void) strlcpy(mntopts, options, sizeof (mntopts));
	}

	if (strstr(mntopts, MNTOPT_REMOUNT) != NULL)
		remount = 1;

	/*
	 * If the pool is imported read-only then all mounts must be read-only
	 */
#ifdef __LINUX__
	if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
		(void) strlcat(mntopts, "," MNTOPT_RO, sizeof (mntopts));
#else
	if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
		flags |= MS_RDONLY;
#endif /* __LINUX__ */

	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
		return (0);

#ifdef __LINUX__

	/*
	 * Append default mount options which apply to the mount point.
	 * This is done because under Linux (unlike Solaris) multiple mount
	 * points may reference a single super block.  This means that just
	 * given a super block there is no back reference to update the per
	 * mount point options.
	 */
	rc = zfs_add_options(zhp, &flags);
	if (rc) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "default options unavailable"));
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
		    mountpoint));
	}

	/*
	 * Append the zfsutil option so the mount helper allows the mount.
	 */
	strlcat(mntopts, "," MNTOPT_ZFSUTIL, sizeof (mntopts));
#endif /* __LINUX__ */

	/* Create the directory if it doesn't already exist */
#ifdef __APPLE__
	if (zfs_get_type(zhp) != ZFS_TYPE_SNAPSHOT &&
	    lstat(mountpoint, &buf) != 0) {
#else
	if (lstat(mountpoint, &buf) != 0) {
#endif
		if (mkdirp(mountpoint, 0755) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "failed to create mountpoint"));
			return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
			    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
			    mountpoint));
		}

	}

	/*
	 * Determine if the mountpoint is empty.  If it is not, refuse to
	 * perform the mount.  We don't perform this check if 'remount' is
	 * specified or if the overlay option (-O) is given.
	 */
	if ((flags & MS_OVERLAY) == 0 && !remount &&
	    !dir_is_empty(mountpoint)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "directory is not empty"));
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
	}

	/* perform the mount */
#ifdef __LINUX__
	rc = do_mount(zfs_get_name(zhp), mountpoint, mntopts);
#elif defined(__APPLE__) || defined (__FREEBSD__)
	if (zmount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
	    MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
#elif defined(__illumos__)
	if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
	    MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
#endif /* __LINUX__*/
		/*
		 * Generic errors are nasty, but there are just way too many
		 * from mount(), and they're well-understood.  We pick a few
		 * common ones to improve upon.
		 */
		if (errno == EBUSY) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "mountpoint or dataset is busy"));
		} else if (errno == EPERM) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Insufficient privileges"));
		} else if (errno == ENOTSUP) {
			char buf[256];
			int spa_version;

			VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
			(void) snprintf(buf, sizeof (buf),
			    dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
			    "file system on a version %d pool. Pool must be"
			    " upgraded to mount this file system."),
			    (u_longlong_t)zfs_prop_get_int(zhp,
			    ZFS_PROP_VERSION), spa_version);
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf));
#ifdef __APPLE__
		} else if (((errno == ESRCH) || (errno == EINVAL) ||
		    (errno == ENOENT && lstat(mountpoint, &buf) != 0)) &&
		    zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "The parent file system must be mounted first."));
#endif
		} else {
			zfs_error_aux(hdl, strerror(errno));
		}
		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
		    zhp->zfs_name));
	}

#ifdef __APPLE__
	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
		fprintf(stderr, "ZFS: snapshot mountpoint '%s'\n", mountpoint);

	if (!(flags & MS_RDONLY))
		zfs_mount_seticon(mountpoint);
#endif

	/* remove the mounted entry before re-adding on remount */
	if (remount)
		libzfs_mnttab_remove(hdl, zhp->zfs_name);

	/* add the mounted entry into our cache */
	libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, mntopts);
	return (0);
}

/*
 * Unmount a single filesystem.
 */
static int
unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
{
    int error;
#if 0
    error = unmount(mountpoint, flags);
    if (unmount(mountpoint, flags) != 0) {
		return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
		    dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
		    mountpoint));
	}
#else
    error = do_unmount(mountpoint, flags);
    if (error != 0) {
        return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
                              dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
                    mountpoint));
    }
#endif

	return (0);
}

/*
 * Unmount the given filesystem.
 */
int
zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
#ifdef __LINUX__
	struct mnttab search = { 0 }, entry;
#else
	struct mnttab entry;
#endif /* __LINUX__ */
	char *mntpt = NULL;

	/* check to see if we need to unmount the filesystem */
	if (mountpoint != NULL ||
	    (((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) ||
	    (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT)) &&
	    libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {

		/*
		 * mountpoint may have come from a call to
		 * getmnt/getmntany if it isn't NULL. If it is NULL,
		 * we know it comes from getmntany which can then get
		 * overwritten later. We strdup it to play it safe.
		 */
		if (mountpoint == NULL)
			mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
		else
			mntpt = zfs_strdup(zhp->zfs_hdl, mountpoint);

		/*
		 * Unshare and unmount the filesystem
		 */
#ifdef __illumos__
		if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0)
#else
		if (zfs_unshare_nfs(zhp, mntpt) != 0)
#endif
		return (-1);

		if (unmount_one(hdl, mntpt, flags) != 0) {
			free(mntpt);
#ifdef __illumos__
			(void) zfs_shareall(zhp);
#else
			(void) zfs_share_nfs(zhp);
#endif
			return (-1);
		}
		libzfs_mnttab_remove(hdl, zhp->zfs_name);
		free(mntpt);

	}

	return (0);
}