Example #1
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	char *physpath = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	size_t len;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (offline ||
	    (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)))
		return;

	/*
	 * If the pool doesn't have the autoreplace property set, then attempt a
	 * true online (without the unspare flag), which will trigger a FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 *
		 * If any part of this process fails, then do a force online to
		 * trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		if (strncmp(path, "/dev/dsk/", 9) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);
}
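
Note: zfs_process_add() is static, so it is reached through the module's own vdev iteration. The sketch below is only a minimal illustration of that calling pattern, assuming a hypothetical iterate_vdevs() helper (the real file walks the tree recursively); zpool_get_config(), nvlist_lookup_nvlist(), and nvlist_lookup_nvlist_array() are the standard libzfs/libnvpair calls.

#include <libzfs.h>

/*
 * Illustrative only: visit each top-level child of the pool's vdev
 * tree and hand it to zfs_process_add().  ZPOOL_CONFIG_VDEV_TREE and
 * ZPOOL_CONFIG_CHILDREN are the same nvlist keys used above.
 */
static void
iterate_vdevs(zpool_handle_t *zhp, boolean_t isdisk)
{
	nvlist_t *config, *nvroot, **child;
	uint_t c, children;

	if ((config = zpool_get_config(zhp, NULL)) == NULL)
		return;
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0)
		return;
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++)
		zfs_process_add(zhp, child[c], isdisk);
}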
Example #2
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	int is_dm = 0;
	int is_sd = 0;
	uint_t c;
	vdev_stat_t *vs;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;  /* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);

	/*
	 * The vdev guid is preferred for identification; when known, it is
	 * passed in place of the device path.
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = 1;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	(void) nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		if ((device = malloc(sizeof (pendingdev_t))) == NULL) {
			zed_log_msg(LOG_WARNING, "zfs_mod: malloc failed");
			return;
		}
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}
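
Note: zfs_strip_partition() above is the libzfs helper that turns a partition node name back into its parent disk name. As a rough sketch of what that stripping does, assuming only the two most common Linux naming schemes ("-partN" aliases and "sdXN" nodes; the real helper also covers nvme-style "pN" suffixes and more), a stand-in could look like:

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

/*
 * Simplified stand-in for zfs_strip_partition(): return a newly
 * allocated copy of 'name' with a trailing partition suffix removed,
 * e.g. "ata-ST4000-part1" -> "ata-ST4000" (hypothetical alias) and
 * "sda3" -> "sda".  Caller frees the result, as the listings above
 * do with free(spath).
 */
static char *
strip_partition_sketch(const char *name)
{
	char *copy = strdup(name);
	char *p;

	if (copy == NULL)
		return (NULL);

	/* by-id/by-path aliases end in "-part<N>": cut at the suffix */
	if ((p = strstr(copy, "-part")) != NULL && p != copy) {
		*p = '\0';
		return (copy);
	}

	/* "sdXN" nodes: drop the trailing digit run ("sda" is untouched) */
	if (copy[0] == 's' && copy[1] == 'd') {
		p = copy + strlen(copy);
		while (p > copy + 2 && isdigit((unsigned char)p[-1]))
			p--;
		*p = '\0';
	}

	return (copy);
}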
Example #3
File: zfs_mod.c  Project: pyavdr/zfs
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
    char *path;
    vdev_state_t newstate;
    nvlist_t *nvroot, *newvd;
    pendingdev_t *device;
    uint64_t wholedisk = 0ULL;
    uint64_t offline = 0ULL;
    uint64_t guid = 0ULL;
    char *physpath = NULL, *new_devid = NULL;
    char rawpath[PATH_MAX], fullpath[PATH_MAX];
    char devpath[PATH_MAX];
    int ret;

    if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
        return;

    (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

    if (offline)
        return;  /* don't intervene if it was taken offline */

    zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s' (%llu)",
                zpool_get_name(zhp), path, (long long unsigned int)guid);

    /*
     * The vdev guid is preferred for identification; when known, it is
     * passed in place of the device path.
     */
    if (guid != 0) {
        (void) snprintf(fullpath, sizeof (fullpath), "%llu",
                        (long long unsigned int)guid);
    } else {
        /*
         * otherwise use path sans partition suffix for whole disks
         */
        (void) strlcpy(fullpath, path, sizeof (fullpath));
        if (wholedisk) {
            char *spath = zfs_strip_partition(g_zfshdl, fullpath);

            if (spath == NULL) {
                zed_log_msg(LOG_INFO, "%s: Can't alloc", __func__);
                return;
            }
            (void) strlcpy(fullpath, spath, sizeof (fullpath));
            free(spath);
        }
    }

    /*
     * Attempt to online the device.
     */
    if (zpool_vdev_online(zhp, fullpath,
                          ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
            (newstate == VDEV_STATE_HEALTHY ||
             newstate == VDEV_STATE_DEGRADED)) {
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
                    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
                    "HEALTHY" : "DEGRADED");
        return;
    }

    /*
     * If the pool doesn't have the autoreplace property set, then attempt
     * a true online (without the unspare flag), which will trigger a FMA
     * fault.
     */
    if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
            !wholedisk || physpath == NULL) {
        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * convert physical path into its current device node
     */
    (void) snprintf(rawpath, sizeof (rawpath), "%s%s", DEV_BYPATH_PATH,
                    physpath);
    if (realpath(rawpath, devpath) == NULL) {
        zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
                    rawpath, strerror(errno));

        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);

        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * we're auto-replacing a raw disk, so label it first
     */
    if (!labeled) {
        char *leafname;

        /*
         * If this is a request to label a whole disk, then attempt to
         * write out the label.  Before we can label the disk, we need
         * to map the physical string that was matched on to the
         * underlying device node.
         *
         * If any part of this process fails, then do a force online
         * to trigger a ZFS fault for the device (and any hot spare
         * replacement).
         */
        leafname = strrchr(devpath, '/') + 1;

        if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
            zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
                        "label '%s' (%s)", leafname,
                        libzfs_error_description(g_zfshdl));

            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        /*
         * The disk labeling is asynchronous on Linux. Just record
         * this label request and return as there will be another
         * disk add event for the partition after the labeling is
         * completed.
         */
        if ((device = malloc(sizeof (pendingdev_t))) == NULL) {
            zed_log_msg(LOG_WARNING, "zfs_mod: malloc failed");
            return;
        }
        (void) strlcpy(device->pd_physpath, physpath,
                       sizeof (device->pd_physpath));
        list_insert_tail(&g_device_list, device);

        zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
                    leafname, (long long unsigned int)guid);

        return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

    } else { /* labeled */
        boolean_t found = B_FALSE;
        /*
         * match up with request above to label the disk
         */
        for (device = list_head(&g_device_list); device != NULL;
                device = list_next(&g_device_list, device)) {
            if (strcmp(physpath, device->pd_physpath) == 0) {
                list_remove(&g_device_list, device);
                free(device);
                found = B_TRUE;
                break;
            }
        }
        if (!found) {
            /* unexpected partition slice encountered */
            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
                    physpath, (long long unsigned int)guid);

        if (nvlist_lookup_string(vdev, "new_devid", &new_devid) != 0) {
            zed_log_msg(LOG_INFO, "  auto replace: missing devid!");
            return;
        }

        (void) snprintf(devpath, sizeof (devpath), "%s%s",
                        DEV_BYID_PATH, new_devid);
        path = devpath;
    }

    /*
     * Construct the root vdev to pass to zpool_vdev_attach().  While adding
     * the entire vdev structure is harmless, we construct a reduced set of
     * path/physpath/wholedisk to keep it simple.
     */
    if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        return;
    }
    if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        nvlist_free(nvroot);
        return;
    }

    if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
            (physpath != NULL && nvlist_add_string(newvd,
                    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
            nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
            nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
            nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
                                    1) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
        nvlist_free(newvd);
        nvlist_free(nvroot);
        return;
    }

    nvlist_free(newvd);

    /*
     * auto replace a leaf disk at same physical location
     */
    ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

    zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
                fullpath, path, (ret == 0) ? "no errors" :
                libzfs_error_description(g_zfshdl));

    nvlist_free(nvroot);
}
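
Note: pendingdev_t and g_device_list are not shown in any of these excerpts. A plausible minimal definition, consistent with the list_insert_tail()/list_head()/list_next()/list_remove() calls above (the Solaris-style list interface that libspl provides on Linux), might be:

#include <stddef.h>	/* offsetof */
#include <sys/list.h>	/* Solaris-style intrusive lists */

/*
 * Assumed definitions for illustration; the real declarations live
 * elsewhere in zfs_mod.c and may differ in detail.
 */
typedef struct pendingdev {
	char		pd_physpath[128];	/* key matched on resume */
	list_node_t	pd_node;		/* linkage in g_device_list */
} pendingdev_t;

static list_t g_device_list;

/* Run once at startup, before any device events are processed. */
static void
device_list_init(void)
{
	list_create(&g_device_list, sizeof (pendingdev_t),
	    offsetof(pendingdev_t, pd_node));
}

The two-phase handshake exists because disk labeling is asynchronous on Linux: the first add event records the physpath and returns, and the later add event for the new partition finds the matching entry, removes it, and proceeds with the zpool_vdev_attach() replace.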