Example #1
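/*
 * Print one row of dedup table (DDT) statistics, formatting each counter
 * with zfs_nicenum().  A bucket of h == -1 selects the "Total" row;
 * otherwise the row is labelled with the reference count 2^h.
 */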
static void
dump_ddt_stat(const ddt_stat_t *dds, int h)
{
	char refcnt[6];
	char blocks[6], lsize[6], psize[6], dsize[6];
	char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];

	if (dds == NULL || dds->dds_blocks == 0)
		return;

	if (h == -1)
		(void) strcpy(refcnt, "Total");
	else
		zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));

	zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
	zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
	zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
	zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
	zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
	zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
	zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
	zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));

	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
	    refcnt,
	    blocks, lsize, psize, dsize,
	    ref_blocks, ref_lsize, ref_psize, ref_dsize);
}
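Every example on this page formats its counters with zfs_nicenum(). For reference, a standalone call looks roughly like the sketch below, assuming the libzfs headers and library are available; the exact rounding of the output string is illustrative.

#include <stdio.h>
#include <libzfs.h>

int
main(void)
{
	char buf[32];

	/* Render a raw byte count as a short human-readable string. */
	zfs_nicenum(3ULL * 1024 * 1024 * 1024, buf, sizeof (buf));
	(void) printf("%s\n", buf);	/* prints something like "3G" */

	return (0);
}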
Example #2
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	char *packed;
	size_t len;
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares;
	uint_t nspares;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	if ((packed = zfs_alloc(zhp->zpool_hdl, len)) == NULL)
		return (-1);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add raidz2 vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	free(packed);

	return (ret);
}
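The comment above makes the division of labour explicit: zpool_add() trusts its caller to hand over a well-formed vdev tree. A minimal sketch of such a caller follows, under the assumption that the nvroot nvlist has already been built elsewhere (as the zpool command does from its arguments); the helper name add_vdevs and its error handling are illustrative, not part of libzfs.

#include <libzfs.h>

/* Illustrative helper; "nvroot" is assumed to be a valid vdev tree. */
static int
add_vdevs(const char *pool, nvlist_t *nvroot)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;
	int ret = -1;

	if ((hdl = libzfs_init()) == NULL)
		return (-1);

	if ((zhp = zpool_open(hdl, pool)) != NULL) {
		ret = zpool_add(zhp, nvroot);	/* 0 on success, -1 on error */
		zpool_close(zhp);
	}

	libzfs_fini(hdl);
	return (ret);
}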
Example #3
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

	if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
		return (no_memory(hdl));

	if ((packed = zfs_alloc(hdl, len)) == NULL)
		return (-1);

	if (nvlist_pack(nvroot, &packed, &len,
	    NV_ENCODE_NATIVE, 0) != 0) {
		free(packed);
		return (no_memory(hdl));
	}

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		free(packed);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	free(packed);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
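zpool_create() follows the same contract: the vdev tree is validated by the caller, and a non-NULL altroot both relocates the pool and, per the tail of the function above, sets the root dataset's mountpoint to '/'. A rough sketch of a caller, with the pool name and altroot path purely illustrative:

#include <libzfs.h>

/* Illustrative caller; "nvroot" is assumed to be a validated vdev tree. */
static int
create_test_pool(libzfs_handle_t *hdl, nvlist_t *nvroot)
{
	/* Passing NULL for altroot would create an ordinary pool instead. */
	return (zpool_create(hdl, "testpool", nvroot, "/mnt/testpool"));
}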
Example #4
/*
 * Print out configuration state as requested by status_callback.
 */
void lzwu_zpool_print_status_config(libzfs_handle_t *p_zhd, zpool_handle_t *zhp,
                                    const char *name, nvlist_t *nv, int namewidth,
                                    int depth, boolean_t isspare)
{
        nvlist_t **child;
        uint_t children;
        unsigned c;
        vdev_stat_t *vs;
        char rbuf[6], wbuf[6], cbuf[6], repaired[7];
        char *vname;
        uint64_t notpresent;
        spare_cbdata_t cb;
        char *state;

        verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &c) == 0);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                children = 0;

        state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
        if(isspare)
        {
                /*
                 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
                 * online drives.
                 */
                if(vs->vs_aux == VDEV_AUX_SPARED)
                        state = "INUSE";
                else if(vs->vs_state == VDEV_STATE_HEALTHY)
                        state = "AVAIL";
        }

        printf("\t%*s%-*s  %-8s", depth, "", namewidth - depth,
            name, state);

        if(!isspare)
        {
                zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
                zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
                zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf));
                printf(" %5s %5s %5s", rbuf, wbuf, cbuf);
        }

        if(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &notpresent) == 0)
        {
                char *path;
                verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
                printf("  was %s", path);
        }
        else if(vs->vs_aux != 0)
        {
                printf("  ");

                switch (vs->vs_aux)
                {
                case VDEV_AUX_OPEN_FAILED:
                        printf("cannot open");
                        break;

                case VDEV_AUX_BAD_GUID_SUM:
                        printf("missing device");
                        break;

                case VDEV_AUX_NO_REPLICAS:
                        printf("insufficient replicas");
                        break;

                case VDEV_AUX_VERSION_NEWER:
                        printf("newer version");
                        break;

                case VDEV_AUX_SPARED:
                        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
                            &cb.cb_guid) == 0);
                        if(zpool_iter(p_zhd, lzwu_find_spare, &cb) == 1)
                        {
                                if(strcmp(zpool_get_name(cb.cb_zhp),
                                    zpool_get_name(zhp)) == 0)
                                        printf("currently in use");
                                else
                                        printf("in use by pool '%s'", zpool_get_name(cb.cb_zhp));
                                zpool_close(cb.cb_zhp);
                        }
                        else
                                printf("currently in use");
                        break;

                case VDEV_AUX_ERR_EXCEEDED:
                        printf("too many errors");
                        break;

                case VDEV_AUX_IO_FAILURE:
                        printf("experienced I/O failures");
                        break;

                case VDEV_AUX_BAD_LOG:
                        printf("bad intent log");
                        break;

                case VDEV_AUX_EXTERNAL:
                        printf("external device fault");
                        break;

                case VDEV_AUX_SPLIT_POOL:
                        printf("split into new pool");
                        break;

                default:
                        printf("corrupted data");
                        break;
                }
        }
        else if(vs->vs_scrub_repaired != 0 && children == 0)
        {
                /*
                 * Report bytes resilvered/repaired on leaf devices.
                 */
                zfs_nicenum(vs->vs_scrub_repaired, repaired, sizeof (repaired));
                printf("  %s %s", repaired,
                       (vs->vs_scrub_type == POOL_SCRUB_RESILVER) ?
                       "resilvered" : "repaired");
        }

        printf("\n");

        for(unsigned c = 0; c < children; c++)
        {
                uint64_t islog = B_FALSE, ishole = B_FALSE;

                /* Don't print logs or holes here */
                nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, &islog);
                nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, &ishole);
                if(islog || ishole)
                        continue;
                vname = zpool_vdev_name(p_zhd, zhp, child[c], B_TRUE);
                lzwu_zpool_print_status_config(p_zhd, zhp, vname, child[c],
                                               namewidth, depth + 2, isspare);
                free(vname);
        }
}
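A caller typically feeds this printer the pool's current vdev tree, pulled out of the configuration nvlist. A rough sketch is shown below; the name-column width is hard-coded here (the real zpool status code computes it from the longest vdev name) and the helper name print_pool_status is illustrative.

#include <libzfs.h>

/* Illustrative driver; namewidth is hard-coded instead of measured. */
static void
print_pool_status(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
        nvlist_t *config, *nvroot;

        config = zpool_get_config(zhp, NULL);
        if (config != NULL &&
            nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0)
                lzwu_zpool_print_status_config(hdl, zhp, zpool_get_name(zhp),
                                               nvroot, 30, 0, B_FALSE);
}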