Code Example #1
void lzwu_print_dedup_stats(nvlist_t *config)
{
        ddt_histogram_t *ddh;
        ddt_stat_t *dds;
        ddt_object_t *ddo;
        uint_t c;

        /*
         * If the pool was faulted then we may not have been able to
         * obtain the config. Otherwise, if we have anything in the dedup
         * table, continue processing the stats.
         */
        if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
            (uint64_t **)&ddo, &c) != 0 || ddo->ddo_count == 0)
                return;

        printf("\n");
        printf("DDT entries %llu, size %llu on disk, %llu in core\n",
               (u_longlong_t)ddo->ddo_count,
               (u_longlong_t)ddo->ddo_dspace,
               (u_longlong_t)ddo->ddo_mspace);

        verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
            (uint64_t **)&dds, &c) == 0);
        verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
            (uint64_t **)&ddh, &c) == 0);
        zpool_dump_ddt(dds, ddh);
}
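Every example on this page relies on the same idiom: a fixed-layout struct (vdev_stat_t, ddt_object_t, er_timeval_t, ...) is packed into an nvlist as a uint64_t array and recovered by casting the pointer that nvlist_lookup_uint64_array() hands back. The sketch below shows that round trip in isolation; the ex_stat_t struct and the "ex_stats" key are made up for illustration, and only libnvpair is assumed.

#include <stdio.h>
#include <sys/types.h>
#include <libnvpair.h>

/* Hypothetical stats struct; like vdev_stat_t it is a plain run of
 * uint64_t fields, so it can travel as a uint64_t array. */
typedef struct ex_stat {
	uint64_t es_reads;
	uint64_t es_writes;
} ex_stat_t;

int
main(void)
{
	nvlist_t *nvl;
	ex_stat_t in = { 42, 7 };
	ex_stat_t *out;
	uint_t nelem;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (1);

	/* Pack: the struct is stored as two uint64_t words. */
	if (nvlist_add_uint64_array(nvl, "ex_stats",
	    (uint64_t *)&in, sizeof (in) / sizeof (uint64_t)) == 0 &&
	    /* Unpack: the returned pointer aims into the nvlist's own
	     * storage, which is why none of the examples free it. */
	    nvlist_lookup_uint64_array(nvl, "ex_stats",
	    (uint64_t **)&out, &nelem) == 0 && nelem == 2) {
		(void) printf("reads=%llu writes=%llu\n",
		    (u_longlong_t)out->es_reads,
		    (u_longlong_t)out->es_writes);
	}

	nvlist_free(nvl);
	return (0);
}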
Code Example #2
File: zfs_de.c  Project: AlainODea/illumos-gate
static int
zpool_find_load_time(zpool_handle_t *zhp, void *arg)
{
	struct load_time_arg *lta = arg;
	uint64_t pool_guid;
	uint64_t *tod;
	nvlist_t *config;
	uint_t nelem;

	if (lta->lt_found)
		return (0);

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	if (pool_guid != lta->lt_guid)
		return (0);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem) == 0 && nelem == 2) {
		lta->lt_found = B_TRUE;
		lta->lt_time->ertv_sec = tod[0];
		lta->lt_time->ertv_nsec = tod[1];
	}

	return (0);
}
Code Example #3
File: libzfs_pool.c  Project: andreiw/polaris
/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
int
set_pool_health(nvlist_t *config)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	char *health;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	switch (vs->vs_state) {

	case VDEV_STATE_CLOSED:
	case VDEV_STATE_CANT_OPEN:
	case VDEV_STATE_OFFLINE:
		health = dgettext(TEXT_DOMAIN, "FAULTED");
		break;

	case VDEV_STATE_DEGRADED:
		health = dgettext(TEXT_DOMAIN, "DEGRADED");
		break;

	case VDEV_STATE_HEALTHY:
		health = dgettext(TEXT_DOMAIN, "ONLINE");
		break;

	default:
		abort();
	}

	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
}
Code Example #4
File: availdevs.c  Project: BillTheBest/illumos-gate
static int
add_pool_to_xml(nvlist_t *config, void *data)
{
	char *c;
	char *name;
	uint64_t guid;
	uint64_t version;
	uint64_t state;
	nvlist_t *devices;
	uint_t n;
	vdev_stat_t *vs;
	xmlNodePtr pool;
	xmlNodePtr importable = *((xmlNodePtr *)data);

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, &name) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state) ||
	    nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &devices) ||
	    nvlist_lookup_uint64_array(
	    devices, ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &n)) {
		return (-1);
	}

	pool = xmlNewChild(importable, NULL, (xmlChar *)ELEMENT_POOL, NULL);
	(void) xmlSetProp(pool, (xmlChar *)ATTR_POOL_NAME, (xmlChar *)name);

	set_uint64_prop(pool, ATTR_POOL_ID, guid);
	set_uint64_prop(pool, ATTR_POOL_VERSION, version);
	set_uint64_prop(pool, ATTR_POOL_USED, vs->vs_alloc);
	set_uint64_prop(pool, ATTR_POOL_SIZE, vs->vs_space);
	set_uint64_prop(pool, ATTR_POOL_REPLACEMENT_SIZE, vs->vs_rsize);
	set_uint64_prop(pool, ATTR_POOL_READ_BYTES,
	    vs->vs_bytes[ZIO_TYPE_READ]);
	set_uint64_prop(pool, ATTR_POOL_WRITE_BYTES,
	    vs->vs_bytes[ZIO_TYPE_WRITE]);
	set_uint64_prop(pool, ATTR_POOL_READ_OPERATIONS,
	    vs->vs_ops[ZIO_TYPE_READ]);
	set_uint64_prop(pool, ATTR_POOL_WRITE_OPERATIONS,
	    vs->vs_ops[ZIO_TYPE_WRITE]);
	set_uint64_prop(pool, ATTR_POOL_READ_ERRORS, vs->vs_read_errors);
	set_uint64_prop(pool, ATTR_POOL_WRITE_ERRORS, vs->vs_write_errors);
	set_uint64_prop(pool, ATTR_POOL_CHECKSUM_ERRORS,
	    vs->vs_checksum_errors);

	(void) xmlSetProp(pool, (xmlChar *)ATTR_DEVICE_STATE,
	    (xmlChar *)zjni_vdev_state_to_str(vs->vs_state));

	(void) xmlSetProp(pool, (xmlChar *)ATTR_DEVICE_STATUS,
	    (xmlChar *)zjni_vdev_aux_to_str(vs->vs_aux));

	(void) xmlSetProp(pool, (xmlChar *)ATTR_POOL_STATE,
	    (xmlChar *)zjni_pool_state_to_str(state));

	(void) xmlSetProp(pool, (xmlChar *)ATTR_POOL_STATUS, (xmlChar *)
	    zjni_pool_status_to_str(zpool_import_status(config, &c)));

	return (0);
}
Code Example #5
/*
 * Print out detailed scrub status.
 * @param pnv_root: the root tree
 */
void lzwu_zpool_print_scrub_status(nvlist_t *pnv_root)
{
        vdev_stat_t *p_vs;
        unsigned vs_count;
        time_t start, end, now;
        double fraction_done;
        uint64_t examined, total, minutes_left, minutes_taken;
        const char *psz_scrub_type;

        verify(nvlist_lookup_uint64_array(pnv_root, ZPOOL_CONFIG_STATS,
            (uint64_t **)&p_vs, &vs_count) == 0);

        /*
         * If there's never been a scrub, there's not much to say.
         */
        if(p_vs->vs_scrub_end == 0 && p_vs->vs_scrub_type == POOL_SCRUB_NONE)
        {
                printf("none requested\n");
                return;
        }

        psz_scrub_type = (p_vs->vs_scrub_type == POOL_SCRUB_RESILVER) ? "resilver" : "scrub";

        start = p_vs->vs_scrub_start;
        end = p_vs->vs_scrub_end;
        now = time(NULL);
        examined = p_vs->vs_scrub_examined;
        total = p_vs->vs_alloc;

        if(end != 0)
        {
                minutes_taken = (uint64_t)((end - start) / 60);

                printf("%s %s after %lluh%um with %llu errors on %s",
                       psz_scrub_type, p_vs->vs_scrub_complete ? "completed" : "stopped",
                       (u_longlong_t)(minutes_taken / 60),
                       (uint_t)(minutes_taken % 60),
                       (u_longlong_t)p_vs->vs_scrub_errors, ctime(&end));
                return;
        }

        if(examined == 0)
                examined = 1;
        if(examined > total)
                total = examined;

        fraction_done = (double)examined / total;
        minutes_left = (uint64_t)((now - start) *
            (1 - fraction_done) / fraction_done / 60);
        minutes_taken = (uint64_t)((now - start) / 60);

        printf("%s in progress for %lluh%um, %.2f%% done, %lluh%um to go\n",
               psz_scrub_type, (u_longlong_t)(minutes_taken / 60),
               (uint_t)(minutes_taken % 60), 100 * fraction_done,
               (u_longlong_t)(minutes_left / 60), (uint_t)(minutes_left % 60));
}
Code Example #6
File: zfs_diagnosis.c  Project: DeHackEd/zfs
/*ARGSUSED*/
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid;
	uint64_t *tod;
	er_timeval_t loaded = { 0 };
	nvlist_t *config, *vd;
	uint_t nelem = 0;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem);
	if (nelem == 2) {
		loaded.ertv_sec = tod[0];
		loaded.ertv_nsec = tod[1];
		for (zcp = uu_list_first(zfs_cases); zcp != NULL;
		    zcp = uu_list_next(zfs_cases, zcp)) {
			if (zcp->zc_data.zc_pool_guid == pool_guid &&
			    zcp->zc_data.zc_vdev_guid == 0) {
				zcp->zc_when = loaded;
			}
		}
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	if (ret) {
		zpool_close(zhp);
		return (-1);
	}

	zfs_mark_vdev(pool_guid, vd, &loaded);

	zpool_close(zhp);

	return (0);
}
Code Example #7
File: zfs_mod.c  Project: DeHackEd/zfs
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}
Code Example #8
File: zfs_de.c  Project: AlainODea/illumos-gate
/*ARGSUSED*/
static void
zfs_ereport_when(fmd_hdl_t *hdl, nvlist_t *nvl, er_timeval_t *when)
{
	uint64_t *tod;
	uint_t	nelem;

	if (nvlist_lookup_uint64_array(nvl, FMD_EVN_TOD, &tod, &nelem) == 0 &&
	    nelem == 2) {
		when->ertv_sec = tod[0];
		when->ertv_nsec = tod[1];
	} else {
		when->ertv_sec = when->ertv_nsec = UINT64_MAX;
	}
}
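Examples #2, #6 and #8 all decode the same two-word time convention: seconds in tod[0], nanoseconds in tod[1]. For completeness, a sketch of the producer side, assuming POSIX clock_gettime(); the "__tod" key name follows example #11 below.

#include <time.h>
#include <libnvpair.h>

static int
add_tod(nvlist_t *nvl)
{
	struct timespec ts;
	uint64_t tod[2];

	if (clock_gettime(CLOCK_REALTIME, &ts) != 0)
		return (-1);
	tod[0] = (uint64_t)ts.tv_sec;	/* seconds */
	tod[1] = (uint64_t)ts.tv_nsec;	/* nanoseconds */
	return (nvlist_add_uint64_array(nvl, "__tod", tod, 2));
}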
Code Example #9
File: libzfs_pool.c  Project: andreiw/polaris
/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}
Code Example #10
File: libzfs_status.c  Project: 0xbda2d2f8/freebsd
/*
 * Detect whether any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(vdev_stat_t *, uint_t),
    boolean_t ignore_replacing)
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, vsc, children;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	if (ignore_replacing == B_TRUE) {
		char *type;

		verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE,
		    &type) == 0);
		if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
			return (B_FALSE);
	}

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) == 0);

		if (func(vs, vsc) != 0)
			return (B_TRUE);
	}

	/*
	 * Check any L2 cache devs
	 */
	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	}

	return (B_FALSE);
}
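Examples #20 through #23 below plug small predicates (vdev_faulted, vdev_missing, vdev_errors, ...) into this walker. A sketch of what such a predicate can look like against the two-argument signature used here; the real helpers in libzfs_status.c differ in detail, so treat this as illustrative only.

/* Flag any leaf vdev whose error counters are non-zero. */
static int
vdev_errors_sketch(vdev_stat_t *vs, uint_t vsc)
{
	(void) vsc;	/* element count of the stats array, unused */
	return (vs->vs_read_errors != 0 ||
	    vs->vs_write_errors != 0 ||
	    vs->vs_checksum_errors != 0);
}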
Code Example #11
File: fx_subr.c  Project: AlainODea/illumos-gate
/* ARGSUSED */
int
fab_prep_basic_erpt(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *erpt,
    boolean_t isRC)
{
	uint64_t	*now;
	uint64_t	ena;
	uint_t		nelem;
	nvlist_t	*detector, *new_detector;
	char		rcpath[255];
	int		err = 0;

	/* Grab the tod, ena and detector(FMRI) */
	err |= nvlist_lookup_uint64_array(nvl, "__tod", &now, &nelem);
	err |= nvlist_lookup_uint64(nvl, "ena", &ena);
	err |= nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector);
	if (err)
		return (err);

	/* Make a copy of the detector */
	err = nvlist_dup(detector, &new_detector, NV_UNIQUE_NAME);
	if (err)
		return (err);

	/* Copy the tod and ena to erpt */
	(void) nvlist_add_uint64(erpt, FM_EREPORT_ENA, ena);
	(void) nvlist_add_uint64_array(erpt, "__tod", now, nelem);

	/*
	 * Create the correct ROOT FMRI from PCIe leaf fabric ereports.	 Used
	 * only by fab_prep_fake_rc_erpt.  See the fab_pciex_fake_rc_erpt_tbl
	 * comments for more information.
	 */
	if (isRC && fab_get_rcpath(hdl, nvl, rcpath)) {
		/* Create the correct PCIe RC new_detector aka FMRI */
		(void) nvlist_remove(new_detector, FM_FMRI_DEV_PATH,
		    DATA_TYPE_STRING);
		(void) nvlist_add_string(new_detector, FM_FMRI_DEV_PATH,
		    rcpath);
	}

	/* Copy the FMRI to erpt */
	(void) nvlist_add_nvlist(erpt, FM_EREPORT_DETECTOR, new_detector);

	nvlist_free(new_detector);
	return (err);
}
Code Example #12
File: zpoolstats.c  Project: jeroen92/zfs-stats-mysql
/*
 *	Collects zpool stats of the pool configured in the configData var.
 *	It opens a handler to the pool, load a tree of vdevs and then the
 *	stats of the root vdev. These stats are temporarily saved and then
 *	inserted in the SQL database.
 */
int collectZpoolStats() {
	// Open a handle to the zfs filesystems
	libzfs_handle_t *g_zfs = libzfs_init();

	// Open a handle to the defined zpool
	zpool_handle_t *zhp;
	zhp = zpool_open_canfail(g_zfs, configData.poolname->value);
	if (zhp == NULL) {
		libzfs_fini(g_zfs);
		return 1;
	}

	// Declarations and such...
	nvlist_t *configuration, *vdevroot;
	vdev_stat_t *vdevstats;
	iostatcollection statisticscollection;
	uint_t nelem;
	configuration = zpool_get_config(zhp, NULL);

	// The config nvlist is owned by the pool handle, so keep the handle
	// and the library open until we have finished reading the stats.

	// Put the vdev tree belonging to the pool into vdevroot.
	verify(nvlist_lookup_nvlist(configuration, ZPOOL_CONFIG_VDEV_TREE, &vdevroot) == 0);

	// Load the root vdev stats into vdevstats.
	verify(nvlist_lookup_uint64_array(vdevroot, ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vdevstats, &nelem) == 0);

	// Place the collected statistics in the collection
	statisticscollection.readops = vdevstats->vs_ops[ZIO_TYPE_READ];
	statisticscollection.writeops = vdevstats->vs_ops[ZIO_TYPE_WRITE];
	statisticscollection.readbytes = vdevstats->vs_bytes[ZIO_TYPE_READ];
	statisticscollection.writebytes = vdevstats->vs_bytes[ZIO_TYPE_WRITE];
	statisticscollection.checksum_errors = vdevstats->vs_checksum_errors;
	statisticscollection.state = vdevstats->vs_state;
	statisticscollection.space_alloc = vdevstats->vs_alloc;
	statisticscollection.space = vdevstats->vs_space;

	// Done with the pool handle and the library now.
	zpool_close(zhp);
	libzfs_fini(g_zfs);

	// Create the query and post it to MySQL
	char queryString[1024];
	snprintf(queryString, 1024,
	    "INSERT INTO io_stats (date, %s, %s, %s, %s, %s, %s, %s, %s) "
	    "VALUES (NOW(), '%llu', '%llu', '%llu', '%llu', '%llu', '%llu', '%llu', '%llu')",
	    "iop_read", "iop_write", "bandwidth_read", "bandwidth_write",
	    "space", "space_alloc", "checksum_errors", "state",
	    statisticscollection.readops, statisticscollection.writeops,
	    statisticscollection.readbytes, statisticscollection.writebytes,
	    statisticscollection.space, statisticscollection.space_alloc,
	    statisticscollection.checksum_errors, statisticscollection.state);
	if (executeQuery(queryString)) return 1;
	return 0;
}
Code Example #13
File: zio.c  Project: matusso/zabbix-modules
int
zpool_get_stats(zpool_handle_t *zhp, void *data)
{
	config_t *cnf = (config_t *)data;
	uint_t c;
	boolean_t missing;
	nvlist_t *nv, *config;
	vdev_stat_t *vs;

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);	/* don't leak the handle on error */
		return 1;
	}

	config = zpool_get_config(zhp, NULL);

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) != 0) {
		zpool_close(zhp);
		return 1;
	}

	if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) != 0) {
		zpool_close(zhp);
		return 1;
	}

	if (!strcmp(zpool_get_name(zhp), cnf->zname)) {
		cnf->zpool.read_ops  = vs->vs_ops[ZIO_TYPE_READ];
		cnf->zpool.write_ops = vs->vs_ops[ZIO_TYPE_WRITE];
		cnf->zpool.read_bts  = vs->vs_bytes[ZIO_TYPE_READ];
		cnf->zpool.write_bts = vs->vs_bytes[ZIO_TYPE_WRITE];
		cnf->zpool.alloc = vs->vs_alloc;
		cnf->zpool.free = vs->vs_space - vs->vs_alloc;
		cnf->zpool.health = zpool_get_health(zhp);
		cnf->zpool.dedupratio = zpool_get_dedupratio(zhp);
		cnf->zpool.name = zpool_get_poolname(zhp);
		cnf->zpool.ddt_memory = get_dedup_stats(config);
	}

	zpool_close(zhp);
	return 0;
}
Code Example #14
File: zfs_ioctl_compat.c  Project: 151706061/osv
static void
zfs_ioctl_compat_fix_stats_nvlist(nvlist_t *nvl)
{
	nvlist_t **child;
	nvlist_t *nvroot = NULL;
	vdev_stat_t *vs;
	uint_t c, children, nelem;

	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			zfs_ioctl_compat_fix_stats_nvlist(child[c]);
		}
	}

	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0)
		zfs_ioctl_compat_fix_stats_nvlist(nvroot);
#ifdef _KERNEL
	if ((nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
#else
	if ((nvlist_lookup_uint64_array(nvl, "stats",
#endif
	    (uint64_t **)&vs, &nelem) == 0)) {
		nvlist_add_uint64_array(nvl,
#ifdef _KERNEL
		    "stats",
#else
		    ZPOOL_CONFIG_VDEV_STATS,
#endif
		    (uint64_t *)vs, nelem);
#ifdef _KERNEL
		nvlist_remove(nvl, ZPOOL_CONFIG_VDEV_STATS,
#else
		nvlist_remove(nvl, "stats",
#endif
		    DATA_TYPE_UINT64_ARRAY);
	}
}
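The #ifdef dance above is a key rename: the stats array is added back under the other name, and only then is the original pair removed. The order matters because the looked-up pointer aims into the nvlist's own storage, and nvlist_add_uint64_array() copies the data before the old pair is dropped. A minimal sketch of the idiom, with hypothetical key names:

#include <libnvpair.h>

static int
rename_uint64_array(nvlist_t *nvl, const char *from, const char *to)
{
	uint64_t *vals;
	uint_t nelem;

	if (nvlist_lookup_uint64_array(nvl, from, &vals, &nelem) != 0)
		return (-1);
	/* The copy happens here, while 'vals' still points at live data. */
	if (nvlist_add_uint64_array(nvl, to, vals, nelem) != 0)
		return (-1);
	/* Now it is safe to drop the old pair. */
	return (nvlist_remove(nvl, from, DATA_TYPE_UINT64_ARRAY));
}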
Code Example #15
File: scheme.c  Project: CoryXie/opensolaris
int
fmd_fmri_unusable(nvlist_t *nvl)
{
	uint64_t pool_guid, vdev_guid;
	cbdata_t cb;
	nvlist_t *vd;
	int ret;

	(void) nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_POOL, &pool_guid);

	cb.cb_guid = pool_guid;
	cb.cb_pool = NULL;

	if (zpool_iter(g_zfs, find_pool, &cb) != 1)
		return (1);

	if (nvlist_lookup_uint64(nvl, FM_FMRI_ZFS_VDEV, &vdev_guid) != 0) {
		ret = (zpool_get_state(cb.cb_pool) == POOL_STATE_UNAVAIL);
		zpool_close(cb.cb_pool);
		return (ret);
	}

	vd = find_vdev(cb.cb_pool, vdev_guid);
	if (vd == NULL) {
		ret = 1;
	} else {
		vdev_stat_t *vs;
		uint_t c;

		(void) nvlist_lookup_uint64_array(vd, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &c);

		ret = (vs->vs_state < VDEV_STATE_DEGRADED);
	}

	zpool_close(cb.cb_pool);

	return (ret);
}
Code Example #16
/*
 * Detect whether any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *type;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		return (B_FALSE);

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &c) == 0);

		if (func(vs->vs_state, vs->vs_aux,
		    vs->vs_read_errors +
		    vs->vs_write_errors +
		    vs->vs_checksum_errors))
			return (B_TRUE);
	}

	return (B_FALSE);
}
Code Example #17
File: libzfs_import.c  Project: cbreak-black/zfs
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool txg (if available)
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version, pool_txg;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_TXG, &pool_txg) == 0)
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_POOL_TXG, pool_txg);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if (policy != NULL) {
			if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
			    policy) != 0)
				goto nomem;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
Code Example #18
/*
 * Print out configuration state as requested by status_callback.
 */
void lzwu_zpool_print_status_config(libzfs_handle_t *p_zhd, zpool_handle_t *zhp,
                                    const char *name, nvlist_t *nv, int namewidth,
                                    int depth, boolean_t isspare)
{
        nvlist_t **child;
        uint_t children;
        unsigned c;
        vdev_stat_t *vs;
        char rbuf[6], wbuf[6], cbuf[6], repaired[7];
        char *vname;
        uint64_t notpresent;
        spare_cbdata_t cb;
        char *state;

        verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &c) == 0);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                children = 0;

        state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
        if(isspare)
        {
                /*
                 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
                 * online drives.
                 */
                if(vs->vs_aux == VDEV_AUX_SPARED)
                        state = "INUSE";
                else if(vs->vs_state == VDEV_STATE_HEALTHY)
                        state = "AVAIL";
        }

        printf("\t%*s%-*s  %-8s", depth, "", namewidth - depth,
            name, state);

        if(!isspare)
        {
                zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
                zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
                zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf));
                printf(" %5s %5s %5s", rbuf, wbuf, cbuf);
        }

        if(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &notpresent) == 0)
        {
                char *path;
                verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
                printf("  was %s", path);
        }
        else if(vs->vs_aux != 0)
        {
                printf("  ");

                switch (vs->vs_aux)
                {
                case VDEV_AUX_OPEN_FAILED:
                        printf("cannot open");
                        break;

                case VDEV_AUX_BAD_GUID_SUM:
                        printf("missing device");
                        break;

                case VDEV_AUX_NO_REPLICAS:
                        printf("insufficient replicas");
                        break;

                case VDEV_AUX_VERSION_NEWER:
                        printf("newer version");
                        break;

                case VDEV_AUX_SPARED:
                        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
                            &cb.cb_guid) == 0);
                        if(zpool_iter(p_zhd, lzwu_find_spare, &cb) == 1)
                        {
                                if(strcmp(zpool_get_name(cb.cb_zhp),
                                    zpool_get_name(zhp)) == 0)
                                        printf("currently in use");
                                else
                                        printf("in use by pool '%s'", zpool_get_name(cb.cb_zhp));
                                zpool_close(cb.cb_zhp);
                        }
                        else
                                printf("currently in use");
                        break;

                case VDEV_AUX_ERR_EXCEEDED:
                        printf("too many errors");
                        break;

                case VDEV_AUX_IO_FAILURE:
                        printf("experienced I/O failures");
                        break;

                case VDEV_AUX_BAD_LOG:
                        printf("bad intent log");
                        break;

                case VDEV_AUX_EXTERNAL:
                        printf("external device fault");
                        break;

                case VDEV_AUX_SPLIT_POOL:
                        printf("split into new pool");
                        break;

                default:
                        printf("corrupted data");
                        break;
                }
        }
        else if(vs->vs_scrub_repaired != 0 && children == 0)
        {
                /*
                 * Report bytes resilvered/repaired on leaf devices.
                 */
                zfs_nicenum(vs->vs_scrub_repaired, repaired, sizeof (repaired));
                printf("  %s %s", repaired,
                       (vs->vs_scrub_type == POOL_SCRUB_RESILVER) ?
                       "resilvered" : "repaired");
        }

        printf("\n");

        for(unsigned c = 0; c < children; c++)
        {
                uint64_t islog = B_FALSE, ishole = B_FALSE;

                /* Don't print logs or holes here */
                nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, &islog);
                nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, &ishole);
                if(islog || ishole)
                        continue;
                vname = zpool_vdev_name(p_zhd, zhp, child[c], B_TRUE);
                lzwu_zpool_print_status_config(p_zhd, zhp, vname, child[c],
                                               namewidth, depth + 2, isspare);
                free(vname);
        }
}
Code Example #19
/*
 * Print the configuration of an exported pool.  Iterate over all vdevs in the
 * pool, printing out the name and status for each one.
 */
static void lzwu_print_import_config(libzfs_handle_t *p_zhd, const char *name, nvlist_t *nv, int namewidth, int depth)
{
        nvlist_t **child;
        uint_t c, children;
        vdev_stat_t *vs;
        char *type, *vname;

        verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
        if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
            strcmp(type, VDEV_TYPE_HOLE) == 0)
                return;

        verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &c) == 0);

        (void) printf("\t%*s%-*s", depth, "", namewidth - depth, name);
        (void) printf("  %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));

        if (vs->vs_aux != 0) {
                (void) printf("  ");

                switch (vs->vs_aux) {
                case VDEV_AUX_OPEN_FAILED:
                        printf("cannot open");
                        break;

                case VDEV_AUX_BAD_GUID_SUM:
                        printf("missing device");
                        break;

                case VDEV_AUX_NO_REPLICAS:
                        printf("insufficient replicas");
                        break;

                case VDEV_AUX_VERSION_NEWER:
                        printf("newer version");
                        break;

                case VDEV_AUX_ERR_EXCEEDED:
                        printf("too many errors");
                        break;

                default:
                        printf("corrupted data");
                        break;
                }
        }
        (void) printf("\n");

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return;

        for (c = 0; c < children; c++) {
                uint64_t is_log = B_FALSE;

                (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
                    &is_log);
                if (is_log)
                        continue;

                vname = zpool_vdev_name(p_zhd, NULL, child[c], B_TRUE);
                lzwu_print_import_config(p_zhd, vname, child[c], namewidth, depth + 2);
                free(vname);
        }

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
            &child, &children) == 0) {
                printf("\tcache\n");
                for (c = 0; c < children; c++) {
                        vname = zpool_vdev_name(p_zhd, NULL, child[c], B_FALSE);
                        (void) printf("\t  %s\n", vname);
                        free(vname);
                }
        }

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
            &child, &children) == 0) {
                printf("\tspares\n");
                for (c = 0; c < children; c++) {
                        vname = zpool_vdev_name(p_zhd, NULL, child[c], B_FALSE);
                        (void) printf("\t  %s\n", vname);
                        free(vname);
                }
        }
}
Code Example #20
File: libzfs_status.c  Project: ColdCanuck/zfs
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;
	unsigned long system_hostid = gethostid() & 0xffffffff;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != system_hostid &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Outdated, but usable, version
	 */
	if (version < SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	return (ZPOOL_STATUS_OK);
}
Code Example #21
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t nerr;
	uint64_t version;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Missing devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Currently resilvering
	 */
	if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Outdated, but usable, version
	 */
	if (version < ZFS_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	return (ZPOOL_STATUS_OK);
}
Code Example #22
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(zpool_handle_t *zhp, nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t hostid = 0;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);

	/*
	 * Pool last accessed by another system.
	 */
	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Pool has experienced failed I/O.
	 */
	if (stateval == POOL_STATE_IO_FAILURE) {
		zpool_handle_t *tmp_zhp = NULL;
		libzfs_handle_t *hdl = NULL;
		char property[ZPOOL_MAXPROPLEN];
		char *failmode = NULL;

		if (zhp == NULL) {
			char *poolname;

			verify(nvlist_lookup_string(config,
			    ZPOOL_CONFIG_POOL_NAME, &poolname) == 0);
			if ((hdl = libzfs_init()) == NULL)
				return (ZPOOL_STATUS_IO_FAILURE_WAIT);
			tmp_zhp = zpool_open_canfail(hdl, poolname);
			if (tmp_zhp == NULL) {
				libzfs_fini(hdl);
				return (ZPOOL_STATUS_IO_FAILURE_WAIT);
			}
		}
		if (zpool_get_prop(zhp ? zhp : tmp_zhp, ZPOOL_PROP_FAILUREMODE,
		    property, sizeof (property), NULL) == 0)
			failmode = property;
		if (tmp_zhp != NULL)
			zpool_close(tmp_zhp);
		if (hdl != NULL)
			libzfs_fini(hdl);
		if (failmode == NULL)
			return (ZPOOL_STATUS_IO_FAILURE_WAIT);

		if (strncmp(failmode, "continue", strlen("continue")) == 0)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		else
			return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Currently resilvering
	 */
	if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Outdated, but usable, version
	 */
	if (version < SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	return (ZPOOL_STATUS_OK);
}
Code Example #23
File: libzfs_status.c  Project: 0xbda2d2f8/freebsd
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors, B_TRUE))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined, B_TRUE))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed, B_TRUE))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Suboptimal, but usable, ashift configuration.
	 */
	if (find_vdev_problem(nvroot, vdev_non_native_ashift, B_FALSE))
		return (ZPOOL_STATUS_NON_NATIVE_ASHIFT);

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	/*
	 * Usable pool with disabled features
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			feat = fnvlist_lookup_nvlist(feat,
			    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
		}
	}

	return (ZPOOL_STATUS_OK);
}
Code Example #24
File: topo_prop.c  Project: NanXiao/illumos-joyent
static int
prop_getval(tnode_t *node, const char *pgname, const char *pname, void *val,
    topo_type_t type, uint_t *nelems, int *err)
{
	int i, j, ret = 0;
	topo_hdl_t *thp = node->tn_hdl;
	topo_propval_t *pv;

	topo_node_lock(node);
	if ((pv = prop_get(node, pgname, pname, NULL, err))
	    == NULL)
		return (get_properror(node, err, *err));

	if (pv->tp_type != type)
		return (get_properror(node, err, ETOPO_PROP_TYPE));

	switch (type) {
		case TOPO_TYPE_INT32:
			ret = nvlist_lookup_int32(pv->tp_val, TOPO_PROP_VAL_VAL,
			    (int32_t *)val);
			break;
		case TOPO_TYPE_UINT32:
			ret = nvlist_lookup_uint32(pv->tp_val,
			    TOPO_PROP_VAL_VAL, (uint32_t *)val);
			break;
		case TOPO_TYPE_INT64:
			ret = nvlist_lookup_int64(pv->tp_val, TOPO_PROP_VAL_VAL,
			    (int64_t *)val);
			break;
		case TOPO_TYPE_UINT64:
			ret = nvlist_lookup_uint64(pv->tp_val,
			    TOPO_PROP_VAL_VAL, (uint64_t *)val);
			break;
		case TOPO_TYPE_DOUBLE:
			ret = nvlist_lookup_double(pv->tp_val,
			    TOPO_PROP_VAL_VAL, (double *)val);
			break;
		case TOPO_TYPE_STRING: {
			char *str;

			ret = nvlist_lookup_string(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &str);
			if (ret == 0) {
				char *s2;
				if ((s2 = topo_hdl_strdup(thp, str)) == NULL)
					ret = -1;
				else
					*(char **)val = s2;
			}
			break;
		}
		case TOPO_TYPE_FMRI: {
			nvlist_t *nvl;

			ret = nvlist_lookup_nvlist(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &nvl);
			if (ret == 0)
				ret = topo_hdl_nvdup(thp, nvl,
				    (nvlist_t **)val);
			break;
		}
		case TOPO_TYPE_INT32_ARRAY: {
			int32_t *a1, *a2;

			if ((ret = nvlist_lookup_int32_array(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &a2, nelems)) != 0)
				break;
			if ((a1 = topo_hdl_alloc(thp, sizeof (int32_t) *
			    *nelems)) == NULL) {
				ret = ETOPO_NOMEM;
				break;
			}
			for (i = 0; i < *nelems; ++i)
				a1[i] = a2[i];
			*(int32_t **)val = a1;
			break;
		}
		case TOPO_TYPE_UINT32_ARRAY: {
			uint32_t *a1, *a2;

			if ((ret = nvlist_lookup_uint32_array(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &a2, nelems)) != 0)
				break;
			if ((a1 = topo_hdl_alloc(thp, sizeof (uint32_t) *
			    *nelems)) == NULL) {
				ret = ETOPO_NOMEM;
				break;
			}
			for (i = 0; i < *nelems; ++i)
				a1[i] = a2[i];
			*(uint32_t **)val = a1;
			break;
		}
		case TOPO_TYPE_INT64_ARRAY: {
			int64_t *a1, *a2;

			if ((ret = nvlist_lookup_int64_array(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &a2, nelems)) != 0)
				break;
			if ((a1 = topo_hdl_alloc(thp, sizeof (int64_t) *
			    *nelems)) == NULL) {
				ret = ETOPO_NOMEM;
				break;
			}
			for (i = 0; i < *nelems; ++i)
				a1[i] = a2[i];
			*(int64_t **)val = a1;
			break;
		}
		case TOPO_TYPE_UINT64_ARRAY: {
			uint64_t *a1, *a2;

			if ((ret = nvlist_lookup_uint64_array(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &a2, nelems)) != 0)
				break;
			if ((a1 = topo_hdl_alloc(thp, sizeof (uint64_t) *
			    *nelems)) == NULL) {
				ret = ETOPO_NOMEM;
				break;
			}
			for (i = 0; i < *nelems; ++i)
				a1[i] = a2[i];
			*(uint64_t **)val = a1;
			break;
		}
		case TOPO_TYPE_STRING_ARRAY: {
			char **a1, **a2;

			if ((ret = nvlist_lookup_string_array(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &a2, nelems)) != 0)
				break;
			if ((a1 = topo_hdl_alloc(thp, sizeof (char *) *
			    *nelems)) == NULL) {
				ret = ETOPO_NOMEM;
				break;
			}
			for (i = 0; i < *nelems; ++i) {
				if ((a1[i] = topo_hdl_strdup(thp, a2[i]))
				    == NULL) {
					/* unwind the copies made so far */
					for (j = 0; j < i; ++j)
						topo_hdl_strfree(thp, a1[j]);
					topo_hdl_free(thp, a1,
					    sizeof (char *) * *nelems);
					ret = ETOPO_NOMEM;
					break;
				}
			}
			if (ret != 0)
				break;
			*(char ***)val = a1;
			break;
		}
		case TOPO_TYPE_FMRI_ARRAY: {
			nvlist_t **a1, **a2;

			if ((ret = nvlist_lookup_nvlist_array(pv->tp_val,
			    TOPO_PROP_VAL_VAL, &a2, nelems)) != 0)
				break;
			if ((a1 = topo_hdl_alloc(thp, sizeof (nvlist_t *) *
			    *nelems)) == NULL) {
				ret = ETOPO_NOMEM;
				break;
			}
			for (i = 0; i < *nelems; ++i) {
				if (topo_hdl_nvdup(thp, a2[i], &a1[i]) < 0) {
					/* unwind the copies made so far */
					for (j = 0; j < i; ++j)
						nvlist_free(a1[j]);
					topo_hdl_free(thp, a1,
					    sizeof (nvlist_t *) * *nelems);
					ret = ETOPO_NOMEM;
					break;
				}
			}
			if (ret != 0)
				break;
			*(nvlist_t ***)val = a1;
			break;
		}
		default:
			ret = ETOPO_PROP_NOENT;
	}

	if (ret != 0) {
		if (ret == ENOENT)
			return (get_properror(node, err, ETOPO_PROP_NOENT));
		else if (ret < ETOPO_UNKNOWN)
			return (get_properror(node, err, ETOPO_PROP_NVL));
		else
			return (get_properror(node, err, ret));
	}

	topo_node_unlock(node);
	return (0);
}
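prop_getval() sits underneath the public topo_prop_get_*() accessors, which pass the address of the caller's result buffer through the void *val argument. A hedged sketch of a typical caller follows; the property group and property names are hypothetical.

#include <fm/libtopo.h>

/*
 * Hypothetical caller: fetch a uint64 property through the public
 * accessor, which funnels into prop_getval() with type
 * TOPO_TYPE_UINT64.  "example-pgroup"/"size" are made-up names.
 */
static int
read_size_prop(tnode_t *node, uint64_t *sizep)
{
	int err;

	if (topo_prop_get_uint64(node, "example-pgroup", "size",
	    sizep, &err) != 0)
		return (err);
	return (0);
}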
Code example #25
File: chip_amd.c Project: bahamas10/openzfs
int
amd_rank_create(topo_mod_t *mod, tnode_t *pnode, nvlist_t *dimmnvl,
    nvlist_t *auth)
{
	uint64_t *csnumarr;
	char **csnamearr;
	uint_t ncs, ncsname;
	tnode_t *ranknode;
	nvlist_t *fmri, *pfmri = NULL;
	uint64_t dsz, rsz;
	int nerr = 0;
	int err;
	int i;

	if (nvlist_lookup_uint64_array(dimmnvl, "csnums", &csnumarr,
	    &ncs) != 0 || nvlist_lookup_string_array(dimmnvl, "csnames",
	    &csnamearr, &ncsname) != 0 || ncs != ncsname) {
		whinge(mod, &nerr, "amd_rank_create: "
		    "csnums/csnames extraction failed\n");
		return (nerr);
	}

	if (topo_node_resource(pnode, &pfmri, &err) < 0) {
		whinge(mod, &nerr, "amd_rank_create: parent fmri lookup "
		    "failed\n");
		return (nerr);
	}

	if (topo_node_range_create(mod, pnode, RANK_NODE_NAME, 0, ncs) < 0) {
		whinge(mod, &nerr, "amd_rank_create: range create failed\n");
		nvlist_free(pfmri);
		return (nerr);
	}

	if (topo_prop_get_uint64(pnode, PGNAME(DIMM), "size", &dsz,
	    &err) == 0) {
		rsz = dsz / ncs;
	} else {
		whinge(mod, &nerr, "amd_rank_create: parent dimm has no "
		    "size\n");
		return (nerr);
	}

	for (i = 0; i < ncs; i++) {
		if (mkrsrc(mod, pnode, RANK_NODE_NAME, i, auth, &fmri) < 0) {
			whinge(mod, &nerr, "amd_rank_create: mkrsrc failed\n");
			continue;
		}

		if ((ranknode = topo_node_bind(mod, pnode, RANK_NODE_NAME, i,
		    fmri)) == NULL) {
			nvlist_free(fmri);
			whinge(mod, &nerr, "amd_rank_create: node bind "
			    "failed\n");
			continue;
		}

		nvlist_free(fmri);
		if (FM_AWARE_SMBIOS(mod))
			(void) topo_node_fru_set(ranknode, NULL, 0, &err);
		else
			(void) topo_node_fru_set(ranknode, pfmri, 0, &err);

		/*
		 * If a rank is faulted the asru is the associated
		 * chip-select, but if a page within a rank is faulted
		 * the asru is just that page.  Hence the dual preconstructed
		 * and computed ASRU.
		 */
		if (topo_method_register(mod, ranknode, rank_methods) < 0)
			whinge(mod, &nerr, "amd_rank_create: "
			    "topo_method_register failed");

		if (!is_xpv() && topo_method_register(mod, ranknode,
		    ntv_page_retire_methods) < 0)
			whinge(mod, &nerr, "amd_rank_create: "
			    "topo_method_register failed");

		(void) topo_node_asru_set(ranknode, cs_fmri[csnumarr[i]],
		    TOPO_ASRU_COMPUTE, &err);

		(void) topo_pgroup_create(ranknode, &rank_pgroup, &err);

		(void) topo_prop_set_uint64(ranknode, PGNAME(RANK), "size",
		    TOPO_PROP_IMMUTABLE, rsz, &err);

		(void) topo_prop_set_string(ranknode, PGNAME(RANK), "csname",
		    TOPO_PROP_IMMUTABLE, csnamearr[i], &err);

		(void) topo_prop_set_uint64(ranknode, PGNAME(RANK), "csnum",
		    TOPO_PROP_IMMUTABLE, csnumarr[i], &err);
	}

	nvlist_free(pfmri);

	return (nerr);
}
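amd_rank_create() opens by pulling two parallel arrays out of the dimm nvlist and refusing to continue unless their lengths agree. That pattern is reusable on its own; here is a small sketch with invented key names.

#include <libnvpair.h>

/*
 * Look up two parallel arrays and fail unless both exist and their
 * lengths match, as the csnums/csnames check above does.  Returns 0
 * on success, -1 otherwise.
 */
static int
lookup_parallel_arrays(nvlist_t *nvl, uint64_t **nums, char ***names,
    uint_t *n)
{
	uint_t nnames;

	if (nvlist_lookup_uint64_array(nvl, "nums", nums, n) != 0 ||
	    nvlist_lookup_string_array(nvl, "names", names, &nnames) != 0 ||
	    *n != nnames)
		return (-1);
	return (0);
}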
Code example #26
File: mp_utils.c Project: apprisi/illumos-gate
/* Calls the client callback function, if one is registered */
static void
notifyClient(sysevent_t *ev)
{
	nvlist_t *attr_list = NULL;

	uint64_t *val = NULL;
	int32_t  *instance = NULL;
	int32_t  *major = NULL;

	int valAllocated = 0;

	uint_t nelem = 0;

	int i = 0;
	int eventType = 0;
	int index = -1;

	void *pCallerData = NULL;

	char subClassName[256];

	MP_BOOL becomingVisible = MP_FALSE;

	MP_OID_LIST *oidList = NULL;


	log(LOG_INFO, "notifyClient()", "- enter");


	(void) strlcpy(subClassName, sysevent_get_subclass_name(ev),
	    sizeof (subClassName));

	if (strstr(subClassName, "change")) {

		eventType = PROP_CHANGE;

		log(LOG_INFO, "notifyClient()", "- got a change event");
		log(LOG_INFO, "notifyClient()", ": [%s]",
		    subClassName);

		if (strncmp(subClassName, ESC_SUN_MP_PLUGIN_CHANGE, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_PLUGIN;

		} else if (strncmp(subClassName, ESC_SUN_MP_LU_CHANGE, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_MULTIPATH_LU;

		} else if (strncmp(subClassName, ESC_SUN_MP_PATH_CHANGE, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_PATH_LU;

		} else if (strncmp(subClassName, ESC_SUN_MP_INIT_PORT_CHANGE,
		    255) == 0) {

			index = MP_OBJECT_TYPE_INITIATOR_PORT;

		} else if (strncmp(subClassName, ESC_SUN_MP_TPG_CHANGE, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_TARGET_PORT_GROUP;

		} else if (strncmp(subClassName, ESC_SUN_MP_TARGET_PORT_CHANGE,
		    255) == 0) {

			index = MP_OBJECT_TYPE_TARGET_PORT;

		} else if (strncmp(subClassName, ESC_SUN_MP_DEV_PROD_CHANGE,
		    255) == 0) {

			index = MP_OBJECT_TYPE_DEVICE_PRODUCT;
		}

	} else if ((strstr(subClassName, "add")) ||
	    (strstr(subClassName, "initiator_register"))) {

		eventType = VISA_CHANGE;
		becomingVisible = MP_TRUE;

		log(LOG_INFO, "notifyClient()", "- got a visibility"
		    " add event");
		log(LOG_INFO, "notifyClient()", ": [%s]",
		    subClassName);

		if (strncmp(subClassName, ESC_SUN_MP_LU_ADD, 255) == 0) {

			index = MP_OBJECT_TYPE_MULTIPATH_LU;

		} else if (strncmp(subClassName, ESC_SUN_MP_PATH_ADD, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_PATH_LU;

		} else if (strncmp(subClassName, ESC_DDI_INITIATOR_REGISTER,
		    255) == 0) {

			index = MP_OBJECT_TYPE_INITIATOR_PORT;

		} else if (strncmp(subClassName, ESC_SUN_MP_TPG_ADD,
		    255) == 0) {

			index = MP_OBJECT_TYPE_TARGET_PORT_GROUP;

		} else if (strncmp(subClassName, ESC_SUN_MP_TARGET_PORT_ADD,
		    255) == 0) {

			index = MP_OBJECT_TYPE_TARGET_PORT;

		} else if (strncmp(subClassName, ESC_SUN_MP_DEV_PROD_ADD, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_DEVICE_PRODUCT;
		}


	} else if ((strstr(subClassName, "remove")) ||
	    (strstr(subClassName, "initiator_unregister"))) {

		eventType = VISA_CHANGE;
		becomingVisible = MP_FALSE;

		log(LOG_INFO, "notifyClient()", "- got a visibility"
		    " remove event");
		log(LOG_INFO, "notifyClient()", ": [%s]",
		    subClassName);

		if (strncmp(subClassName, ESC_SUN_MP_LU_REMOVE, 255) == 0) {

			index = MP_OBJECT_TYPE_MULTIPATH_LU;

		} else if (strncmp(subClassName, ESC_SUN_MP_PATH_REMOVE, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_PATH_LU;

		} else if (strncmp(subClassName, ESC_DDI_INITIATOR_UNREGISTER,
		    255) == 0) {

			index = MP_OBJECT_TYPE_INITIATOR_PORT;

		} else if (strncmp(subClassName, ESC_SUN_MP_TPG_REMOVE, 255)
		    == 0) {

			index = MP_OBJECT_TYPE_TARGET_PORT_GROUP;

		} else if (strncmp(subClassName, ESC_SUN_MP_TARGET_PORT_REMOVE,
		    255) == 0) {

			index = MP_OBJECT_TYPE_TARGET_PORT;

		} else if (strncmp(subClassName, ESC_SUN_MP_DEV_PROD_REMOVE,
		    255) == 0) {

			index = MP_OBJECT_TYPE_DEVICE_PRODUCT;
		}


	} else {
		log(LOG_INFO, "notifyClient()", "- got an unsupported event");
		return;
	}

	if (index < 0) {

		log(LOG_INFO, "notifyClient()", "- index is less than zero");
		return;
	}

	if (eventType == VISA_CHANGE) {

		(void) pthread_mutex_lock(&g_visa_mutex);

		if (NULL == g_Visibility_Callback_List[index].pClientFn) {

			log(LOG_INFO, "notifyClient()",
			    "- no visibility change callback to notify");

			(void) pthread_mutex_unlock(&g_visa_mutex);

			return;
		}

		(void) pthread_mutex_unlock(&g_visa_mutex);
	}

	if (eventType == PROP_CHANGE) {

		(void) pthread_mutex_lock(&g_prop_mutex);

		if (NULL == g_Property_Callback_List[index].pClientFn) {

			log(LOG_INFO, "notifyClient()",
			    "- no property change callback to notify");

			(void) pthread_mutex_unlock(&g_prop_mutex);

			return;
		}

		(void) pthread_mutex_unlock(&g_prop_mutex);
	}

	(void) sysevent_get_attr_list(ev, &attr_list);
	if (NULL != attr_list) {

		if ((VISA_CHANGE == eventType) &&
		    (MP_OBJECT_TYPE_PLUGIN == index)) {

			val = (uint64_t *)malloc(sizeof (uint64_t));
			if (NULL == val) {
				nvlist_free(attr_list);
				return;
			}
			valAllocated = 1;

			/*
			 * We have no well-defined way to determine our OSN.
			 * Currently the common library uses 0 as OSN for every
			 * plugin, so just use 0. If the OSN assigned by the
			 * common library changed, this code would have to be
			 * updated.
			 */
			*val = 0;
			nelem = 1;

		} else if ((VISA_CHANGE == eventType) &&
		    (MP_OBJECT_TYPE_INITIATOR_PORT == index)) {

			(void) nvlist_lookup_int32_array(attr_list,
			    DDI_INSTANCE, &instance, &nelem);

			log(LOG_INFO, "notifyClient()",
			    "- event (PHCI_INSTANCE) has [%d] elements",
			    nelem);

			(void) nvlist_lookup_int32_array(attr_list,
			    DDI_DRIVER_MAJOR, &major, &nelem);

			log(LOG_INFO, "notifyClient()",
			    "- event (PHCI_DRIVER_MAJOR) has [%d] elements",
			    nelem);

			if ((NULL != instance) && (NULL != major)) {

				val = (uint64_t *)malloc(sizeof (uint64_t));
				if (NULL == val) {
					nvlist_free(attr_list);
					return;
				}

				valAllocated = 1;

				*val = 0;
				*val = MP_STORE_INST_TO_ID(*instance, *val);
				*val = MP_STORE_MAJOR_TO_ID(*major, *val);

				nelem = 1;

			} else {

				nelem = 0;
			}

		} else {

			(void) nvlist_lookup_uint64_array(attr_list, OIDLIST,
			    &val, &nelem);

			log(LOG_INFO, "notifyClient()",
			    "- event has [%d] elements",
			    nelem);
		}

		if (nelem > 0) {

			for (i = 0; i < nelem; i++) {

				log(LOG_INFO, "notifyClient()",
				    "- event [%d] = %llx",
				    i, val[i]);
			}

			oidList = createOidList(nelem);
			if (NULL == oidList) {

				log(LOG_INFO, "notifyClient()",
				    "- unable to create MP_OID_LIST");

				log(LOG_INFO, "notifyClient()",
				    "- error exit");

				nvlist_free(attr_list);

				return;
			}

			oidList->oidCount = nelem;

			for (i = 0; i < nelem; i++) {

				oidList->oids[i].objectType = index;
				oidList->oids[i].ownerId = g_pluginOwnerID;
				oidList->oids[i].objectSequenceNumber = val[i];
			}

			if (valAllocated) {

				free(val);
			}

			for (i = 0; i < oidList->oidCount; i++) {

				log(LOG_INFO, "notifyClient()",
				    "oidList->oids[%d].objectType"
				    "           = %d",
				    i, oidList->oids[i].objectType);
				log(LOG_INFO, "notifyClient()",
				    "oidList->oids[%d].ownerId"
				    "              = %d",
				    i, oidList->oids[i].ownerId);
				log(LOG_INFO, "notifyClient()",
				    "oidList->oids[%d].objectSequenceNumber"
				    " = %llx",
				    i, oidList->oids[i].objectSequenceNumber);
			}

			if (eventType == PROP_CHANGE) {

				(void) pthread_mutex_lock(&g_prop_mutex);

				pCallerData = g_Property_Callback_List[index].
				    pCallerData;

				(g_Property_Callback_List[index].pClientFn)
				    (oidList, pCallerData);

				(void) pthread_mutex_unlock(&g_prop_mutex);

			} else if (eventType == VISA_CHANGE) {

				(void) pthread_mutex_lock(&g_visa_mutex);

				pCallerData = g_Visibility_Callback_List[index].
				    pCallerData;

				(g_Visibility_Callback_List[index].pClientFn)
				    (becomingVisible, oidList, pCallerData);

				(void) pthread_mutex_unlock(&g_visa_mutex);

			}
		}

		nvlist_free(attr_list);
	}


	log(LOG_INFO, "notifyClient()", "- exit");
}
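For initiator-port events the plugin synthesizes the OID sequence number itself, packing the driver major number and instance into one 64-bit value via the MP_STORE_*_TO_ID macros. The sketch below shows that kind of packing under an assumed layout; the plugin's real bit assignment may differ.

#include <stdint.h>

/*
 * Assumed layout only: major number in the high 32 bits, instance in
 * the low 32.  MP_STORE_INST_TO_ID/MP_STORE_MAJOR_TO_ID define the
 * actual bit assignment.
 */
static uint64_t
pack_oid_seq(int32_t major, int32_t instance)
{
	return (((uint64_t)(uint32_t)major << 32) | (uint32_t)instance);
}

static int32_t
oid_seq_to_instance(uint64_t id)
{
	return ((int32_t)(id & 0xffffffffULL));
}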
Code example #27
File: zfs_mod.c Project: DeHackEd/zfs
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	int is_dm = 0;
	int is_sd = 0;
	uint_t c;
	vdev_stat_t *vs;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;  /* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = 1;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the under
		 * lying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		if (device == NULL) {
			zed_log_msg(LOG_WARNING,
			    "zfs_process_add: out of memory");
			return;
		}
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}
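The attach payload built above is a two-level nvlist: a root vdev whose children array holds the single replacement disk. A trimmed, self-contained sketch of that shape follows, writing the ZPOOL_CONFIG_*/VDEV_TYPE_* keys as their literal string values so the sketch needs no ZFS headers.

#include <libnvpair.h>

/*
 * Build { type = "root", children = [ { type = "disk", path } ] },
 * the same shape zfs_process_add() hands to zpool_vdev_attach().
 */
static nvlist_t *
make_single_disk_root(const char *path)
{
	nvlist_t *nvroot = NULL, *newvd = NULL;

	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(newvd, "type", "disk") != 0 ||
	    nvlist_add_string(newvd, "path", path) != 0 ||
	    nvlist_add_string(nvroot, "type", "root") != 0 ||
	    nvlist_add_nvlist_array(nvroot, "children", &newvd, 1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return (NULL);
	}

	nvlist_free(newvd);	/* the array add stored a copy */
	return (nvroot);
}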
Code example #28
File: util.c Project: lidongyang/zfs
static void
show_vdev_stats(const char *desc, const char *ctype, nvlist_t *nv, int indent)
{
	vdev_stat_t *vs;
	vdev_stat_t *v0;
	uint64_t sec;
	uint64_t is_log = 0;
	nvlist_t **child;
	uint_t c, children;
	char used[6], avail[6];
	char rops[6], wops[6], rbytes[6], wbytes[6], rerr[6], werr[6], cerr[6];

	v0 = umem_zalloc(sizeof (*v0), UMEM_NOFAIL);

	if (indent == 0 && desc != NULL) {
		(void) printf("                           "
		    " capacity   operations   bandwidth  ---- errors ----\n");
		(void) printf("description                "
		    "used avail  read write  read write  read write cksum\n");
	}

	if (desc != NULL) {
		char *suffix = "", *bias = NULL;
		char bias_suffix[32];

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
		(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
		    &bias);
		if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) != 0)
			vs = v0;

		if (bias != NULL) {
			(void) snprintf(bias_suffix, sizeof (bias_suffix),
			    " (%s)", bias);
			suffix = bias_suffix;
		} else if (is_log) {
			suffix = " (log)";
		}

		sec = MAX(1, vs->vs_timestamp / NANOSEC);

		nicenum(vs->vs_alloc, used, sizeof (used));
		nicenum(vs->vs_space - vs->vs_alloc, avail, sizeof (avail));
		nicenum(vs->vs_ops[ZIO_TYPE_READ] / sec, rops, sizeof (rops));
		nicenum(vs->vs_ops[ZIO_TYPE_WRITE] / sec, wops, sizeof (wops));
		nicenum(vs->vs_bytes[ZIO_TYPE_READ] / sec, rbytes,
		    sizeof (rbytes));
		nicenum(vs->vs_bytes[ZIO_TYPE_WRITE] / sec, wbytes,
		    sizeof (wbytes));
		nicenum(vs->vs_read_errors, rerr, sizeof (rerr));
		nicenum(vs->vs_write_errors, werr, sizeof (werr));
		nicenum(vs->vs_checksum_errors, cerr, sizeof (cerr));

		(void) printf("%*s%s%*s%*s%*s %5s %5s %5s %5s %5s %5s %5s\n",
		    indent, "",
		    desc,
		    (int)(indent+strlen(desc)-25-(vs->vs_space ? 0 : 12)),
		    suffix,
		    vs->vs_space ? 6 : 0, vs->vs_space ? used : "",
		    vs->vs_space ? 6 : 0, vs->vs_space ? avail : "",
		    rops, wops, rbytes, wbytes, rerr, werr, cerr);
	}
	umem_free(v0, sizeof (*v0));

	if (nvlist_lookup_nvlist_array(nv, ctype, &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		nvlist_t *cnv = child[c];
		char *cname = NULL, *tname;
		uint64_t np;
		int len;
		if (nvlist_lookup_string(cnv, ZPOOL_CONFIG_PATH, &cname) &&
		    nvlist_lookup_string(cnv, ZPOOL_CONFIG_TYPE, &cname))
			cname = "<unknown>";
		len = strlen(cname) + 2;
		tname = umem_zalloc(len, UMEM_NOFAIL);
		(void) strlcpy(tname, cname, len);
		if (nvlist_lookup_uint64(cnv, ZPOOL_CONFIG_NPARITY, &np) == 0)
			tname[strlen(tname)] = '0' + np;
		show_vdev_stats(tname, ctype, cnv, indent + 2);
		umem_free(tname, len);
	}
}
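The operations and bandwidth columns above are computed by dividing cumulative counters by the seconds covered by vs_timestamp, clamped to at least one second. The same rate computation in isolation:

#include <stdint.h>

#define	NANOSEC	1000000000ULL	/* mirrors the real NANOSEC constant */

/*
 * Turn a cumulative counter plus the vdev stats timestamp (in
 * nanoseconds) into a per-second rate, as the table above does.
 */
static uint64_t
per_second(uint64_t count, uint64_t timestamp_ns)
{
	uint64_t sec = timestamp_ns / NANOSEC;

	if (sec == 0)	/* same guard as MAX(1, ...) above */
		sec = 1;
	return (count / sec);
}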