Example #1
static void
inhm_dimmlist(uint32_t node, nvlist_t *nvl)
{
	nvlist_t **dimmlist;
	nvlist_t **newchannel;
	int nchannels = CHANNELS_PER_MEMORY_CONTROLLER;
	int nd;
	uint8_t i, j;
	nhm_dimm_t **dimmpp;
	nhm_dimm_t *dimmp;

	dimmlist =  kmem_zalloc(sizeof (nvlist_t *) * MAX_DIMMS_PER_CHANNEL,
	    KM_SLEEP);
	newchannel = kmem_zalloc(sizeof (nvlist_t *) * nchannels, KM_SLEEP);
	dimmpp = &nhm_dimms[node * CHANNELS_PER_MEMORY_CONTROLLER *
	    MAX_DIMMS_PER_CHANNEL];
	(void) nvlist_add_string(nvl, "memory-policy",
	    closed_page ? "closed-page" : "open-page");
	(void) nvlist_add_string(nvl, "memory-ecc",
	    ecc_enabled ? lockstep[node] ? "x8" : "x4" : "no");
	for (i = 0; i < nchannels; i++) {
		(void) nvlist_alloc(&newchannel[i], NV_UNIQUE_NAME, KM_SLEEP);
		(void) nvlist_add_string(newchannel[i], "channel-mode",
		    CHANNEL_DISABLED(MC_STATUS_RD(node), i) ? "disabled" :
		    i != 2 && lockstep[node] ? "lockstep" :
		    i != 2 && mirror_mode[node] ?
		    REDUNDANCY_LOSS(MC_RAS_STATUS_RD(node)) ?
		    "redundancy-loss" : "mirror" :
		    i == 2 && spare_channel[node] &&
		    !REDUNDANCY_LOSS(MC_RAS_STATUS_RD(node)) ? "spare" :
		    "independent");
		nd = 0;
		for (j = 0; j < MAX_DIMMS_PER_CHANNEL; j++) {
			dimmp = *dimmpp;
			if (dimmp != NULL) {
				dimmlist[nd] = inhm_dimm(dimmp, node, i,
				    (uint32_t)j);
				nd++;
			}
			dimmpp++;
		}
		if (nd) {
			(void) nvlist_add_nvlist_array(newchannel[i],
			    "memory-dimms", dimmlist, nd);
			for (j = 0; j < nd; j++)
				nvlist_free(dimmlist[j]);
		}
	}
	(void) nvlist_add_nvlist_array(nvl, MCINTEL_NVLIST_MC, newchannel,
	    nchannels);
	for (i = 0; i < nchannels; i++)
		nvlist_free(newchannel[i]);
	kmem_free(dimmlist, sizeof (nvlist_t *) * MAX_DIMMS_PER_CHANNEL);
	kmem_free(newchannel, sizeof (nvlist_t *) * nchannels);
}
Example #2
static int
ch_end(list_wrap_t **lw, boolean_t array, int argc, char **argv)
{
	nvlist_t *parent;
	char *name;

	if (list_wrap_depth(*lw) < 2) {
		(void) fprintf(stderr, "ERROR: not nested, cannot end.\n");
		return (-1);
	}

	parent = (*lw)->lw_next->lw_nvl[(*lw)->lw_next->lw_pos];
	name = (*lw)->lw_name;
	if ((*lw)->lw_array) {
		/*
		 * This was an array of objects.
		 */
		nvlist_t **children = (*lw)->lw_nvl;
		int nelems = (*lw)->lw_pos + 1;

		if (nvlist_add_nvlist_array(parent, name, children,
		    nelems) != 0) {
			(void) fprintf(stderr, "fail at "
			    "nvlist_add_nvlist_array\n");
			return (-1);
		}
	} else {
		/*
		 * This was a single object.
		 */
		nvlist_t *child = (*lw)->lw_nvl[0];

		if ((*lw)->lw_pos != 0)
			abort();

		if (nvlist_add_nvlist(parent, name, child) != 0) {
			(void) fprintf(stderr, "fail at nvlist_add_nvlist\n");
			return (-1);
		}
	}

	*lw = list_wrap_pop_and_free(*lw);

	return (0);
}
Example #3
static void
inhm_rank(nvlist_t *newdimm, nhm_dimm_t *nhm_dimm, uint32_t node,
    uint8_t channel, uint32_t dimm, uint64_t rank_size)
{
	nvlist_t **newrank;
	int num;
	int i;
	uint64_t dimm_base;
	uint64_t vrank_sz;
	uint64_t rank_addr;
	uint64_t pa;
	uint32_t sinterleave, cinterleave, rinterleave;
	uint32_t sway, cway, rway;

	newrank = kmem_zalloc(sizeof (nvlist_t *) * nhm_dimm->nranks, KM_SLEEP);
	for (i = 0; i < nhm_dimm->nranks; i++) {
		(void) nvlist_alloc(&newrank[i], NV_UNIQUE_NAME, KM_SLEEP);
		rank_addr = 0;
		num = 0;
		while (rank_addr < rank_size) {
			pa = dimm_to_addr(node, channel, dimm * 4 + i,
			    rank_addr, &dimm_base, &vrank_sz, &sinterleave,
			    &cinterleave, &rinterleave, &sway, &cway, &rway);
			if (pa == -1)
				break;
			inhm_vrank(newrank[i], num, dimm_base,
			    vrank_sz * sinterleave * cinterleave * rinterleave,
			    sinterleave, cinterleave, rinterleave, sway, cway,
			    rway);
			rank_addr += vrank_sz;
			num++;
		}

	}
	(void) nvlist_add_nvlist_array(newdimm, MCINTEL_NVLIST_RANKS, newrank,
	    nhm_dimm->nranks);
	for (i = 0; i < nhm_dimm->nranks; i++)
		nvlist_free(newrank[i]);
	kmem_free(newrank, sizeof (nvlist_t *) * nhm_dimm->nranks);
}
Example #4
/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
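For reference, here is a minimal, hypothetical caller sketch (error handling abbreviated) showing how the ZPOOL_HIST_RECORD array built above could be consumed with nvlist_lookup_nvlist_array(); the zpool_handle_t acquisition and the ZPOOL_HIST_CMD lookup are assumptions based on the public libzfs/libnvpair interfaces, not part of the example above.

/*
 * Hypothetical usage sketch: walk the history records returned by
 * zpool_get_history().  Assumes 'zhp' was obtained elsewhere
 * (e.g. via zpool_open()).
 */
nvlist_t *nvhis;
nvlist_t **records;
uint_t nrecords, i;
char *cmd;

if (zpool_get_history(zhp, &nvhis) == 0 &&
    nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
    &records, &nrecords) == 0) {
	for (i = 0; i < nrecords; i++) {
		/* ZPOOL_HIST_CMD holds the command line that was logged */
		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
}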
Example #5
int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain,
    dmu_tx_t *tx)
{
	fuid_domain_t searchnode, *findnode;
	avl_index_t loc;

	/*
	 * If this is the dummy "nobody" domain, return an index of 0
	 * to cause the created FUID to be a standard POSIX id
	 * for the user nobody.
	 */
	if (domain[0] == '\0') {
		*retdomain = "";
		return (0);
	}

	searchnode.f_ksid = ksid_lookupdomain(domain);
	if (retdomain) {
		*retdomain = searchnode.f_ksid->kd_name;
	}
	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs, tx);

	rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
	findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
	rw_exit(&zfsvfs->z_fuid_lock);

	if (findnode) {
		ksiddomain_rele(searchnode.f_ksid);
		return (findnode->f_idx);
	} else {
		fuid_domain_t *domnode;
		nvlist_t *nvp;
		nvlist_t **fuids;
		uint64_t retidx;
		size_t nvsize = 0;
		char *packed;
		dmu_buf_t *db;
		int i = 0;

		domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
		domnode->f_ksid = searchnode.f_ksid;

		rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
		retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;

		avl_add(&zfsvfs->z_fuid_domain, domnode);
		avl_add(&zfsvfs->z_fuid_idx, domnode);
		/*
		 * Now resync the on-disk nvlist.
		 */
		VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		domnode = avl_first(&zfsvfs->z_fuid_domain);
		fuids = kmem_alloc(retidx * sizeof (void *), KM_SLEEP);
		while (domnode) {
			VERIFY(nvlist_alloc(&fuids[i],
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
			VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
			    domnode->f_idx) == 0);
			VERIFY(nvlist_add_uint64(fuids[i],
			    FUID_OFFSET, 0) == 0);
			VERIFY(nvlist_add_string(fuids[i++], FUID_DOMAIN,
			    domnode->f_ksid->kd_name) == 0);
			domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode);
		}
		VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
		    fuids, retidx) == 0);
		for (i = 0; i != retidx; i++)
			nvlist_free(fuids[i]);
		kmem_free(fuids, retidx * sizeof (void *));
		VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
		packed = kmem_alloc(nvsize, KM_SLEEP);
		VERIFY(nvlist_pack(nvp, &packed, &nvsize,
		    NV_ENCODE_XDR, KM_SLEEP) == 0);
		nvlist_free(nvp);
		zfsvfs->z_fuid_size = nvsize;
		dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
		    zfsvfs->z_fuid_size, packed, tx);
		kmem_free(packed, zfsvfs->z_fuid_size);
		VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
		    FTAG, &db));
		dmu_buf_will_dirty(db, tx);
		*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
		dmu_buf_rele(db, FTAG);

		rw_exit(&zfsvfs->z_fuid_lock);
		return (retidx);
	}
}
Example #6
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * Also can arrive here from an ESC_ZFS_VDEV_CHECK event
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	int is_dm = 0;
	int is_sd = 0;
	uint_t c;
	vdev_stat_t *vs;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;  /* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = 1;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.
		 */
		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}
Example #7
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool txg (if available)
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version, pool_txg;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_TXG, &pool_txg) == 0)
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_POOL_TXG, pool_txg);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if (policy != NULL) {
			if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
			    policy) != 0)
				goto nomem;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
Example #8
void
fnvlist_add_nvlist_array(nvlist_t *nvl, const char *name,
    nvlist_t **val, uint_t n)
{
	VERIFY0(nvlist_add_nvlist_array(nvl, name, val, n));
}
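Since fnvlist_add_nvlist_array() is the verifying wrapper used throughout these examples, here is a minimal, self-contained sketch of the usual pattern around it: build the children, add them as an array (the contents are copied into the parent), then free the originals. The names "parent", "children" and "index" are illustrative only and do not come from the examples above.

#include <libnvpair.h>

/* Illustrative sketch only; not taken from the examples above. */
static nvlist_t *
build_parent_with_children(void)
{
	nvlist_t *parent = fnvlist_alloc();
	nvlist_t *children[2];
	uint_t i;

	for (i = 0; i < 2; i++) {
		children[i] = fnvlist_alloc();
		fnvlist_add_uint32(children[i], "index", i);
	}

	/* the array contents are copied into 'parent' ... */
	fnvlist_add_nvlist_array(parent, "children", children, 2);

	/* ... so the caller still owns and must free the originals */
	for (i = 0; i < 2; i++)
		nvlist_free(children[i]);

	return (parent);
}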
Example #9
/*
 * sync out AVL trees to persistent storage.
 */
void
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
#ifdef HAVE_ZPL
	nvlist_t *nvp;
	nvlist_t **fuids;
	size_t nvsize = 0;
	char *packed;
	dmu_buf_t *db;
	fuid_domain_t *domnode;
	int numnodes;
	int i;

	if (!zfsvfs->z_fuid_dirty) {
		return;
	}

	rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);

	/*
	 * First, see if the FUID table needs to be created.
	 */
	if (zfsvfs->z_fuid_obj == 0) {
		zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
		    DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    ZFS_FUID_TABLES, sizeof (uint64_t), 1,
		    &zfsvfs->z_fuid_obj, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
	fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
	for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
	    domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
		VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
		    domnode->f_idx) == 0);
		VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
		VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
		    domnode->f_ksid->kd_name) == 0);
	}
	VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
	    fuids, numnodes) == 0);
	for (i = 0; i != numnodes; i++)
		nvlist_free(fuids[i]);
	kmem_free(fuids, numnodes * sizeof (void *));
	VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
	packed = kmem_alloc(nvsize, KM_SLEEP);
	VERIFY(nvlist_pack(nvp, &packed, &nvsize,
	    NV_ENCODE_XDR, KM_SLEEP) == 0);
	nvlist_free(nvp);
	zfsvfs->z_fuid_size = nvsize;
	dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
	    zfsvfs->z_fuid_size, packed, tx);
	kmem_free(packed, zfsvfs->z_fuid_size);
	VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
	    FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
	dmu_buf_rele(db, FTAG);

	zfsvfs->z_fuid_dirty = B_FALSE;
	rw_exit(&zfsvfs->z_fuid_lock);
#endif /* HAVE_ZPL */
}
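A hedged sketch of the reverse path, assuming the packed buffer written above has been read back from the FUID object: nvlist_unpack() restores the nvlist and nvlist_lookup_nvlist_array() yields the FUID_NVP_ARRAY entries. The 'packed' and 'nvsize' variables are assumptions standing in for data read back via the DMU; the actual table-load routine is not shown in this example.

/* Assumed sketch: 'packed' and 'nvsize' hold the on-disk FUID table. */
nvlist_t *nvp;
nvlist_t **fuidnvp;
uint_t count, i;
uint64_t idx;
char *domain;

if (nvlist_unpack(packed, nvsize, &nvp, KM_SLEEP) == 0) {
	if (nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
	    &fuidnvp, &count) == 0) {
		for (i = 0; i < count; i++) {
			VERIFY(nvlist_lookup_uint64(fuidnvp[i],
			    FUID_IDX, &idx) == 0);
			VERIFY(nvlist_lookup_string(fuidnvp[i],
			    FUID_DOMAIN, &domain) == 0);
			/* rebuild the in-core AVL trees from idx/domain */
		}
	}
	nvlist_free(nvp);
}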
Example #10
int
smb_get_bb_fmri(smbios_hdl_t *shp, nvlist_t *fmri,  uint_t parent,
    smbs_cnt_t *bbstypes)
{
	int rc = 0;
	int i, j, n, cnt;
	int id, index;
	nvlist_t *pairs[MAX_PAIRS];
	smbios_bboard_t bb;
	uint16_t chassis_inst, mch_inst;
	char name[40];
	char idstr[11];
	bbindex_t bb_idx;
	uint16_t bbid;
	int chcnt = 0;

	for (n = 0; n < MAX_PAIRS; n++) {
		bb_idx.index[n] = 0;
		pairs[n] = NULL;
	}
	bb_idx.count = 0;

	get_bboard_index(bbstypes, parent, &bb_idx);

	index = bb_idx.index[0];
	bbid = bbstypes->ids[index]->id;

	rc = get_chassis_inst(shp, &chassis_inst, bbid, &chcnt);

	if (rc != 0) {
		return (rc);
	}

	if ((bb_idx.count + chcnt) > MAX_PAIRS) {
		return (-1);
	}

	i = 0;
	if (chcnt > 1) {
		/*
		 * create main chassis pair
		 */
		pairs[i] = fm_nvlist_create(NULL);
		if (pairs[i] == NULL) {
			return (-1);
		}
		mch_inst = 0;
		(void) snprintf(idstr, sizeof (idstr), "%u", mch_inst);
		if ((nvlist_add_string(pairs[i], FM_FMRI_HC_NAME,
		    "chassis") != 0) ||
		    (nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr)) != 0) {
			fm_nvlist_destroy(pairs[i], FM_NVA_FREE);
			return (-1);
		}
		i++;
	}

	/*
	 * create chassis pair
	 */
	pairs[i] = fm_nvlist_create(NULL);
	if (pairs[i] == NULL) {
		for (n = 0; n < MAX_PAIRS; n++) {
			if (pairs[n] != NULL)
				fm_nvlist_destroy(pairs[n], FM_NVA_FREE);
		}
		return (-1);
	}
	(void) snprintf(idstr, sizeof (idstr), "%u", chassis_inst);
	if ((nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, "chassis") != 0) ||
	    (nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0)) {
		for (n = 0; n < MAX_PAIRS; n++) {
			if (pairs[n] != NULL)
				fm_nvlist_destroy(pairs[n], FM_NVA_FREE);
		}
		return (-1);
	}

	for (j = 0, i = chcnt, cnt = chcnt; j < bb_idx.count; j++) {
		index = bb_idx.index[j];
		bbid = bbstypes->ids[index]->id;
		rc =  smbios_info_bboard(shp, bbid, &bb);
		if (rc != 0) {
			rc = -1;
			break;
		}

		pairs[i] = fm_nvlist_create(NULL);
		if (pairs[i] == NULL) {
			rc = -1;
			break;
		}

		id = bbstypes->ids[index]->inst;
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		(void) strncpy(name, bbd_type[bb.smbb_type].name,
		    sizeof (name));
		cnt++;

		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr)
		    != 0) {
			rc = -1;
			break;
		}
		i++;
	}

	if (rc != -1) {
		if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST,
		    pairs, cnt) != 0) {
			rc = -1;
		}
	}

	for (n = 0; n < cnt; n++) {
		if (pairs[n] != NULL)
			fm_nvlist_destroy(pairs[n], FM_NVA_FREE);
	}

	return (rc);
}
Example #11
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	char *physpath = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	size_t len;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' for whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.
	 */
	if (offline ||
	    (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)))
		return;

	/*
	 * If the pool doesn't have the autoreplace property set, then attempt a
	 * true online (without the unspare flag), which will trigger a FMA
	 * fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 *
		 * If any part of this process fails, then do a force online to
		 * trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		if (strncmp(path, "/dev/dsk/", 9) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';

		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);

}
Example #12
File: zfs_mod.c  Project: pyavdr/zfs
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * Also can arrive here from an ESC_ZFS_VDEV_CHECK event
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
    char *path;
    vdev_state_t newstate;
    nvlist_t *nvroot, *newvd;
    pendingdev_t *device;
    uint64_t wholedisk = 0ULL;
    uint64_t offline = 0ULL;
    uint64_t guid = 0ULL;
    char *physpath = NULL, *new_devid = NULL;
    char rawpath[PATH_MAX], fullpath[PATH_MAX];
    char devpath[PATH_MAX];
    int ret;

    if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
        return;

    (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
    (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

    if (offline)
        return;  /* don't intervene if it was taken offline */

    zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s' (%llu)",
                zpool_get_name(zhp), path, (long long unsigned int)guid);

    /*
     * The VDEV guid is preferred for identification (gets passed in path)
     */
    if (guid != 0) {
        (void) snprintf(fullpath, sizeof (fullpath), "%llu",
                        (long long unsigned int)guid);
    } else {
        /*
         * otherwise use path sans partition suffix for whole disks
         */
        (void) strlcpy(fullpath, path, sizeof (fullpath));
        if (wholedisk) {
            char *spath = zfs_strip_partition(g_zfshdl, fullpath);

            (void) strlcpy(fullpath, spath, sizeof (fullpath));
            free(spath);
        }
    }

    /*
     * Attempt to online the device.
     */
    if (zpool_vdev_online(zhp, fullpath,
                          ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
            (newstate == VDEV_STATE_HEALTHY ||
             newstate == VDEV_STATE_DEGRADED)) {
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
                    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
                    "HEALTHY" : "DEGRADED");
        return;
    }

    /*
     * If the pool doesn't have the autoreplace property set, then attempt
     * a true online (without the unspare flag), which will trigger a FMA
     * fault.
     */
    if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
            !wholedisk || physpath == NULL) {
        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);
        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * convert physical path into its current device node
     */
    (void) snprintf(rawpath, sizeof (rawpath), "%s%s", DEV_BYPATH_PATH,
                    physpath);
    if (realpath(rawpath, devpath) == NULL) {
        zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
                    rawpath, strerror(errno));

        (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
                                 &newstate);

        zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
                    fullpath, libzfs_error_description(g_zfshdl));
        return;
    }

    /*
     * we're auto-replacing a raw disk, so label it first
     */
    if (!labeled) {
        char *leafname;

        /*
         * If this is a request to label a whole disk, then attempt to
         * write out the label.  Before we can label the disk, we need
         * to map the physical string that was matched on to the
         * underlying device node.
         *
         * If any part of this process fails, then do a force online
         * to trigger a ZFS fault for the device (and any hot spare
         * replacement).
         */
        leafname = strrchr(devpath, '/') + 1;

        /*
         * If this is a request to label a whole disk, then attempt to
         * write out the label.
         */
        if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
            zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
                        "label '%s' (%s)", leafname,
                        libzfs_error_description(g_zfshdl));

            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        /*
         * The disk labeling is asynchronous on Linux. Just record
         * this label request and return as there will be another
         * disk add event for the partition after the labeling is
         * completed.
         */
        device = malloc(sizeof (pendingdev_t));
        (void) strlcpy(device->pd_physpath, physpath,
                       sizeof (device->pd_physpath));
        list_insert_tail(&g_device_list, device);

        zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
                    leafname, (long long unsigned int)guid);

        return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

    } else { /* labeled */
        boolean_t found = B_FALSE;
        /*
         * match up with request above to label the disk
         */
        for (device = list_head(&g_device_list); device != NULL;
                device = list_next(&g_device_list, device)) {
            if (strcmp(physpath, device->pd_physpath) == 0) {
                list_remove(&g_device_list, device);
                free(device);
                found = B_TRUE;
                break;
            }
        }
        if (!found) {
            /* unexpected partition slice encountered */
            (void) zpool_vdev_online(zhp, fullpath,
                                     ZFS_ONLINE_FORCEFAULT, &newstate);
            return;
        }

        zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
                    physpath, (long long unsigned int)guid);

        if (nvlist_lookup_string(vdev, "new_devid", &new_devid) != 0) {
            zed_log_msg(LOG_INFO, "  auto replace: missing devid!");
            return;
        }

        (void) snprintf(devpath, sizeof (devpath), "%s%s",
                        DEV_BYID_PATH, new_devid);
        path = devpath;
    }

    /*
     * Construct the root vdev to pass to zpool_vdev_attach().  While adding
     * the entire vdev structure is harmless, we construct a reduced set of
     * path/physpath/wholedisk to keep it simple.
     */
    if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        return;
    }
    if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
        nvlist_free(nvroot);
        return;
    }

    if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
            nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
            (physpath != NULL && nvlist_add_string(newvd,
                    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
            nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
            nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
            nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
                                    1) != 0) {
        zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
        nvlist_free(newvd);
        nvlist_free(nvroot);
        return;
    }

    nvlist_free(newvd);

    /*
     * auto replace a leaf disk at same physical location
     */
    ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

    zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
                fullpath, path, (ret == 0) ? "no errors" :
                libzfs_error_description(g_zfshdl));

    nvlist_free(nvroot);
}
Example #13
static int
topo_prop_set(tnode_t *node, const char *pgname, const char *pname,
    topo_type_t type, int flag, void *val, int nelems, int *err)
{
	int ret;
	topo_hdl_t *thp = node->tn_hdl;
	nvlist_t *nvl;

	if (topo_hdl_nvalloc(thp, &nvl, NV_UNIQUE_NAME) < 0) {
		*err = ETOPO_PROP_NVL;
		return (-1);
	}

	ret = nvlist_add_string(nvl, TOPO_PROP_VAL_NAME, pname);
	ret |= nvlist_add_uint32(nvl, TOPO_PROP_VAL_TYPE, type);
	switch (type) {
		case TOPO_TYPE_INT32:
			ret |= nvlist_add_int32(nvl, TOPO_PROP_VAL_VAL,
			    *(int32_t *)val);
			break;
		case TOPO_TYPE_UINT32:
			ret |= nvlist_add_uint32(nvl, TOPO_PROP_VAL_VAL,
			    *(uint32_t *)val);
			break;
		case TOPO_TYPE_INT64:
			ret |= nvlist_add_int64(nvl, TOPO_PROP_VAL_VAL,
			    *(int64_t *)val);
			break;
		case TOPO_TYPE_UINT64:
			ret |= nvlist_add_uint64(nvl, TOPO_PROP_VAL_VAL,
			    *(uint64_t *)val);
			break;
		case TOPO_TYPE_DOUBLE:
			ret |= nvlist_add_double(nvl, TOPO_PROP_VAL_VAL,
			    *(double *)val);
			break;
		case TOPO_TYPE_STRING:
			ret |= nvlist_add_string(nvl, TOPO_PROP_VAL_VAL,
			    (char *)val);
			break;
		case TOPO_TYPE_FMRI:
			ret |= nvlist_add_nvlist(nvl, TOPO_PROP_VAL_VAL,
			    (nvlist_t *)val);
			break;
		case TOPO_TYPE_INT32_ARRAY:
			ret |= nvlist_add_int32_array(nvl,
			    TOPO_PROP_VAL_VAL, (int32_t *)val, nelems);
			break;
		case TOPO_TYPE_UINT32_ARRAY:
			ret |= nvlist_add_uint32_array(nvl,
			    TOPO_PROP_VAL_VAL, (uint32_t *)val, nelems);
			break;
		case TOPO_TYPE_INT64_ARRAY:
			ret |= nvlist_add_int64_array(nvl,
			    TOPO_PROP_VAL_VAL, (int64_t *)val, nelems);
			break;
		case TOPO_TYPE_UINT64_ARRAY:
			ret |= nvlist_add_uint64_array(nvl,
			    TOPO_PROP_VAL_VAL, (uint64_t *)val, nelems);
			break;
		case TOPO_TYPE_STRING_ARRAY:
			ret |= nvlist_add_string_array(nvl,
			    TOPO_PROP_VAL_VAL, (char **)val, nelems);
			break;
		case TOPO_TYPE_FMRI_ARRAY:
			ret |= nvlist_add_nvlist_array(nvl,
			    TOPO_PROP_VAL_VAL, (nvlist_t **)val, nelems);
			break;
		default:
			/* unknown property type: free the nvlist before bailing */
			nvlist_free(nvl);
			*err = ETOPO_PROP_TYPE;
			return (-1);
	}

	if (ret != 0) {
		nvlist_free(nvl);
		if (ret == ENOMEM) {
			*err = ETOPO_PROP_NOMEM;
			return (-1);
		} else {
			*err = ETOPO_PROP_NVL;
			return (-1);
		}
	}

	if (topo_prop_setprop(node, pgname, nvl, flag, nvl, err) != 0) {
		nvlist_free(nvl);
		return (-1); /* err set */
	}
	nvlist_free(nvl);
	return (ret);
}
Example #14
nvlist_t *
cmd_mkboard_fru(fmd_hdl_t *hdl, char *frustr, char *serialstr, char *partstr) {

	char *nac, *nac_name;
	int n, i, len;
	nvlist_t *fru, **hc_list;

	if (frustr == NULL)
		return (NULL);

	if ((nac_name = strstr(frustr, "MB")) == NULL)
		return (NULL);

	len = strlen(nac_name) + 1;

	nac = fmd_hdl_zalloc(hdl, len, FMD_SLEEP);
	(void) strcpy(nac, nac_name);

	n = cmd_count_components(nac, '/');

	fmd_hdl_debug(hdl, "cmd_mkboard_fru: nac=%s components=%d\n", nac, n);

	hc_list = fmd_hdl_zalloc(hdl, sizeof (nvlist_t *)*n, FMD_SLEEP);

	for (i = 0; i < n; i++) {
		(void) nvlist_alloc(&hc_list[i],
		    NV_UNIQUE_NAME|NV_UNIQUE_NAME_TYPE, 0);
	}

	if (cmd_breakup_components(nac, "/", hc_list) < 0) {
		for (i = 0; i < n; i++) {
			if (hc_list[i] != NULL)
			    nvlist_free(hc_list[i]);
		}
		fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
		fmd_hdl_free(hdl, nac, len);
		return (NULL);
	}

	if (nvlist_alloc(&fru, NV_UNIQUE_NAME, 0) != 0) {
		for (i = 0; i < n; i++) {
			if (hc_list[i] != NULL)
			    nvlist_free(hc_list[i]);
		}
		fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
		fmd_hdl_free(hdl, nac, len);
		return (NULL);
	}

	if (nvlist_add_uint8(fru, FM_VERSION, FM_HC_SCHEME_VERSION) != 0 ||
	    nvlist_add_string(fru, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0 ||
	    nvlist_add_string(fru, FM_FMRI_HC_ROOT, "") != 0 ||
	    nvlist_add_uint32(fru, FM_FMRI_HC_LIST_SZ, n) != 0 ||
	    nvlist_add_nvlist_array(fru, FM_FMRI_HC_LIST, hc_list, n) != 0) {
		for (i = 0; i < n; i++) {
			if (hc_list[i] != NULL)
			    nvlist_free(hc_list[i]);
		}
		fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
		fmd_hdl_free(hdl, nac, len);
		nvlist_free(fru);
		return (NULL);
	}

	for (i = 0; i < n; i++) {
		if (hc_list[i] != NULL)
		    nvlist_free(hc_list[i]);
	}
	fmd_hdl_free(hdl, hc_list, sizeof (nvlist_t *)*n);
	fmd_hdl_free(hdl, nac, len);

	if ((serialstr != NULL &&
	    nvlist_add_string(fru, FM_FMRI_HC_SERIAL_ID, serialstr) != 0) ||
	    (partstr != NULL &&
	    nvlist_add_string(fru, FM_FMRI_HC_PART, partstr) != 0)) {
		nvlist_free(fru);
		return (NULL);
	}

	return (fru);
}
Example #15
int
main(int argc, char *argv[])
{
	fmd_msg_hdl_t *h;
	pid_t pid;
	int i, err = 0;
	char *s;

	nvlist_t *auth, *fmri, *list, *test_arr[TEST_ARR_SZ];
	const char *code = "TEST-8000-08";
	int64_t tod[] = { 0x9400000, 0 };

	if (argc > 1) {
		(void) fprintf(stderr, "Usage: %s\n", argv[0]);
		return (2);
	}

	/*
	 * Build up a valid list.suspect event for a fictional diagnosis
	 * using a diagnosis code from our test dictionary so we can format
	 * messages.
	 */
	if (nvlist_alloc(&auth, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_alloc(&fmri, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_alloc(&list, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr, "%s: nvlist_alloc failed\n", argv[0]);
		return (1);
	}

	err |= nvlist_add_uint8(auth, FM_VERSION, FM_FMRI_AUTH_VERSION);
	err |= nvlist_add_string(auth, FM_FMRI_AUTH_PRODUCT, "product");
	err |= nvlist_add_string(auth, FM_FMRI_AUTH_PRODUCT_SN, "product_sn");
	err |= nvlist_add_string(auth, FM_FMRI_AUTH_CHASSIS, "chassis");
	err |= nvlist_add_string(auth, FM_FMRI_AUTH_DOMAIN, "domain");
	err |= nvlist_add_string(auth, FM_FMRI_AUTH_SERVER, "server");

	if (err != 0) {
		(void) fprintf(stderr, "%s: failed to build auth nvlist: %s\n",
		    argv[0], strerror(err));
		return (1);
	}

	err |= nvlist_add_uint8(fmri, FM_VERSION, FM_FMD_SCHEME_VERSION);
	err |= nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_FMD);
	err |= nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY, auth);
	err |= nvlist_add_string(fmri, FM_FMRI_FMD_NAME, "fmd_msg_test");
	err |= nvlist_add_string(fmri, FM_FMRI_FMD_VERSION, "1.0");

	if (err != 0) {
		(void) fprintf(stderr, "%s: failed to build fmri nvlist: %s\n",
		    argv[0], strerror(err));
		return (1);
	}

	err |= nvlist_add_uint8(list, FM_VERSION, FM_SUSPECT_VERSION);
	err |= nvlist_add_string(list, FM_CLASS, FM_LIST_SUSPECT_CLASS);
	err |= nvlist_add_string(list, FM_SUSPECT_UUID, "12345678");
	err |= nvlist_add_string(list, FM_SUSPECT_DIAG_CODE, code);
	err |= nvlist_add_int64_array(list, FM_SUSPECT_DIAG_TIME, tod, 2);
	err |= nvlist_add_nvlist(list, FM_SUSPECT_DE, fmri);
	err |= nvlist_add_uint32(list, FM_SUSPECT_FAULT_SZ, 0);

	/*
	 * Add a contrived nvlist array to our list.suspect so that we can
	 * exercise the expansion syntax for dereferencing nvlist array members
	 */
	for (i = 0; i < TEST_ARR_SZ; i++) {
		if (nvlist_alloc(&test_arr[i], NV_UNIQUE_NAME, 0) != 0) {
			(void) fprintf(stderr, "%s: failed to alloc nvlist "
			    "array: %s\n", argv[0], strerror(err));
			return (1);
		}
		err |= nvlist_add_uint8(test_arr[i], "index", i);
	}
	err |= nvlist_add_nvlist_array(list, "test_arr", test_arr, TEST_ARR_SZ);

	if (err != 0) {
		(void) fprintf(stderr, "%s: failed to build list nvlist: %s\n",
		    argv[0], strerror(err));
		return (1);
	}

	/*
	 * Now initialize the libfmd_msg library for testing, using the message
	 * catalogs found in the proto area of the current workspace.
	 */
	if ((h = fmd_msg_init(getenv("ROOT"), FMD_MSG_VERSION)) == NULL) {
		(void) fprintf(stderr, "%s: fmd_msg_init failed: %s\n",
		    argv[0], strerror(errno));
		return (1);
	}

	/*
	 * Test 0: Verify that both fmd_msg_getitem_id and fmd_msg_gettext_id
	 * return NULL and EINVAL for an illegal message code, and NULL
	 * and ENOENT for a valid but not defined message code.
	 */
	s = fmd_msg_getitem_id(h, NULL, "I_AM_NOT_VALID", 0);
	if (s != NULL || errno != EINVAL) {
		(void) fprintf(stderr, "%s: test0 FAIL: illegal code returned "
		    "s = %p, errno = %d\n", argv[0], (void *)s, errno);
		return (1);
	}

	s = fmd_msg_gettext_id(h, NULL, "I_AM_NOT_VALID");
	if (s != NULL || errno != EINVAL) {
		(void) fprintf(stderr, "%s: test0 FAIL: illegal code returned "
		    "s = %p, errno = %d\n", argv[0], (void *)s, errno);
		return (1);
	}

	s = fmd_msg_getitem_id(h, NULL, "I_AM_NOT_HERE-0000-0000", 0);
	if (s != NULL || errno != ENOENT) {
		(void) fprintf(stderr, "%s: test0 FAIL: missing code returned "
		    "s = %p, errno = %d\n", argv[0], (void *)s, errno);
		return (1);
	}

	s = fmd_msg_gettext_id(h, NULL, "I_AM_NOT_HERE-0000-0000");
	if (s != NULL || errno != ENOENT) {
		(void) fprintf(stderr, "%s: test0 FAIL: missing code returned "
		    "s = %p, errno = %d\n", argv[0], (void *)s, errno);
		return (1);
	}

	/*
	 * Test 1: Use fmd_msg_getitem_id to retrieve the item strings for
	 * a known message code without having any actual event handle.
	 */
	for (i = 0; i < FMD_MSG_ITEM_MAX; i++) {
		if ((s = fmd_msg_getitem_id(h, NULL, code, i)) == NULL) {
			(void) fprintf(stderr, "%s: fmd_msg_getitem_id failed "
			    "for %s, item %d: %s\n",
			    argv[0], code, i, strerror(errno));
		}

		(void) printf("code %s item %d = <<%s>>\n", code, i, s);
		free(s);
	}

	/*
	 * Test 2: Use fmd_msg_gettext_id to retrieve the complete message for
	 * a known message code without having any actual event handle.
	 */
	if ((s = fmd_msg_gettext_id(h, NULL, code)) == NULL) {
		(void) fprintf(stderr, "%s: fmd_msg_gettext_id failed for %s: "
		    "%s\n", argv[0], code, strerror(errno));
		return (1);
	}

	(void) printf("%s\n", s);
	free(s);

	/*
	 * Test 3: Use fmd_msg_getitem_nv to retrieve the item strings for
	 * our list.suspect event handle.
	 */
	for (i = 0; i < FMD_MSG_ITEM_MAX; i++) {
		if ((s = fmd_msg_getitem_nv(h, NULL, list, i)) == NULL) {
			(void) fprintf(stderr, "%s: fmd_msg_getitem_nv failed "
			    "for %s, item %d: %s\n",
			    argv[0], code, i, strerror(errno));
		}

		(void) printf("code %s item %d = <<%s>>\n", code, i, s);
		free(s);
	}

	/*
	 * Test 4: Use fmd_msg_gettext_nv to retrieve the complete message for
	 * a known message code using our list.suspect event handle.
	 */
	if ((s = fmd_msg_gettext_nv(h, NULL, list)) == NULL) {
		(void) fprintf(stderr, "%s: fmd_msg_gettext_nv failed for %s: "
		    "%s\n", argv[0], code, strerror(errno));
		return (1);
	}

	(void) printf("%s\n", s);
	free(s);

	/*
	 * Test 5: Use fmd_msg_gettext_nv to retrieve the complete message for
	 * a known message code using our list.suspect event handle, but this
	 * time set the URL to our own customized URL.  Our contrived message
	 * has been designed to exercise the key aspects of the variable
	 * expansion syntax.
	 */
	if (fmd_msg_url_set(h, "http://foo.bar.com/") != 0) {
		(void) fprintf(stderr, "%s: fmd_msg_url_set failed: %s\n",
		    argv[0], strerror(errno));
	}

	if ((s = fmd_msg_gettext_nv(h, NULL, list)) == NULL) {
		(void) fprintf(stderr, "%s: fmd_msg_gettext_nv failed for %s: "
		    "%s\n", argv[0], code, strerror(errno));
		return (1);
	}

	(void) printf("%s\n", s);
	free(s);

	for (i = 0; i < TEST_ARR_SZ; i++)
		nvlist_free(test_arr[i]);
	nvlist_free(fmri);
	nvlist_free(auth);
	nvlist_free(list);

	fmd_msg_fini(h);	/* free library state before dumping core */
	pid = fork();		/* fork into background to not bother make(1) */

	switch (pid) {
	case -1:
		(void) fprintf(stderr, "FAIL (failed to fork)\n");
		return (1);
	case 0:
		abort();
		return (1);
	}

	if (waitpid(pid, &err, 0) == -1) {
		(void) fprintf(stderr, "FAIL (failed to wait for %d: %s)\n",
		    (int)pid, strerror(errno));
		return (1);
	}

	if (WIFSIGNALED(err) == 0 || WTERMSIG(err) != SIGABRT) {
		(void) fprintf(stderr, "FAIL (child did not SIGABRT)\n");
		return (1);
	}

	if (!WCOREDUMP(err)) {
		(void) fprintf(stderr, "FAIL (no core generated)\n");
		return (1);
	}

	(void) fprintf(stderr, "done\n");
	return (0);
}
Example #16
/**
 * Create the root of the vdev tree according to the parameters (type and vdev)
 * @param psz_type: type of zpool ("" = raid0, "mirror" or "raidz")
 * @param ppsz_dev: the list of devices
 * @param i_dev: the number of devices
 * @param ppsz_error: return the error message if any
 * @return the root vdev or NULL in case of error
 */
nvlist_t *lzwu_make_root_vdev(const char *psz_type, const char **ppsz_dev, size_t i_dev, const char **ppsz_error)
{
        nvlist_t *pnv_root, **ppnv_top;
        int i_mindev, i_maxdev, i_top = 0;
        size_t i;

        /* Check the type and the required number of devices */
        if(!strncmp(psz_type, "raidz", 5))
        {
                int i_parity;
                const char *psz_parity = psz_type + 5;
                if(*psz_parity == '\0')
                        i_parity = 1;
                else if(*psz_parity == '0')
                {
                        *ppsz_error = "raidz0 does not exist";
                        return NULL;
                }
                else
                {
                        char *psz_end;
                        i_parity = strtol(psz_parity, &psz_end, 10);
                        if(i_parity < 1 || i_parity > 255 || *psz_end != '\0')
                        {
                                *ppsz_error = "raidz only accept values in [1, 255]";
                                return NULL;
                        }
                }
                i_mindev = i_parity + 1;
                i_maxdev = 255;
                psz_type = "raidz";
        }
        else if(!strcmp(psz_type, "mirror"))
        {
                i_mindev = 2;
                i_maxdev = INT_MAX;
        }
        else if(psz_type[0] == '\0')
        {
                i_mindev = 1;
                i_maxdev = INT_MAX;
        }
        else
        {
                *ppsz_error = "unknown zpool type: only '', 'mirror' and 'raidz' are handled";
                return NULL;
        }

        /* Check the number of devices */
        if(i_dev < i_mindev || i_dev > i_maxdev)
        {
                *ppsz_error = i_dev < i_mindev ? "too few devices" :
                                                 "too much devices";
                return NULL;
        }

        if(psz_type[0] == '\0')
        {
                ppnv_top = malloc(i_dev * sizeof(nvlist_t*));
                i_top = i_dev;
                for(i = 0; i < i_dev; i++)
                {
                        ppnv_top[i] = lzwu_make_leaf_vdev(ppsz_dev[i]);
                        if(!ppnv_top[i])
                        {
                                size_t j;
                                for(j = 0; j < i; j++)
                                        nvlist_free(ppnv_top[j]);
                                free(ppnv_top);

                                *ppsz_error = "unable to create the vdev array";
                                return NULL;
                        }
                }
        }
        else
        {
                /* List all the devices */
                nvlist_t **pp_children = malloc(i_dev * sizeof(nvlist_t *));
                for(i = 0; i < i_dev; i++)
                {
                        pp_children[i] = lzwu_make_leaf_vdev(ppsz_dev[i]);
                        if(!pp_children[i])
                        {
                                size_t j;
                                for(j = 0; j < i; j++)
                                        nvlist_free(pp_children[j]);
                                free(pp_children);
                                *ppsz_error = "unable to create the vdev array";
                                return NULL;
                        }
                }

                // Build the list of devices
                nvlist_t *pnv_fs;
                assert(nvlist_alloc(&pnv_fs, NV_UNIQUE_NAME, 0) == 0);
                assert(nvlist_add_string(pnv_fs, ZPOOL_CONFIG_TYPE, psz_type) == 0);
                assert(nvlist_add_nvlist_array(pnv_fs, ZPOOL_CONFIG_CHILDREN, pp_children, i_dev) == 0);
                if(!strncmp(psz_type, "raidz", 5))
                        nvlist_add_uint64(pnv_fs, ZPOOL_CONFIG_NPARITY, i_mindev - 1);

                i_top = 1;
                ppnv_top = malloc(sizeof(nvlist_t*));
                ppnv_top[0] = pnv_fs;

                for(i = 0; i < i_dev; i++)
                        nvlist_free(pp_children[i]);
                free(pp_children);
        }

        /* Create the root tree */
        assert(nvlist_alloc(&pnv_root, NV_UNIQUE_NAME, 0) == 0);
        assert(nvlist_add_string(pnv_root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
        assert(nvlist_add_nvlist_array(pnv_root, ZPOOL_CONFIG_CHILDREN, ppnv_top, i_top) == 0);

        for(i = 0; i < i_top; i++)
                nvlist_free(ppnv_top[i]);
        free(ppnv_top);

        return pnv_root;
}
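A short, hypothetical caller sketch for the helper above follows; the device paths are placeholders and the follow-on pool-creation call is omitted, since the rest of the libzfswrap API is not shown in this example.

/* Hypothetical usage sketch for lzwu_make_root_vdev(). */
const char *ppsz_dev[] = { "/dev/sda", "/dev/sdb" };
const char *psz_error = NULL;
nvlist_t *pnv_root;

pnv_root = lzwu_make_root_vdev("mirror", ppsz_dev, 2, &psz_error);
if(pnv_root == NULL)
        fprintf(stderr, "unable to build the vdev tree: %s\n", psz_error);
else
{
        /* hand pnv_root to the pool creation path, then release it */
        nvlist_free(pnv_root);
}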