/*
 * Prepare the common portion of a fabric ereport: copy the timestamp, ENA,
 * and detector FMRI from the incoming ereport 'nvl' into 'erpt'.  When isRC
 * is set and a root-complex path can be derived, the detector's dev-path is
 * rewritten to point at the PCIe root complex instead of the leaf device.
 *
 * Returns 0 on success or a nonzero libnvpair error code.
 */
/* ARGSUSED */
int
fab_prep_basic_erpt(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *erpt,
    boolean_t isRC)
{
	uint64_t *tod;
	uint64_t ena;
	uint_t tod_len;
	nvlist_t *detector, *det_copy;
	char rcpath[255];
	int rc;

	/* Pull the timestamp, ENA, and detector FMRI out of the ereport. */
	rc = nvlist_lookup_uint64_array(nvl, "__tod", &tod, &tod_len);
	rc |= nvlist_lookup_uint64(nvl, "ena", &ena);
	rc |= nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector);
	if (rc != 0)
		return (rc);

	/* Work on a private copy of the detector FMRI. */
	if ((rc = nvlist_dup(detector, &det_copy, NV_UNIQUE_NAME)) != 0)
		return (rc);

	/* Propagate the timestamp and ENA into the outgoing ereport. */
	(void) nvlist_add_uint64(erpt, FM_EREPORT_ENA, ena);
	(void) nvlist_add_uint64_array(erpt, "__tod", tod, tod_len);

	/*
	 * Create the correct ROOT FMRI from PCIe leaf fabric ereports.  Used
	 * only by fab_prep_fake_rc_erpt.  See the fab_pciex_fake_rc_erpt_tbl
	 * comments for more information.
	 */
	if (isRC && fab_get_rcpath(hdl, nvl, rcpath)) {
		/* Swap the leaf dev-path for the PCIe RC dev-path. */
		(void) nvlist_remove(det_copy, FM_FMRI_DEV_PATH,
		    DATA_TYPE_STRING);
		(void) nvlist_add_string(det_copy, FM_FMRI_DEV_PATH, rcpath);
	}

	/* Attach the (possibly rewritten) detector and release our copy. */
	(void) nvlist_add_nvlist(erpt, FM_EREPORT_DETECTOR, det_copy);
	nvlist_free(det_copy);

	return (rc);
}
/*
 * Add the named uint64 array to the given nvlist_t.
 *
 * @param attrs		the nvlist_t to add to
 * @param which		the string key for this element in the list
 * @param val		the uint64 array to store
 * @param nelem		the number of elements in the array
 *
 * @return 0 on success; EINVAL or ENOMEM on failure (a volume error
 *	   message is logged before returning).
 */
int
set_uint64_array(
	nvlist_t *attrs,
	char *which,
	uint64_t *val,
	uint_t nelem)
{
	int rc = nvlist_add_uint64_array(attrs, which, val, nelem);

	if (rc != 0) {
		volume_set_error(
		    gettext("nvlist_add_uint64_array(%s) failed: %d.\n"),
		    which, rc);
	}

	return (rc);
}
/*
 * Recursively walk a pool config nvlist and rename the per-vdev stats
 * array between the current name (ZPOOL_CONFIG_VDEV_STATS) and the legacy
 * name ("stats"), for ioctl compatibility between kernel and userland.
 * The direction of the rename depends on which side (_KERNEL or not) this
 * is compiled for.
 *
 * NOTE(review): this definition appears truncated in this view — the
 * closing brace of the function (and, presumably, further compat fixups)
 * lies beyond the visible source.
 */
static void
zfs_ioctl_compat_fix_stats_nvlist(nvlist_t *nvl)
{
	nvlist_t **child;
	nvlist_t *nvroot = NULL;
	vdev_stat_t *vs;
	uint_t c, children, nelem;

	/* Recurse into every child vdev first. */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			zfs_ioctl_compat_fix_stats_nvlist(child[c]);
		}
	}

	/* Also recurse into an embedded vdev tree, if present. */
	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0)
		zfs_ioctl_compat_fix_stats_nvlist(nvroot);
#ifdef _KERNEL
	if ((nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
#else
	if ((nvlist_lookup_uint64_array(nvl, "stats",
#endif
	    (uint64_t **)&vs, &nelem) == 0)) {
		/* Re-add the array under the opposite-side name... */
		nvlist_add_uint64_array(nvl,
#ifdef _KERNEL
		    "stats",
#else
		    ZPOOL_CONFIG_VDEV_STATS,
#endif
		    (uint64_t *)vs, nelem);
		/* ...then drop the original entry. */
#ifdef _KERNEL
		nvlist_remove(nvl, ZPOOL_CONFIG_VDEV_STATS,
#else
		nvlist_remove(nvl, "stats",
#endif
		    DATA_TYPE_UINT64_ARRAY);
	}
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 *
 * Returns an nvlist mapping pool name -> config, or NULL on allocation
 * failure or error (the partially built state is freed on the way out).
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;	/* top-level vdevs indexed by vdev id */
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				/* Drop any stale hole/children info first. */
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool txg (if available)
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version, pool_txg;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_TXG, &pool_txg) == 0)
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_POOL_TXG, pool_txg);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				/* hostid/hostname are restored later below */
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				/* Grow the array so index 'id' is valid. */
				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/* nvlist_add_nvlist_array copied them; drop our references */
		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if (policy != NULL) {
			if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
			    policy) != 0)
				goto nomem;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
	/* FALLTHROUGH: nomem reports, then shares the cleanup below */
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Fail-fast variant of nvlist_add_uint64_array(): adds the named uint64
 * array to 'nvl' and panics (via VERIFY0) if the add does not succeed.
 */
void
fnvlist_add_uint64_array(nvlist_t *nvl, const char *name, uint64_t *val,
    uint_t n)
{
	int rc = nvlist_add_uint64_array(nvl, name, val, n);

	VERIFY0(rc);
}
/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 *
 * If 'vd' is NULL the root vdev is used and the SCL_CONFIG|SCL_STATE locks
 * are taken (and released) here; otherwise the caller must already hold
 * them.  'txg' of -1 means "use the current config txg".  When 'getstats'
 * is set and no load is in progress, DDT statistics are included.
 * Returns a newly allocated nvlist the caller must free.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
	    spa_version(spa)) == 0);
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
	    spa_name(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    spa_state(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    txg) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    spa_guid(spa)) == 0);
	/* The comment is optional; only add it when one is set. */
	VERIFY(spa->spa_comment == NULL || nvlist_add_string(config,
	    ZPOOL_CONFIG_COMMENT, spa->spa_comment) == 0);

#ifdef _KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
		    hostid) == 0);
	}
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
	    utsname.nodename) == 0);

	if (vd != rvd) {
		/* Per-vdev (label) config: record top and leaf guids. */
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
		if (vd->vdev_isspare)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL) == 0);
		if (vd->vdev_islog)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL) == 0);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting) == 0);
	}

	/*
	 * Add the top-level config.  We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID,
		    split_guid) == 0);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features) == 0);

	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		/* DDT stats are serialized as raw uint64 arrays. */
		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t)) == 0);
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t)) == 0);
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t)) == 0);
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}
/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 *
 * This variant additionally honors ZFS_IMPORT_TEMP_NAME (reporting the
 * original pool name rather than the temporary one) and records the
 * pool's errata value.  Locking contract is the same as above: if 'vd'
 * is NULL the SCL_CONFIG|SCL_STATE locks are taken here, otherwise the
 * caller must hold them.  Returns a newly allocated nvlist.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;
	char *pool_name;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/*
	 * Originally, users had to handle spa namespace collisions by either
	 * exporting the already imported pool or by specifying a new name for
	 * the pool with a conflicting name. In the case of root pools from
	 * virtual guests, neither approach to collision resolution is
	 * reasonable. This is addressed by extending the new name syntax with
	 * an option to specify that the new name is temporary. When specified,
	 * ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
	 * to use the previous name, which we do below.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
		VERIFY0(nvlist_lookup_string(spa->spa_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name));
	} else
		pool_name = spa_name(spa);

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
	    spa_version(spa)) == 0);
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
	    pool_name) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    spa_state(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    txg) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    spa_guid(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
	    spa->spa_errata) == 0);
	/* The comment is optional; only add it when one is set. */
	VERIFY(spa->spa_comment == NULL || nvlist_add_string(config,
	    ZPOOL_CONFIG_COMMENT, spa->spa_comment) == 0);

#ifdef _KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
		    hostid) == 0);
	}
	VERIFY0(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
	    utsname()->nodename));

	if (vd != rvd) {
		/* Per-vdev (label) config: record top and leaf guids. */
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
		if (vd->vdev_isspare)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL) == 0);
		if (vd->vdev_islog)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL) == 0);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting) == 0);
	}

	/*
	 * Add the top-level config.  We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID,
		    split_guid) == 0);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features) == 0);

	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		/* DDT stats are serialized as raw uint64 arrays. */
		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t)) == 0);
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t)) == 0);
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t)) == 0);
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}
/*
 * Populate 'nvlp' with the extended system attributes of the file backing
 * the GFS xattr node 'vp', restricted to the attributes belonging to the
 * requested view ('xattr_view').  The requested attributes are gathered in
 * a single VOP_GETATTR on the real file, then marshalled into the nvlist.
 * Ephemeral (SID-mapped) owner/group identities are added as nested
 * nvlists when the uid/gid exceeds MAXUID.
 *
 * Returns 0 on success, EINVAL if optional attributes are unavailable,
 * ENOMEM on allocation failure, or the error from VOP_GETATTR.
 */
/* ARGSUSED */
static int
xattr_fill_nvlist(vnode_t *vp, xattr_view_t xattr_view, nvlist_t *nvlp,
    cred_t *cr, caller_context_t *ct)
{
	int error;
	f_attr_t attr;
	uint64_t fsid;
	xvattr_t xvattr;
	xoptattr_t *xoap;	/* Pointer to optional attributes */
	vnode_t *ppvp;
	const char *domain;
	uint32_t rid;

	xva_init(&xvattr);

	if ((xoap = xva_getxoptattr(&xvattr)) == NULL)
		return (EINVAL);

	/*
	 * For detecting ephemeral uid/gid
	 */
	xvattr.xva_vattr.va_mask |= (AT_UID|AT_GID);

	/*
	 * We need to access the real fs object.
	 * vp points to a GFS file; ppvp points to the real object.
	 */
	ppvp = gfs_file_parent(gfs_file_parent(vp));

	/*
	 * Iterate through the attrs associated with this view
	 */
	for (attr = 0; attr < F_ATTR_ALL; attr++) {
		if (xattr_view != attr_to_xattr_view(attr)) {
			continue;
		}

		/* Request each attribute from VOP_GETATTR below. */
		switch (attr) {
		case F_SYSTEM:
			XVA_SET_REQ(&xvattr, XAT_SYSTEM);
			break;
		case F_READONLY:
			XVA_SET_REQ(&xvattr, XAT_READONLY);
			break;
		case F_HIDDEN:
			XVA_SET_REQ(&xvattr, XAT_HIDDEN);
			break;
		case F_ARCHIVE:
			XVA_SET_REQ(&xvattr, XAT_ARCHIVE);
			break;
		case F_IMMUTABLE:
			XVA_SET_REQ(&xvattr, XAT_IMMUTABLE);
			break;
		case F_APPENDONLY:
			XVA_SET_REQ(&xvattr, XAT_APPENDONLY);
			break;
		case F_NOUNLINK:
			XVA_SET_REQ(&xvattr, XAT_NOUNLINK);
			break;
		case F_OPAQUE:
			XVA_SET_REQ(&xvattr, XAT_OPAQUE);
			break;
		case F_NODUMP:
			XVA_SET_REQ(&xvattr, XAT_NODUMP);
			break;
		case F_AV_QUARANTINED:
			XVA_SET_REQ(&xvattr, XAT_AV_QUARANTINED);
			break;
		case F_AV_MODIFIED:
			XVA_SET_REQ(&xvattr, XAT_AV_MODIFIED);
			break;
		case F_AV_SCANSTAMP:
			/* Scanstamps only exist on regular files. */
			if (ppvp->v_type == VREG)
				XVA_SET_REQ(&xvattr, XAT_AV_SCANSTAMP);
			break;
		case F_CRTIME:
			XVA_SET_REQ(&xvattr, XAT_CREATETIME);
			break;
		case F_FSID:
			/* fsid comes from the vfs, not VOP_GETATTR. */
			fsid = (((uint64_t)vp->v_vfsp->vfs_fsid.val[0] << 32) |
			    (uint64_t)(vp->v_vfsp->vfs_fsid.val[1] &
			    0xffffffff));
			VERIFY(nvlist_add_uint64(nvlp, attr_to_name(attr),
			    fsid) == 0);
			break;
		case F_REPARSE:
			XVA_SET_REQ(&xvattr, XAT_REPARSE);
			break;
		case F_GEN:
			XVA_SET_REQ(&xvattr, XAT_GEN);
			break;
		case F_OFFLINE:
			XVA_SET_REQ(&xvattr, XAT_OFFLINE);
			break;
		case F_SPARSE:
			XVA_SET_REQ(&xvattr, XAT_SPARSE);
			break;
		default:
			break;
		}
	}

	error = VOP_GETATTR(ppvp, &xvattr.xva_vattr, 0, cr, ct);
	if (error)
		return (error);

	/*
	 * Process all the optional attributes together here.  Notice that
	 * xoap was set when the optional attribute bits were set above.
	 */
	if ((xvattr.xva_vattr.va_mask & AT_XVATTR) && xoap) {
		if (XVA_ISSET_RTN(&xvattr, XAT_READONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_READONLY),
			    xoap->xoa_readonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_HIDDEN)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_HIDDEN),
			    xoap->xoa_hidden) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SYSTEM)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SYSTEM),
			    xoap->xoa_system) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_ARCHIVE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_ARCHIVE),
			    xoap->xoa_archive) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_IMMUTABLE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_IMMUTABLE),
			    xoap->xoa_immutable) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NOUNLINK)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NOUNLINK),
			    xoap->xoa_nounlink) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_APPENDONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_APPENDONLY),
			    xoap->xoa_appendonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NODUMP)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NODUMP),
			    xoap->xoa_nodump) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OPAQUE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OPAQUE),
			    xoap->xoa_opaque) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_QUARANTINED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_QUARANTINED),
			    xoap->xoa_av_quarantined) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_MODIFIED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_MODIFIED),
			    xoap->xoa_av_modified) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_SCANSTAMP)) {
			VERIFY(nvlist_add_uint8_array(nvlp,
			    attr_to_name(F_AV_SCANSTAMP),
			    xoap->xoa_av_scanstamp,
			    sizeof (xoap->xoa_av_scanstamp)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_CREATETIME)) {
			/* creation time marshalled as a raw uint64 array */
			VERIFY(nvlist_add_uint64_array(nvlp,
			    attr_to_name(F_CRTIME),
			    (uint64_t *)&(xoap->xoa_createtime),
			    sizeof (xoap->xoa_createtime) /
			    sizeof (uint64_t)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_REPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_REPARSE),
			    xoap->xoa_reparse) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_GEN)) {
			VERIFY(nvlist_add_uint64(nvlp,
			    attr_to_name(F_GEN),
			    xoap->xoa_generation) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OFFLINE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OFFLINE),
			    xoap->xoa_offline) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SPARSE),
			    xoap->xoa_sparse) == 0);
		}
	}

	/*
	 * Check for optional ownersid/groupsid
	 */
	if (xvattr.xva_vattr.va_uid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbyuid(crgetzone(cr), xvattr.xva_vattr.va_uid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp,
			    attr_to_name(F_OWNERSID), nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}
	if (xvattr.xva_vattr.va_gid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbygid(crgetzone(cr), xvattr.xva_vattr.va_gid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp,
			    attr_to_name(F_GROUPSID), nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}

	return (0);
}
/*
 * Marshal a single typed property value into a TOPO_PROP_VAL nvlist and
 * hand it to topo_prop_setprop() to be stored on 'node' under property
 * group 'pgname'.  'val' points to a value (or array of 'nelems' values)
 * of the kind described by 'type'.
 *
 * Returns 0 on success; on failure returns -1 with *err set to one of
 * ETOPO_PROP_NVL, ETOPO_PROP_NOMEM, or ETOPO_PROP_TYPE.
 *
 * FIX: the original leaked 'nvl' when an unknown 'type' reached the
 * switch default case — it returned -1 without nvlist_free(nvl), unlike
 * every other error path in this function.
 */
static int
topo_prop_set(tnode_t *node, const char *pgname, const char *pname,
    topo_type_t type, int flag, void *val, int nelems, int *err)
{
	int ret;
	topo_hdl_t *thp = node->tn_hdl;
	nvlist_t *nvl;

	if (topo_hdl_nvalloc(thp, &nvl, NV_UNIQUE_NAME) < 0) {
		*err = ETOPO_PROP_NVL;
		return (-1);
	}

	/* Record the property's name and type, then its typed value. */
	ret = nvlist_add_string(nvl, TOPO_PROP_VAL_NAME, pname);
	ret |= nvlist_add_uint32(nvl, TOPO_PROP_VAL_TYPE, type);
	switch (type) {
	case TOPO_TYPE_INT32:
		ret |= nvlist_add_int32(nvl, TOPO_PROP_VAL_VAL,
		    *(int32_t *)val);
		break;
	case TOPO_TYPE_UINT32:
		ret |= nvlist_add_uint32(nvl, TOPO_PROP_VAL_VAL,
		    *(uint32_t *)val);
		break;
	case TOPO_TYPE_INT64:
		ret |= nvlist_add_int64(nvl, TOPO_PROP_VAL_VAL,
		    *(int64_t *)val);
		break;
	case TOPO_TYPE_UINT64:
		ret |= nvlist_add_uint64(nvl, TOPO_PROP_VAL_VAL,
		    *(uint64_t *)val);
		break;
	case TOPO_TYPE_DOUBLE:
		ret |= nvlist_add_double(nvl, TOPO_PROP_VAL_VAL,
		    *(double *)val);
		break;
	case TOPO_TYPE_STRING:
		ret |= nvlist_add_string(nvl, TOPO_PROP_VAL_VAL,
		    (char *)val);
		break;
	case TOPO_TYPE_FMRI:
		ret |= nvlist_add_nvlist(nvl, TOPO_PROP_VAL_VAL,
		    (nvlist_t *)val);
		break;
	case TOPO_TYPE_INT32_ARRAY:
		ret |= nvlist_add_int32_array(nvl, TOPO_PROP_VAL_VAL,
		    (int32_t *)val, nelems);
		break;
	case TOPO_TYPE_UINT32_ARRAY:
		ret |= nvlist_add_uint32_array(nvl, TOPO_PROP_VAL_VAL,
		    (uint32_t *)val, nelems);
		break;
	case TOPO_TYPE_INT64_ARRAY:
		ret |= nvlist_add_int64_array(nvl, TOPO_PROP_VAL_VAL,
		    (int64_t *)val, nelems);
		break;
	case TOPO_TYPE_UINT64_ARRAY:
		ret |= nvlist_add_uint64_array(nvl, TOPO_PROP_VAL_VAL,
		    (uint64_t *)val, nelems);
		break;
	case TOPO_TYPE_STRING_ARRAY:
		ret |= nvlist_add_string_array(nvl, TOPO_PROP_VAL_VAL,
		    (char **)val, nelems);
		break;
	case TOPO_TYPE_FMRI_ARRAY:
		ret |= nvlist_add_nvlist_array(nvl, TOPO_PROP_VAL_VAL,
		    (nvlist_t **)val, nelems);
		break;
	default:
		/* Unknown type: free the nvlist (was leaked) and fail. */
		nvlist_free(nvl);
		*err = ETOPO_PROP_TYPE;
		return (-1);
	}

	if (ret != 0) {
		nvlist_free(nvl);
		*err = (ret == ENOMEM) ? ETOPO_PROP_NOMEM : ETOPO_PROP_NVL;
		return (-1);
	}

	if (topo_prop_setprop(node, pgname, nvl, flag, nvl, err) != 0) {
		nvlist_free(nvl);
		return (-1);	/* *err set by topo_prop_setprop */
	}

	nvlist_free(nvl);
	return (ret);	/* 0 at this point */
}
/*
 * fps_generate_ereport_struct(struct fps_test_ereport *report)
 * takes report and constructs an nvlist that will be used
 * for the ereport, then publishes it via fps_post_ereport().
 *
 * Returns FPU_FOROFFLINE on full success, FPU_EREPORT_INCOM if some
 * optional members could not be added, or FPU_EREPORT_FAIL if the report
 * is NULL or publishing fails.  Unrecoverable setup failures call
 * _exit(FPU_EREPORT_FAIL) and do not return.
 */
int
fps_generate_ereport_struct(struct fps_test_ereport *report)
{
	char class_name[FM_MAX_CLASS];
	char *cpu_brand;
	char *string_data;
	int expect_size;
	int is_valid_cpu;
	int mask;
	int observe_size;
	int ret;
	nvlist_t *detector;
	nvlist_t *ereport;
	nvlist_t *resource;
	uint32_t cpu_id;
	uint32_t test;
	uint8_t fps_ver;
	uint64_t ena;
	uint64_t ereport_time;
	uint64_t *expect;
	uint64_t *observe;

	if (report == NULL)
		return (FPU_EREPORT_FAIL);

	/* Unpack the report fields into locals. */
	ret = FPU_FOROFFLINE;
	cpu_id = report->cpu_id;
	test = report->test_id;
	mask = report->mask;
	is_valid_cpu = report->is_valid_cpu;
	expect_size = report->expected_size;
	expect = report->expected;
	observe_size = report->observed_size;
	observe = report->observed;
	string_data = report->info;

	/* allocate nvlists */
	if ((ereport = fps_nvlist_create()) == NULL)
		_exit(FPU_EREPORT_FAIL);

	if ((detector = fps_nvlist_create()) == NULL) {
		_exit(FPU_EREPORT_FAIL);
	}

	/* setup class */
	if ((cpu_brand = fps_get_cpu_brand(cpu_id)) == NULL)
		_exit(FPU_EREPORT_FAIL);

	if ((snprintf(class_name, FM_MAX_CLASS, "%s.%s.%s", CLASS_HEAD,
	    cpu_brand, CLASS_TAIL)) < 0)
		_exit(FPU_EREPORT_FAIL);

	/* setup ena */
	ereport_time = gethrtime();
	ena = fps_ena_generate(ereport_time, cpu_id, FM_ENA_FMT1);

	/* setup detector: the SMF service running this test */
	if (fps_fmri_svc_set(detector, getenv("SMF_FMRI")) != 0) {
		_exit(FPU_EREPORT_FAIL);
	}

	/* setup fps-version */
	fps_ver = FPS_VERSION;

	/* setup resource (the CPU FMRI), only when the cpu id is valid */
	if (is_valid_cpu) {
		resource = fps_nvlist_create();

		if (fps_fmri_cpu_set(resource, cpu_id)) {
			_exit(FPU_EREPORT_FAIL);
		}
	} else {
		resource = NULL;
	}

	/*
	 * put it together.  Mandatory members exit on failure; optional
	 * members downgrade ret to FPU_EREPORT_INCOM instead.
	 */
	if (nvlist_add_string(ereport, NAME_FPS_CLASS, class_name) != 0)
		_exit(FPU_EREPORT_FAIL);

	if (ena != 0) {
		if (nvlist_add_uint64(ereport, NAME_FPS_ENA, ena) != 0)
			_exit(FPU_EREPORT_FAIL);
	} else
		_exit(FPU_EREPORT_FAIL);

	if (nvlist_add_nvlist(ereport, NAME_FPS_DETECTOR,
	    (nvlist_t *)detector) != 0)
		_exit(FPU_EREPORT_FAIL);

	if (nvlist_add_uint8(ereport, NAME_FPS_VERSION, fps_ver) != 0)
		_exit(FPU_EREPORT_FAIL);

	if (nvlist_add_uint32(ereport, NAME_FPS_TEST_ID, test) != 0)
		ret = FPU_EREPORT_INCOM;

	if (nvlist_add_uint64_array(ereport, NAME_FPS_EXPECTED_VALUE,
	    expect, expect_size) != 0)
		ret = FPU_EREPORT_INCOM;

	if (nvlist_add_uint64_array(ereport, NAME_FPS_OBSERVED_VALUE,
	    observe, observe_size) != 0)
		ret = FPU_EREPORT_INCOM;

	if (mask & IS_EREPORT_INFO) {
		if (nvlist_add_string(ereport, NAME_FPS_STRING_DATA,
		    string_data) != 0)
			ret = FPU_EREPORT_INCOM;
	}

	if (is_valid_cpu) {
		if (nvlist_add_nvlist(ereport, NAME_FPS_RESOURCE,
		    (nvlist_t *)resource) != 0)
			_exit(FPU_EREPORT_FAIL);
	}

	/* publish */
	if (fps_post_ereport(ereport)) {
		ret = FPU_EREPORT_FAIL;
	}

	/* free nvlists */
	nvlist_free(ereport);

	if (resource != NULL)
		nvlist_free(resource);

	if (detector != NULL)
		nvlist_free(detector);

	return (ret);
}