/*
 * Verify a single logpage.  This will do some generic validation and then
 * call the logpage-specific function for further verification.
 *
 * Returns 0 on success (including the "log page failed to load" case, which
 * is only reported via printf), -1 when the page-specific validate callback
 * fails, or the result of scsi_set_errno() on allocation failure.
 */
static int
verify_logpage(ds_scsi_info_t *sip, logpage_validation_entry_t *lp)
{
	scsi_log_header_t *lhp;
	struct scsi_extended_sense sense;
	int buflen;
	int log_length;
	int result = 0;
	uint_t kp, asc, ascq;
	nvlist_t *nvl;

	buflen = MAX_BUFLEN(scsi_log_header_t);
	if ((lhp = calloc(buflen, 1)) == NULL)
		return (scsi_set_errno(sip, EDS_NOMEM));

	bzero(&sense, sizeof (struct scsi_extended_sense));

	/*
	 * Register an (initially empty) nvlist for this page under
	 * si_state_logpage.  nvlist_add_nvlist() copies its argument, so the
	 * local list is freed and then re-looked-up to get the stored copy.
	 */
	nvl = NULL;
	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_nvlist(sip->si_state_logpage, lp->ve_desc, nvl) != 0) {
		nvlist_free(nvl);
		free(lhp);
		return (scsi_set_errno(sip, EDS_NOMEM));
	}
	nvlist_free(nvl);

	/* Fetch the copy that now lives inside si_state_logpage. */
	result = nvlist_lookup_nvlist(sip->si_state_logpage, lp->ve_desc,
	    &nvl);
	assert(result == 0);

	result = scsi_log_sense(sip, lp->ve_code, PC_CUMULATIVE,
	    (caddr_t)lhp, buflen, &kp, &asc, &ascq);

	if (result == 0) {
		/* lh_length is big-endian on the wire */
		log_length = BE_16(lhp->lh_length);
		if (nvlist_add_uint16(nvl, "length", log_length) != 0) {
			free(lhp);
			return (scsi_set_errno(sip, EDS_NOMEM));
		}

		/*
		 * Hand the parameter data (which follows the log header)
		 * to the page-specific validation routine.
		 */
		if (lp->ve_validate(sip, (scsi_log_parameter_header_t *)
		    (((char *)lhp) + sizeof (scsi_log_header_t)),
		    log_length, nvl) != 0) {
			free(lhp);
			return (-1);
		}
	} else {
		/* Best-effort: report the failure but do not fail the scan. */
		printf("failed to load %s log page (KEY=0x%x "
		    "ASC=0x%x ASCQ=0x%x)\n", lp->ve_desc, kp, asc, ascq);
	}

	free(lhp);
	return (0);
}
/*
 * Wrap 'fmri' in an nvlist under FM_PAGE_RETIRE_FMRI and issue the page
 * retire ioctl 'cmd' (version 1 protocol).  Returns 0 on success; on any
 * failure the nvlist (possibly NULL) and error code are handed to
 * cleanup_set_errno() and its result is returned.
 */
static int
fmd_agent_pageop_v1(fmd_agent_hdl_t *hdl, int cmd, nvlist_t *fmri)
{
	nvlist_t *args = NULL;
	int err;

	err = nvlist_alloc(&args, NV_UNIQUE_NAME_TYPE, 0);
	if (err == 0)
		err = nvlist_add_nvlist(args, FM_PAGE_RETIRE_FMRI, fmri);
	if (err == 0)
		err = fmd_agent_nvl_ioctl(hdl, cmd, 1, args, NULL);

	if (err != 0)
		return (cleanup_set_errno(hdl, args, NULL, err));

	nvlist_free(args);
	return (0);
}
/*
 * Convert a Python dict into a freshly allocated nvlist.  Supported value
 * types: nested dict (recursively converted), None (boolean flag), string,
 * int (stored as uint64), and bool.  On error a Python exception is set,
 * the partially built nvlist is freed, and NULL is returned.  The caller
 * owns the returned nvlist.
 */
static nvlist_t *
dict2nvl(PyObject *d)
{
	nvlist_t *nvl;
	int err;
	PyObject *key, *value;
	/*
	 * PyDict_Next() requires a Py_ssize_t iteration cursor; passing an
	 * int * is a type mismatch (and UB on LP64 platforms).
	 */
	Py_ssize_t pos = 0;

	if (!PyDict_Check(d)) {
		PyErr_SetObject(PyExc_ValueError, d);
		return (NULL);
	}

	err = nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0);
	assert(err == 0);
	while (PyDict_Next(d, &pos, &key, &value)) {
		char *keystr = PyString_AsString(key);
		if (keystr == NULL) {
			PyErr_SetObject(PyExc_KeyError, key);
			nvlist_free(nvl);
			return (NULL);
		}

		if (PyDict_Check(value)) {
			nvlist_t *valnvl = dict2nvl(value);
			if (valnvl == NULL) {
				/*
				 * Recursive conversion failed; the Python
				 * exception is already set.  Don't hand a
				 * NULL nvlist to nvlist_add_nvlist().
				 */
				nvlist_free(nvl);
				return (NULL);
			}
			err = nvlist_add_nvlist(nvl, keystr, valnvl);
			nvlist_free(valnvl);
		} else if (value == Py_None) {
			err = nvlist_add_boolean(nvl, keystr);
		} else if (PyString_Check(value)) {
			char *valstr = PyString_AsString(value);
			err = nvlist_add_string(nvl, keystr, valstr);
		} else if (PyInt_Check(value)) {
			uint64_t valint = PyInt_AsUnsignedLongLongMask(value);
			err = nvlist_add_uint64(nvl, keystr, valint);
		} else if (PyBool_Check(value)) {
			boolean_t valbool = value == Py_True ? B_TRUE : B_FALSE;
			err = nvlist_add_boolean_value(nvl, keystr, valbool);
		} else {
			PyErr_SetObject(PyExc_ValueError, value);
			nvlist_free(nvl);
			return (NULL);
		}
		assert(err == 0);
	}

	return (nvl);
}
/*
 * Fill in 'nvl' with a process-monitor fault event and post it onto 'head'.
 *
 * The event carries the class name, the full class string as its value, the
 * ENA as the resource id, and a heap copy of 'proc_node' as payload (the
 * framework owns and must eventually free nvl->data).
 *
 * Returns 0 on success, -1 if the payload copy cannot be allocated.
 *
 * NOTE(review): nvl->name and nvl->value buffer sizes are not visible here;
 * assumes they are large enough for FM_CLASS and fullclass — confirm against
 * the nvlist_t definition.
 */
static int
proc_fm_event_post(struct list_head *head, nvlist_t *nvl, char *fullclass,
    uint64_t ena, struct proc_mon *proc_node)
{
	/*
	 * Use an explicit "%s" format: passing FM_CLASS directly as the
	 * format string would misbehave if the macro ever contained '%'.
	 */
	sprintf(nvl->name, "%s", FM_CLASS);
	strcpy(nvl->value, fullclass);
	nvl->rscid = ena;

	/* Copy the monitor record; fail cleanly instead of memcpy to NULL. */
	nvl->data = malloc(sizeof (struct proc_mon));
	if (nvl->data == NULL)
		return (-1);
	memcpy(nvl->data, (void *)proc_node, sizeof (struct proc_mon));

	nvlist_add_nvlist(head, nvl);
	printf("Proc module: post %s\n", fullclass);

	return 0;
}
/*
 * Periodic timeout.
 * Calling apache_service_check().
 *
 * Builds a one-entry event list and asks apache_service_check() to populate
 * the nvlist.  Returns the list head on success, NULL on failure.
 *
 * NOTE(review): nvlist_head_alloc()/nvlist_alloc() return values are not
 * checked, and on the failure path both 'head' and 'nvl' appear to be
 * leaked -- confirm the module framework's ownership/cleanup rules before
 * changing this.
 */
struct list_head *
apache_probe(evtsrc_module_t *emp)
{
	struct list_head *head = nvlist_head_alloc();
	nvlist_t *nvl = nvlist_alloc();

	/* nvl is now owned by the list */
	nvlist_add_nvlist(head, nvl);

	if (apache_service_check(nvl) != 0) {
		printf("Apache Module: failed to check apache service\n");
		return NULL;
	}

	return head;
}
/*
 * Generate the pool's configuration based on the current in-core state.
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 *
 * Returns a newly allocated nvlist; the caller must nvlist_free() it.
 * Must be called with the config lock held as reader (asserted below).
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, RW_READER));

	if (vd == NULL)
		vd = rvd;

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/* KM_SLEEP allocations cannot fail, hence the VERIFYs below. */
	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
	    spa_version(spa)) == 0);
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
	    spa_name(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    spa_state(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    txg) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    spa_guid(spa)) == 0);

	if (vd != rvd) {
		/*
		 * Per-vdev label config: record the top-level and leaf
		 * guids, then switch to the top-level vdev since the label
		 * contains the top config.
		 */
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
		if (vd->vdev_isspare)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL) == 0);
		vd = vd->vdev_top;	/* label contains top config */
	}

	nvroot = vdev_config_generate(spa, vd, getstats, B_FALSE);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);

	return (config);
}
/*
 * Copy the common fields (tod, ena, detector FMRI) from incoming ereport
 * 'nvl' into 'erpt'.  When isRC is set and a root-complex path can be
 * derived, the detector's dev path is rewritten to point at the PCIe RC.
 * Returns 0 on success or the first lookup/dup error encountered.
 */
/* ARGSUSED */
int
fab_prep_basic_erpt(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *erpt,
    boolean_t isRC)
{
	uint64_t *now;
	uint64_t ena;
	uint_t nelem;
	nvlist_t *detector, *new_detector;
	char rcpath[255];
	int err = 0;

	/* Grab the tod, ena and detector(FMRI) */
	err |= nvlist_lookup_uint64_array(nvl, "__tod", &now, &nelem);
	err |= nvlist_lookup_uint64(nvl, "ena", &ena);
	err |= nvlist_lookup_nvlist(nvl, FM_EREPORT_DETECTOR, &detector);
	if (err)
		return (err);

	/* Make a copy of the detector */
	err = nvlist_dup(detector, &new_detector, NV_UNIQUE_NAME);
	if (err)
		return (err);

	/* Copy the tod and ena to erpt */
	(void) nvlist_add_uint64(erpt, FM_EREPORT_ENA, ena);
	(void) nvlist_add_uint64_array(erpt, "__tod", now, nelem);

	/*
	 * Create the correct ROOT FMRI from PCIe leaf fabric ereports.  Used
	 * only by fab_prep_fake_rc_erpt.  See the fab_pciex_fake_rc_erpt_tbl
	 * comments for more information.
	 */
	if (isRC && fab_get_rcpath(hdl, nvl, rcpath)) {
		/* Create the correct PCIe RC new_detector aka FMRI */
		(void) nvlist_remove(new_detector, FM_FMRI_DEV_PATH,
		    DATA_TYPE_STRING);
		(void) nvlist_add_string(new_detector, FM_FMRI_DEV_PATH,
		    rcpath);
	}

	/* Copy the FMRI to erpt; erpt owns a copy, so free ours. */
	(void) nvlist_add_nvlist(erpt, FM_EREPORT_DETECTOR, new_detector);

	nvlist_free(new_detector);
	/* err is 0 here: the dup succeeded and adds are best-effort. */
	return (err);
}
/*
 * Close the current nesting level: attach the nvlist(s) collected on the
 * top list_wrap frame to their parent nvlist (as an array or a single
 * child), then pop and free the frame.  Returns 0 on success, -1 on error.
 */
static int
ch_end(list_wrap_t **lw, boolean_t array, int argc, char **argv)
{
	list_wrap_t *cur = *lw;
	list_wrap_t *up;
	nvlist_t *dest;
	char *ent_name;

	if (list_wrap_depth(cur) < 2) {
		(void) fprintf(stderr, "ERROR: not nested, cannot end.\n");
		return (-1);
	}

	up = cur->lw_next;
	dest = up->lw_nvl[up->lw_pos];
	ent_name = cur->lw_name;

	if (cur->lw_array) {
		/* An array of objects: add every collected child at once. */
		if (nvlist_add_nvlist_array(dest, ent_name, cur->lw_nvl,
		    cur->lw_pos + 1) != 0) {
			(void) fprintf(stderr, "fail at "
			    "nvlist_add_nvlist_array\n");
			return (-1);
		}
	} else {
		/* A single object; the frame must hold exactly one child. */
		if (cur->lw_pos != 0)
			abort();
		if (nvlist_add_nvlist(dest, ent_name, cur->lw_nvl[0]) != 0) {
			(void) fprintf(stderr,
			    "fail at nvlist_add_nvlist\n");
			return (-1);
		}
	}

	*lw = list_wrap_pop_and_free(cur);
	return (0);
}
/*
 * Look up property group 'pgname' on the node named by FMRI 'rsrc' and
 * return it in *pgroup.  On failure, -1 is returned with *err set to a
 * TOPO errno value.
 *
 * NOTE(review): 'in' is passed to set_error() on every error path --
 * assumes set_error() frees the nvlist it is handed; confirm, otherwise
 * the returns after topo_hdl_nvalloc() would leak it.
 */
int
topo_fmri_getpgrp(topo_hdl_t *thp, nvlist_t *rsrc, const char *pgname,
    nvlist_t **pgroup, int *err)
{
	int rv;
	nvlist_t *in = NULL;
	tnode_t *rnode;
	char *scheme;

	/* The FMRI must carry a scheme so we can find that scheme's root. */
	if (nvlist_lookup_string(rsrc, FM_FMRI_SCHEME, &scheme) != 0)
		return (set_error(thp, ETOPO_FMRI_MALFORM, err,
		    TOPO_METH_PROP_GET, in));

	if ((rnode = topo_hdl_root(thp, scheme)) == NULL)
		return (set_error(thp, ETOPO_METHOD_NOTSUP, err,
		    TOPO_METH_PROP_GET, in));

	if (topo_hdl_nvalloc(thp, &in, NV_UNIQUE_NAME) != 0)
		return (set_error(thp, ETOPO_FMRI_NVL, err,
		    TOPO_METH_PROP_GET, in));

	/* Build the method-input nvlist: the resource plus the group name. */
	rv = nvlist_add_nvlist(in, TOPO_PROP_RESOURCE, rsrc);
	rv |= nvlist_add_string(in, TOPO_PROP_GROUP, pgname);
	if (rv != 0)
		return (set_error(thp, ETOPO_FMRI_NVL, err,
		    TOPO_METH_PROP_GET, in));

	*pgroup = NULL;
	rv = topo_method_invoke(rnode, TOPO_METH_PGRP_GET,
	    TOPO_METH_PGRP_GET_VERSION, in, pgroup, err);

	nvlist_free(in);

	if (rv != 0)
		return (-1);	/* *err is set for us */

	/* The method may "succeed" without producing a group. */
	if (*pgroup == NULL)
		return (set_error(thp, ETOPO_PROP_NOENT, err,
		    TOPO_METH_PROP_GET, NULL));

	return (0);
}
/*
 * topo_fmri_create
 *
 * If possible, creates an FMRI of the requested version in the
 * requested scheme.  Args are passed as part of the inputs to the
 * fmri-create method of the scheme.
 *
 * Returns the method's output nvlist (caller frees) or NULL with *err set.
 *
 * NOTE(review): 'ins' is passed to set_nverror() on every post-allocation
 * error path -- assumes set_nverror() frees the nvlist it is handed;
 * confirm against its definition.
 */
nvlist_t *
topo_fmri_create(topo_hdl_t *thp, const char *scheme, const char *name,
    topo_instance_t inst, nvlist_t *nvl, int *err)
{
	nvlist_t *ins;
	nvlist_t *out;
	tnode_t *rnode;

	ins = out = NULL;

	if ((rnode = topo_hdl_root(thp, scheme)) == NULL)
		return (set_nverror(thp, ETOPO_METHOD_NOTSUP, err,
		    TOPO_METH_FMRI, NULL));

	if ((*err = topo_hdl_nvalloc(thp, &ins, NV_UNIQUE_NAME)) != 0)
		return (set_nverror(thp, ETOPO_FMRI_NVL, err,
		    TOPO_METH_FMRI, NULL));

	/* Required method arguments: node name and instance number. */
	if (nvlist_add_string(ins, TOPO_METH_FMRI_ARG_NAME, name) != 0 ||
	    nvlist_add_uint32(ins, TOPO_METH_FMRI_ARG_INST, inst) != 0) {
		return (set_nverror(thp, ETOPO_FMRI_NVL, err,
		    TOPO_METH_FMRI, ins));
	}

	/* Optional caller-supplied argument nvlist. */
	if (nvl != NULL &&
	    nvlist_add_nvlist(ins, TOPO_METH_FMRI_ARG_NVL, nvl) != 0) {
		return (set_nverror(thp, ETOPO_FMRI_NVL, err,
		    TOPO_METH_FMRI, ins));
	}

	if (topo_method_invoke(rnode, TOPO_METH_FMRI, TOPO_METH_FMRI_VERSION,
	    ins, &out, err) != 0) {
		return (set_nverror(thp, *err, err, TOPO_METH_FMRI, ins));
	}

	nvlist_free(ins);

	return (out);
}
/*
 * "Fatal" variant of nvlist_add_nvlist(): adds 'val' under 'name' and
 * panics (VERIFY0) on failure, so callers need not check for errors.
 */
void
fnvlist_add_nvlist(nvlist_t *nvl, const char *name, nvlist_t *val)
{
	VERIFY0(nvlist_add_nvlist(nvl, name, val));
}
/*
 * Pack internal format cache data to a single nvlist.
 * Used when writing the nvlist file.
 * Note this is called indirectly by the nvpflush daemon.
 *
 * On success, *ret_nvl holds the new nvlist (caller frees) and DDI_SUCCESS
 * is returned; on failure everything is freed, *ret_nvl is NULL and
 * DDI_FAILURE is returned.
 */
static int
sdev_ncache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t *nvl, *sub_nvl;
	nvp_devname_t *np;
	int rval;
	list_t *listp;

	ASSERT(fd == sdevfd_handle);
	ASSERT(RW_WRITE_HELD(nvf_lock(fd)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	/*
	 * The cache list holds at most one entry (asserted below); pack its
	 * path and expire-count arrays into a sub-nvlist under DP_DEVNAME_ID.
	 */
	listp = nvf_list(sdevfd_handle);
	if ((np = list_head(listp)) != NULL) {
		ASSERT(list_next(listp, np) == NULL);

		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			/* ensure the err path frees a defined pointer */
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_string_array(sub_nvl,
		    DP_DEVNAME_NCACHE_ID, np->nvp_paths, np->nvp_npaths);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sdev)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_int32_array(sub_nvl,
		    DP_DEVNAME_NC_EXPIRECNT_ID,
		    np->nvp_expirecnts, np->nvp_npaths);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sdev)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, DP_DEVNAME_ID, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		/* nvl now owns a copy of sub_nvl */
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}
/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 *
 * Returns a newly allocated nvlist; the caller must nvlist_free() it.
 * If vd is NULL the config lock is taken (and released) here; otherwise
 * the caller must already hold SCL_CONFIG|SCL_STATE as reader.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/* KM_SLEEP allocations cannot fail, hence the VERIFYs. */
	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
	    spa_version(spa)) == 0);
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
	    spa_name(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    spa_state(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    txg) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    spa_guid(spa)) == 0);
	VERIFY(spa->spa_comment == NULL || nvlist_add_string(config,
	    ZPOOL_CONFIG_COMMENT, spa->spa_comment) == 0);

#ifdef	_KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
		    hostid) == 0);
	}
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
	    utsname.nodename) == 0);

	if (vd != rvd) {
		/*
		 * Per-vdev label config: record top and leaf guids, then
		 * switch to the top-level vdev.
		 */
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
		if (vd->vdev_isspare)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL) == 0);
		if (vd->vdev_islog)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL) == 0);
		vd = vd->vdev_top;	/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting) == 0);
	}

	/*
	 * Add the top-level config.  We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID,
		    split_guid) == 0);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features) == 0);

	/* Dedup statistics are only gathered for a fully loaded pool. */
	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t)) == 0);
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t)) == 0);
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t)) == 0);
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held.  Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS.  This exposes a window where
 * the MOS config will have been updated but the cache file has not.  If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicity import
 * would be required.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	boolean_t ccw_failure;
	int error;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/* Nothing to do without a root dir or when the pool is read-only. */
	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	ccw_failure = B_FALSE;
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			nvlist_t *nvroot = NULL;
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly.  Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    (spa_state(spa) == POOL_STATE_ACTIVE &&
			    !spa_writeable(spa)))
				continue;

			/* Only pools sharing this cachefile path belong. */
			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			VERIFY(nvlist_add_nvlist(nvl, spa->spa_name,
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_props_lock);

			/* Scrub transient state from the stored copy. */
			if (nvlist_lookup_nvlist(nvl, spa->spa_name,
			    &nvroot) == 0)
				spa_config_clean(nvroot);
		}

		error = spa_config_write(dp, nvl);
		if (error != 0)
			ccw_failure = B_TRUE;
		nvlist_free(nvl);
	}

	if (ccw_failure) {
		/*
		 * Keep trying so that configuration data is
		 * written if/when any temporary filesystem
		 * resource issues are resolved.
		 */
		if (target->spa_ccw_fail_time == 0) {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
			    target, NULL, NULL, 0, 0);
		}
		target->spa_ccw_fail_time = gethrtime();
		spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
	} else {
		/*
		 * Do not rate limit future attempts to update
		 * the config cache.
		 */
		target->spa_ccw_fail_time = 0;
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, ESC_ZFS_CONFIG_SYNC);
}
/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 *
 * Returns a newly allocated nvlist; the caller must nvlist_free() it.
 * If vd is NULL the config lock is taken (and released) here; otherwise
 * the caller must already hold SCL_CONFIG|SCL_STATE as reader.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;
	char *pool_name;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/*
	 * Originally, users had to handle spa namespace collisions by either
	 * exporting the already imported pool or by specifying a new name for
	 * the pool with a conflicting name.  In the case of root pools from
	 * virtual guests, neither approach to collision resolution is
	 * reasonable.  This is addressed by extending the new name syntax with
	 * an option to specify that the new name is temporary.  When specified,
	 * ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
	 * to use the previous name, which we do below.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
		VERIFY0(nvlist_lookup_string(spa->spa_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name));
	} else
		pool_name = spa_name(spa);

	/* KM_SLEEP allocations cannot fail, hence the VERIFYs. */
	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
	    spa_version(spa)) == 0);
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
	    pool_name) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    spa_state(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    txg) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    spa_guid(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
	    spa->spa_errata) == 0);
	VERIFY(spa->spa_comment == NULL || nvlist_add_string(config,
	    ZPOOL_CONFIG_COMMENT, spa->spa_comment) == 0);

#ifdef	_KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
		    hostid) == 0);
	}
	VERIFY0(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
	    utsname()->nodename));

	if (vd != rvd) {
		/*
		 * Per-vdev label config: record top and leaf guids, then
		 * switch to the top-level vdev.
		 */
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
		if (vd->vdev_isspare)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL) == 0);
		if (vd->vdev_islog)
			VERIFY(nvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL) == 0);
		vd = vd->vdev_top;	/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting) == 0);
	}

	/*
	 * Add the top-level config.  We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID,
		    split_guid) == 0);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features) == 0);

	/* Dedup statistics are only gathered for a fully loaded pool. */
	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t)) == 0);
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t)) == 0);
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t)) == 0);
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held.  Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS.  This exposes a window where
 * the MOS config will have been updated but the cache file has not.  If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicity import
 * would be required.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	char *pool_name;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/* Nothing to do without a root dir or when the pool is read-only. */
	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly.  Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    !spa_writeable(spa))
				continue;

			/* Only pools sharing this cachefile path belong. */
			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			/*
			 * Pools imported under a temporary name are recorded
			 * in the cachefile under their original name.
			 */
			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
				VERIFY0(nvlist_lookup_string(spa->spa_config,
				    ZPOOL_CONFIG_POOL_NAME, &pool_name));
			} else
				pool_name = spa_name(spa);

			VERIFY(nvlist_add_nvlist(nvl, pool_name,
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_props_lock);
		}

		spa_config_write(dp, nvl);
		nvlist_free(nvl);
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, FM_EREPORT_ZFS_CONFIG_SYNC);
}
/*
 * Exercise the libnv nvlist_add_*() / nvlist_exists_*() API: add one pair of
 * every supported type, verify each addition, embed the list into itself, and
 * then re-verify everything through both the outer list and the embedded
 * copy.  Output follows the TAP protocol via CHECK().
 */
int
main(void)
{
	const nvlist_t *cnvl;
	nvlist_t *nvl;

	/* TAP plan: 94 CHECK() assertions follow. */
	printf("1..94\n");

	nvl = nvlist_create(0);

	/* NULL-type pair. */
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	nvlist_add_null(nvl, "nvlist/null");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_null(nvl, "nvlist/null"));

	/* Boolean pairs, both values. */
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool/true"));
	nvlist_add_bool(nvl, "nvlist/bool/true", true);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_bool(nvl, "nvlist/bool/true"));

	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool/false"));
	nvlist_add_bool(nvl, "nvlist/bool/false", false);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_bool(nvl, "nvlist/bool/false"));

	/* Numbers, including the extremes of the 64-bit range. */
	CHECK(!nvlist_exists_number(nvl, "nvlist/number/0"));
	nvlist_add_number(nvl, "nvlist/number/0", 0);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_number(nvl, "nvlist/number/0"));

	CHECK(!nvlist_exists_number(nvl, "nvlist/number/1"));
	nvlist_add_number(nvl, "nvlist/number/1", 1);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_number(nvl, "nvlist/number/1"));

	CHECK(!nvlist_exists_number(nvl, "nvlist/number/-1"));
	nvlist_add_number(nvl, "nvlist/number/-1", -1);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_number(nvl, "nvlist/number/-1"));

	CHECK(!nvlist_exists_number(nvl, "nvlist/number/UINT64_MAX"));
	nvlist_add_number(nvl, "nvlist/number/UINT64_MAX", UINT64_MAX);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_number(nvl, "nvlist/number/UINT64_MAX"));

	CHECK(!nvlist_exists_number(nvl, "nvlist/number/INT64_MIN"));
	nvlist_add_number(nvl, "nvlist/number/INT64_MIN", INT64_MIN);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_number(nvl, "nvlist/number/INT64_MIN"));

	CHECK(!nvlist_exists_number(nvl, "nvlist/number/INT64_MAX"));
	nvlist_add_number(nvl, "nvlist/number/INT64_MAX", INT64_MAX);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_number(nvl, "nvlist/number/INT64_MAX"));

	/* Strings: empty, single character, and 26 characters. */
	CHECK(!nvlist_exists_string(nvl, "nvlist/string/"));
	nvlist_add_string(nvl, "nvlist/string/", "");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_string(nvl, "nvlist/string/"));

	CHECK(!nvlist_exists_string(nvl, "nvlist/string/x"));
	nvlist_add_string(nvl, "nvlist/string/x", "x");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_string(nvl, "nvlist/string/x"));

	CHECK(!nvlist_exists_string(nvl,
	    "nvlist/string/abcdefghijklmnopqrstuvwxyz"));
	nvlist_add_string(nvl, "nvlist/string/abcdefghijklmnopqrstuvwxyz",
	    "abcdefghijklmnopqrstuvwxyz");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_string(nvl,
	    "nvlist/string/abcdefghijklmnopqrstuvwxyz"));

	/* printf-style string pairs (stored as plain strings). */
	CHECK(!nvlist_exists_string(nvl, "nvlist/stringf/"));
	nvlist_add_stringf(nvl, "nvlist/stringf/", "%s", "");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_string(nvl, "nvlist/stringf/"));

	CHECK(!nvlist_exists_string(nvl, "nvlist/stringf/x"));
	nvlist_add_stringf(nvl, "nvlist/stringf/x", "%s", "x");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_string(nvl, "nvlist/stringf/x"));

	CHECK(!nvlist_exists_string(nvl, "nvlist/stringf/666Xabc"));
	nvlist_add_stringf(nvl, "nvlist/stringf/666Xabc", "%d%c%s", 666, 'X',
	    "abc");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_string(nvl, "nvlist/stringf/666Xabc"));

	/* File-descriptor pair. */
	CHECK(!nvlist_exists_descriptor(nvl,
	    "nvlist/descriptor/STDERR_FILENO"));
	nvlist_add_descriptor(nvl, "nvlist/descriptor/STDERR_FILENO",
	    STDERR_FILENO);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_descriptor(nvl,
	    "nvlist/descriptor/STDERR_FILENO"));

	/*
	 * Binary pairs.  Note the second blob deliberately uses sizeof(),
	 * which includes the terminating NUL.
	 */
	CHECK(!nvlist_exists_binary(nvl, "nvlist/binary/x"));
	nvlist_add_binary(nvl, "nvlist/binary/x", "x", 1);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary/x"));

	CHECK(!nvlist_exists_binary(nvl,
	    "nvlist/binary/abcdefghijklmnopqrstuvwxyz"));
	nvlist_add_binary(nvl, "nvlist/binary/abcdefghijklmnopqrstuvwxyz",
	    "abcdefghijklmnopqrstuvwxyz",
	    sizeof("abcdefghijklmnopqrstuvwxyz"));
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_binary(nvl,
	    "nvlist/binary/abcdefghijklmnopqrstuvwxyz"));

	/*
	 * Embed the list into itself.  NOTE(review): presumably
	 * nvlist_add_nvlist() stores a copy taken at add time -- confirm
	 * against nv(9); the cnvl checks below do not probe "nvlist/nvlist".
	 */
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	nvlist_add_nvlist(nvl, "nvlist/nvlist", nvl);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));

	/* Re-verify that every pair survived all subsequent additions. */
	CHECK(nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(nvlist_exists_bool(nvl, "nvlist/bool/true"));
	CHECK(nvlist_exists_bool(nvl, "nvlist/bool/false"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number/0"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number/1"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number/-1"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number/UINT64_MAX"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number/INT64_MIN"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number/INT64_MAX"));
	CHECK(nvlist_exists_string(nvl, "nvlist/string/"));
	CHECK(nvlist_exists_string(nvl, "nvlist/string/x"));
	CHECK(nvlist_exists_string(nvl,
	    "nvlist/string/abcdefghijklmnopqrstuvwxyz"));
	CHECK(nvlist_exists_string(nvl, "nvlist/stringf/"));
	CHECK(nvlist_exists_string(nvl, "nvlist/stringf/x"));
	CHECK(nvlist_exists_string(nvl, "nvlist/stringf/666Xabc"));
	CHECK(nvlist_exists_descriptor(nvl,
	    "nvlist/descriptor/STDERR_FILENO"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary/x"));
	CHECK(nvlist_exists_binary(nvl,
	    "nvlist/binary/abcdefghijklmnopqrstuvwxyz"));
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));

	/* Verify the same pairs through the embedded copy. */
	cnvl = nvlist_get_nvlist(nvl, "nvlist/nvlist");
	CHECK(nvlist_exists_null(cnvl, "nvlist/null"));
	CHECK(nvlist_exists_bool(cnvl, "nvlist/bool/true"));
	CHECK(nvlist_exists_bool(cnvl, "nvlist/bool/false"));
	CHECK(nvlist_exists_number(cnvl, "nvlist/number/0"));
	CHECK(nvlist_exists_number(cnvl, "nvlist/number/1"));
	CHECK(nvlist_exists_number(cnvl, "nvlist/number/-1"));
	CHECK(nvlist_exists_number(cnvl, "nvlist/number/UINT64_MAX"));
	CHECK(nvlist_exists_number(cnvl, "nvlist/number/INT64_MIN"));
	CHECK(nvlist_exists_number(cnvl, "nvlist/number/INT64_MAX"));
	CHECK(nvlist_exists_string(cnvl, "nvlist/string/"));
	CHECK(nvlist_exists_string(cnvl, "nvlist/string/x"));
	CHECK(nvlist_exists_string(cnvl,
	    "nvlist/string/abcdefghijklmnopqrstuvwxyz"));
	CHECK(nvlist_exists_string(cnvl, "nvlist/stringf/"));
	CHECK(nvlist_exists_string(cnvl, "nvlist/stringf/x"));
	CHECK(nvlist_exists_string(cnvl, "nvlist/stringf/666Xabc"));
	CHECK(nvlist_exists_descriptor(cnvl,
	    "nvlist/descriptor/STDERR_FILENO"));
	CHECK(nvlist_exists_binary(cnvl, "nvlist/binary/x"));
	CHECK(nvlist_exists_binary(cnvl,
	    "nvlist/binary/abcdefghijklmnopqrstuvwxyz"));

	nvlist_destroy(nvl);

	return (0);
}
/*
 * Populate 'nvlp' with the system-attribute values visible in the given
 * xattr view for the real file underlying GFS vnode 'vp'.  The first pass
 * requests every attribute belonging to the view (F_FSID is filled directly
 * from the vfs), a single VOP_GETATTR() fetches them, and a second pass
 * copies each returned optional attribute into the nvlist.  Ephemeral
 * uid/gid values are translated to SID (domain, rid) sub-nvlists.
 * Returns 0 on success or an errno value.
 */
/* ARGSUSED */
static int
xattr_fill_nvlist(vnode_t *vp, xattr_view_t xattr_view, nvlist_t *nvlp,
    cred_t *cr, caller_context_t *ct)
{
	int error;
	f_attr_t attr;
	uint64_t fsid;
	xvattr_t xvattr;
	xoptattr_t *xoap;	/* Pointer to optional attributes */
	vnode_t *ppvp;
	const char *domain;
	uint32_t rid;

	xva_init(&xvattr);

	if ((xoap = xva_getxoptattr(&xvattr)) == NULL)
		return (EINVAL);

	/*
	 * For detecting ephemeral uid/gid
	 */
	xvattr.xva_vattr.va_mask |= (AT_UID|AT_GID);

	/*
	 * We need to access the real fs object.
	 * vp points to a GFS file; ppvp points to the real object.
	 */
	ppvp = gfs_file_parent(gfs_file_parent(vp));

	/*
	 * Iterate through the attrs associated with this view, marking each
	 * one as requested in the xvattr before the single getattr call.
	 */
	for (attr = 0; attr < F_ATTR_ALL; attr++) {
		if (xattr_view != attr_to_xattr_view(attr)) {
			continue;
		}

		switch (attr) {
		case F_SYSTEM:
			XVA_SET_REQ(&xvattr, XAT_SYSTEM);
			break;
		case F_READONLY:
			XVA_SET_REQ(&xvattr, XAT_READONLY);
			break;
		case F_HIDDEN:
			XVA_SET_REQ(&xvattr, XAT_HIDDEN);
			break;
		case F_ARCHIVE:
			XVA_SET_REQ(&xvattr, XAT_ARCHIVE);
			break;
		case F_IMMUTABLE:
			XVA_SET_REQ(&xvattr, XAT_IMMUTABLE);
			break;
		case F_APPENDONLY:
			XVA_SET_REQ(&xvattr, XAT_APPENDONLY);
			break;
		case F_NOUNLINK:
			XVA_SET_REQ(&xvattr, XAT_NOUNLINK);
			break;
		case F_OPAQUE:
			XVA_SET_REQ(&xvattr, XAT_OPAQUE);
			break;
		case F_NODUMP:
			XVA_SET_REQ(&xvattr, XAT_NODUMP);
			break;
		case F_AV_QUARANTINED:
			XVA_SET_REQ(&xvattr, XAT_AV_QUARANTINED);
			break;
		case F_AV_MODIFIED:
			XVA_SET_REQ(&xvattr, XAT_AV_MODIFIED);
			break;
		case F_AV_SCANSTAMP:
			/* Scanstamps only exist for regular files. */
			if (ppvp->v_type == VREG)
				XVA_SET_REQ(&xvattr, XAT_AV_SCANSTAMP);
			break;
		case F_CRTIME:
			XVA_SET_REQ(&xvattr, XAT_CREATETIME);
			break;
		case F_FSID:
			/*
			 * FSID comes from the vfs, not from getattr, so it
			 * is added to the nvlist immediately.
			 */
			fsid = (((uint64_t)vp->v_vfsp->vfs_fsid.val[0] << 32) |
			    (uint64_t)(vp->v_vfsp->vfs_fsid.val[1] &
			    0xffffffff));
			VERIFY(nvlist_add_uint64(nvlp, attr_to_name(attr),
			    fsid) == 0);
			break;
		case F_REPARSE:
			XVA_SET_REQ(&xvattr, XAT_REPARSE);
			break;
		case F_GEN:
			XVA_SET_REQ(&xvattr, XAT_GEN);
			break;
		case F_OFFLINE:
			XVA_SET_REQ(&xvattr, XAT_OFFLINE);
			break;
		case F_SPARSE:
			XVA_SET_REQ(&xvattr, XAT_SPARSE);
			break;
		default:
			break;
		}
	}

	error = VOP_GETATTR(ppvp, &xvattr.xva_vattr, 0, cr, ct);
	if (error)
		return (error);

	/*
	 * Process all the optional attributes together here. Notice that
	 * xoap was set when the optional attribute bits were set above.
	 * XVA_ISSET_RTN() tells us which requests the filesystem honored.
	 */
	if ((xvattr.xva_vattr.va_mask & AT_XVATTR) && xoap) {
		if (XVA_ISSET_RTN(&xvattr, XAT_READONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_READONLY),
			    xoap->xoa_readonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_HIDDEN)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_HIDDEN),
			    xoap->xoa_hidden) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SYSTEM)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SYSTEM),
			    xoap->xoa_system) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_ARCHIVE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_ARCHIVE),
			    xoap->xoa_archive) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_IMMUTABLE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_IMMUTABLE),
			    xoap->xoa_immutable) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NOUNLINK)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NOUNLINK),
			    xoap->xoa_nounlink) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_APPENDONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_APPENDONLY),
			    xoap->xoa_appendonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NODUMP)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NODUMP),
			    xoap->xoa_nodump) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OPAQUE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OPAQUE),
			    xoap->xoa_opaque) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_QUARANTINED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_QUARANTINED),
			    xoap->xoa_av_quarantined) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_MODIFIED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_MODIFIED),
			    xoap->xoa_av_modified) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_SCANSTAMP)) {
			VERIFY(nvlist_add_uint8_array(nvlp,
			    attr_to_name(F_AV_SCANSTAMP),
			    xoap->xoa_av_scanstamp,
			    sizeof (xoap->xoa_av_scanstamp)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_CREATETIME)) {
			/* timestruc exported as an array of uint64s */
			VERIFY(nvlist_add_uint64_array(nvlp,
			    attr_to_name(F_CRTIME),
			    (uint64_t *)&(xoap->xoa_createtime),
			    sizeof (xoap->xoa_createtime) /
			    sizeof (uint64_t)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_REPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_REPARSE),
			    xoap->xoa_reparse) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_GEN)) {
			VERIFY(nvlist_add_uint64(nvlp,
			    attr_to_name(F_GEN),
			    xoap->xoa_generation) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OFFLINE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OFFLINE),
			    xoap->xoa_offline) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SPARSE),
			    xoap->xoa_sparse) == 0);
		}
	}

	/*
	 * Check for optional ownersid/groupsid: a uid/gid above MAXUID is
	 * ephemeral and is translated back to its Windows SID.
	 */
	if (xvattr.xva_vattr.va_uid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbyuid(crgetzone(cr), xvattr.xva_vattr.va_uid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp,
			    attr_to_name(F_OWNERSID), nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}

	if (xvattr.xva_vattr.va_gid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbygid(crgetzone(cr), xvattr.xva_vattr.va_gid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp,
			    attr_to_name(F_GROUPSID), nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}

	return (0);
}
/*
 * Convert our list of pools into the definitive set of configurations. We
 * start by picking the best config for each toplevel vdev. Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool. We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 *
 * Returns an nvlist mapping pool name -> config, or NULL on allocation
 * failure (reported through no_memory()) or lookup error.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs. Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				/* Discard stale children/hole information. */
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool txg (if available)
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version, pool_txg;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_TXG, &pool_txg) == 0)
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_POOL_TXG, pool_txg);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			/* Grow the child array if this id is out of range. */
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs. If this is the case,
		 * create a faked up 'missing' vdev as a placeholder. We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/* The children were copied into nvroot; release our array. */
		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if (policy != NULL) {
			if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
			    policy) != 0)
				goto nomem;
		}

		/*
		 * Re-read the config from the labels; an unreadable pool
		 * is silently skipped.
		 */
		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config,
			    ZPOOL_CONFIG_HOSTNAME, hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/* Topo Methods */

/*
 * ASRU computation method for DIMM nodes.  Builds a mem-scheme FMRI from the
 * node's serial number (taken from the method arguments) and label, copies
 * any caller-supplied physaddr/offset from the hc-specific member into it,
 * and returns it wrapped in the standard property-method result nvlist.
 * Returns 0 on success or -1 with the module errno set.
 */
static int
mem_asru_compute(topo_mod_t *mod, tnode_t *node, topo_version_t version,
    nvlist_t *in, nvlist_t **out)
{
	nvlist_t *fmri, *pargs, *args, *hcs;
	char *serialid = NULL, *lbl = NULL;
	uint64_t physaddr, off;
	int e;

	/* Refuse callers speaking a newer method version than we do. */
	if (version > TOPO_METH_ASRU_COMPUTE_VERSION)
		return (topo_mod_seterrno(mod, EMOD_VER_NEW));

	/* This method is only registered for DIMM nodes. */
	if (strcmp(topo_node_name(node), DIMM) != 0)
		return (topo_mod_seterrno(mod, EMOD_METHOD_INVAL));

	/*
	 * Prefer a serial number from the property-specific arguments;
	 * fall back to the generic argument list if none was given.
	 */
	pargs = NULL;
	if (nvlist_lookup_nvlist(in, TOPO_PROP_PARGS, &pargs) == 0) {
		(void) nvlist_lookup_string(pargs, FM_FMRI_HC_SERIAL_ID,
		    &serialid);
	}
	if (serialid == NULL &&
	    nvlist_lookup_nvlist(in, TOPO_PROP_ARGS, &args) == 0) {
		(void) nvlist_lookup_string(args, FM_FMRI_HC_SERIAL_ID,
		    &serialid);
	}

	/* Label lookup is best-effort; mem_fmri_create() accepts NULL. */
	(void) topo_node_label(node, &lbl, &e);
	fmri = mem_fmri_create(mod, serialid, lbl);
	if (lbl != NULL)
		topo_mod_strfree(mod, lbl);
	if (fmri == NULL)
		return (topo_mod_seterrno(mod, EMOD_NOMEM));

	e = 0;

	/*
	 * For a memory page, 'in' includes an hc-specific member which
	 * specifies physaddr and/or offset.  Set them in the FMRI as well.
	 */
	if (pargs && nvlist_lookup_nvlist(pargs, FM_FMRI_HC_SPECIFIC,
	    &hcs) == 0) {
		if (nvlist_lookup_uint64(hcs, FM_FMRI_HC_SPECIFIC_PHYSADDR,
		    &physaddr) == 0) {
			e += nvlist_add_uint64(fmri, FM_FMRI_MEM_PHYSADDR,
			    physaddr);
		}
		if (nvlist_lookup_uint64(hcs, FM_FMRI_HC_SPECIFIC_OFFSET,
		    &off) == 0) {
			e += nvlist_add_uint64(fmri, FM_FMRI_MEM_OFFSET,
			    off);
		}
	}

	if (e != 0 || topo_mod_nvalloc(mod, out, NV_UNIQUE_NAME) < 0) {
		nvlist_free(fmri);
		return (topo_mod_seterrno(mod, EMOD_NOMEM));
	}

	/* Package the FMRI in the property-method result format. */
	e = nvlist_add_string(*out, TOPO_PROP_VAL_NAME, TOPO_PROP_ASRU);
	e |= nvlist_add_uint32(*out, TOPO_PROP_VAL_TYPE, TOPO_TYPE_FMRI);
	e |= nvlist_add_nvlist(*out, TOPO_PROP_VAL_VAL, fmri);
	nvlist_free(fmri);
	if (e != 0) {
		nvlist_free(*out);
		*out = NULL;
		return (topo_mod_seterrno(mod, EMOD_NVL_INVAL));
	}

	return (0);
}
/*
 * Convert our list of nvp_list_t's to a single nvlist
 * Used when writing the nvlist file.  On success *ret_nvl receives the new
 * list (caller owns it); on failure everything allocated here is released
 * and *ret_nvl is set to NULL.
 */
static int
sdev_nvp2nvl(nvfd_t *nvfd, nvlist_t **ret_nvl)
{
	nvp_devname_t *dnp;
	nvlist_t *top_nvl, *dn_nvl = NULL;
	int err;

	ASSERT(modrootloaded);

	err = nvlist_alloc(&top_nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (err != 0) {
		KFIOERR((CE_CONT, "%s: nvlist alloc error %d\n",
		    nvfd->nvf_name, err));
		return (DDI_FAILURE);
	}

	dnp = NVF_DEVNAME_LIST(nvfd);
	if (dnp != NULL) {
		/* At most one /dev name-cache record is expected. */
		ASSERT(NVP_DEVNAME_NEXT(dnp) == NULL);

		err = nvlist_alloc(&dn_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (err != 0) {
			KFIOERR((CE_CONT, "%s: nvlist alloc error %d\n",
			    nvfd->nvf_name, err));
			dn_nvl = NULL;
			goto fail;
		}

		err = nvlist_add_string_array(dn_nvl, DP_DEVNAME_NCACHE_ID,
		    dnp->nvp_paths, dnp->nvp_npaths);
		if (err != 0) {
			KFIOERR((CE_CONT, "%s: nvlist add error %d (sdev)\n",
			    nvfd->nvf_name, err));
			goto fail;
		}

		err = nvlist_add_int32_array(dn_nvl,
		    DP_DEVNAME_NC_EXPIRECNT_ID, dnp->nvp_expirecnts,
		    dnp->nvp_npaths);
		if (err != 0) {
			KFIOERR((CE_CONT, "%s: nvlist add error %d (sdev)\n",
			    nvfd->nvf_name, err));
			goto fail;
		}

		err = nvlist_add_nvlist(top_nvl, DP_DEVNAME_ID, dn_nvl);
		if (err != 0) {
			KFIOERR((CE_CONT,
			    "%s: nvlist add error %d (sublist)\n",
			    nvfd->nvf_name, err));
			goto fail;
		}

		/* The sublist was copied into top_nvl; drop our copy. */
		nvlist_free(dn_nvl);
	}

	*ret_nvl = top_nvl;
	return (DDI_SUCCESS);

fail:
	if (dn_nvl)
		nvlist_free(dn_nvl);
	nvlist_free(top_nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}
/*
 * Free and clear every cached per-protocol status nvlist hanging off 'dsp'
 * so a (re)scan starts from a clean slate.  nvlist_free(NULL) is a no-op,
 * so unpopulated entries are safe to pass.
 */
static void
disk_status_free_nvlists(disk_status_t *dsp)
{
	nvlist_free(dsp->ds_scsi_overtemp);
	nvlist_free(dsp->ds_scsi_predfail);
	nvlist_free(dsp->ds_scsi_testfail);
	nvlist_free(dsp->ds_ata_overtemp);
	nvlist_free(dsp->ds_ata_error);
	nvlist_free(dsp->ds_ata_testfail);
	nvlist_free(dsp->ds_ata_sataphyevt);
	dsp->ds_scsi_testfail = dsp->ds_scsi_overtemp =
	    dsp->ds_scsi_predfail = NULL;
	dsp->ds_ata_testfail = dsp->ds_ata_overtemp =
	    dsp->ds_ata_error = dsp->ds_ata_sataphyevt = NULL;
}

/*
 * Rescan the device and rebuild the fault nvlists.  Returns 0 on success.
 * Returns -1 if the scan failed for a reason other than an I/O error (for
 * an I/O error we can still report the last-known state).
 */
int
disk_status_check(disk_status_t *dsp)
{
	int err;	/* used only by the disabled SCSI path below */

	/*
	 * Scan (or rescan) the current device.  Drop any state left over
	 * from a previous scan first.
	 */
	disk_status_free_nvlists(dsp);

	/*
	 * Even if there is an I/O failure when trying to scan the device, we
	 * can still return the current state.
	 */
	if (dsp->ds_transport->dt_scan(dsp->ds_data) != 0 &&
	    dsp->ds_error != EDS_IO) {
		disk_status_free_nvlists(dsp);
		return (-1);
	}

	/*
	 * Construct the list of faults.
	 */
#if 0
	if (dsp->ds_scsi_predfail != NULL) {
		if ((err = nvlist_add_boolean_value(faults,
		    FM_EREPORT_SCSI_PREDFAIL,
		    (dsp->ds_faults & DS_FAULT_PREDFAIL) != 0)) != 0 ||
		    (err = nvlist_add_nvlist(nvl, FM_EREPORT_SCSI_PREDFAIL,
		    dsp->ds_scsi_predfail)) != 0)
			goto nverror;
	}

	if (dsp->ds_scsi_testfail != NULL) {
		if ((err = nvlist_add_boolean_value(faults,
		    FM_EREPORT_SCSI_TESTFAIL,
		    (dsp->ds_faults & DS_FAULT_TESTFAIL) != 0)) != 0 ||
		    (err = nvlist_add_nvlist(nvl, FM_EREPORT_SCSI_TESTFAIL,
		    dsp->ds_scsi_testfail)) != 0)
			goto nverror;
	}

	if (dsp->ds_scsi_overtemp != NULL) {
		if ((err = nvlist_add_boolean_value(faults,
		    FM_EREPORT_SCSI_OVERTEMP,
		    (dsp->ds_faults & DS_FAULT_OVERTEMP) != 0)) != 0 ||
		    (err = nvlist_add_nvlist(nvl, FM_EREPORT_SCSI_OVERTEMP,
		    dsp->ds_scsi_overtemp)) != 0)
			goto nverror;
	}
#endif

	/*
	 * nvlist_add_nvlist() takes (nvl, name, value); the previous code
	 * omitted the nvpair name and could not compile.  NOTE(review):
	 * the names below mirror the struct member names -- confirm
	 * against the consumers of 'faults' and substitute the canonical
	 * FM_EREPORT_* constants if they exist.  Failures to add are
	 * deliberately ignored (best-effort), as before.
	 */
	if (dsp->ds_ata_error != NULL) {
		(void) nvlist_add_nvlist(dsp->faults, "ata-error",
		    dsp->ds_ata_error);
	}
	if (dsp->ds_ata_testfail != NULL) {
		(void) nvlist_add_nvlist(dsp->faults, "ata-testfail",
		    dsp->ds_ata_testfail);
	}
	if (dsp->ds_ata_overtemp != NULL) {
		(void) nvlist_add_nvlist(dsp->faults, "ata-overtemp",
		    dsp->ds_ata_overtemp);
	}
	if (dsp->ds_ata_sataphyevt != NULL) {
		(void) nvlist_add_nvlist(dsp->faults, "ata-sataphyevt",
		    dsp->ds_ata_sataphyevt);
	}

	return (0);
}
/*
 * Parse and apply a single property assignment (pp->prop/type/value) to
 * either a topo node (when node != NULL) or an FMRI, then read the property
 * back and print it.  Errors are reported to stderr; the function returns
 * silently in all cases.
 */
static void
set_prop(topo_hdl_t *thp, tnode_t *node, nvlist_t *fmri, struct prop_args *pp)
{
	int ret, err = 0;
	topo_type_t type;
	nvlist_t *nvl, *f = NULL;
	char *end;

	if (pp->prop == NULL || pp->type == NULL || pp->value == NULL)
		return;

	if ((type = str2type(pp->type)) == TOPO_TYPE_INVALID) {
		(void) fprintf(stderr, "%s: invalid property type %s for %s\n",
		    g_pname, pp->type, pp->prop);
		return;
	}

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr, "%s: nvlist allocation failed for "
		    "%s=%s:%s\n", g_pname, pp->prop, pp->type, pp->value);
		return;
	}

	/* Property name and type tag common to every value type. */
	ret = nvlist_add_string(nvl, TOPO_PROP_VAL_NAME, pp->prop);
	ret |= nvlist_add_uint32(nvl, TOPO_PROP_VAL_TYPE, type);
	if (ret != 0) {
		(void) fprintf(stderr, "%s: invalid property type %s for %s\n",
		    g_pname, pp->type, pp->prop);
		nvlist_free(nvl);
		return;
	}

	/*
	 * Convert the string value to the declared type.  Range errors from
	 * strto*() are detected via errno (trailing garbage in pp->value is
	 * silently ignored, as before).
	 */
	errno = 0;
	switch (type) {
		case TOPO_TYPE_INT32:
		{
			int32_t val;

			val = strtol(pp->value, &end, 0);
			if (errno == ERANGE) {
				ret = -1;
				break;
			}
			ret = nvlist_add_int32(nvl, TOPO_PROP_VAL_VAL, val);
			break;
		}
		case TOPO_TYPE_UINT32:
		{
			uint32_t val;

			val = strtoul(pp->value, &end, 0);
			if (errno == ERANGE) {
				ret = -1;
				break;
			}
			ret = nvlist_add_uint32(nvl, TOPO_PROP_VAL_VAL, val);
			break;
		}
		case TOPO_TYPE_INT64:
		{
			int64_t val;

			val = strtoll(pp->value, &end, 0);
			if (errno == ERANGE) {
				ret = -1;
				break;
			}
			ret = nvlist_add_int64(nvl, TOPO_PROP_VAL_VAL, val);
			break;
		}
		case TOPO_TYPE_UINT64:
		{
			uint64_t val;

			val = strtoull(pp->value, &end, 0);
			if (errno == ERANGE) {
				ret = -1;
				break;
			}
			ret = nvlist_add_uint64(nvl, TOPO_PROP_VAL_VAL, val);
			break;
		}
		case TOPO_TYPE_STRING:
		{
			ret = nvlist_add_string(nvl, TOPO_PROP_VAL_VAL,
			    pp->value);
			break;
		}
		case TOPO_TYPE_FMRI:
		{
			/* f is allocated here and must be freed on all paths */
			if ((ret = topo_fmri_str2nvl(thp, pp->value, &f,
			    &err)) < 0)
				break;
			if ((ret = nvlist_add_nvlist(nvl, TOPO_PROP_VAL_VAL,
			    f)) != 0)
				err = ETOPO_PROP_NVL;
			break;
		}
		default:
			ret = -1;
	}

	if (ret != 0) {
		(void) fprintf(stderr, "%s: unable to set property value for "
		    "%s: %s\n", g_pname, pp->prop, topo_strerror(err));
		nvlist_free(nvl);
		/*
		 * FIX: free the FMRI nvlist too — on a failed
		 * nvlist_add_nvlist() in the TOPO_TYPE_FMRI case, f was
		 * previously leaked (nvlist_free(NULL) is a no-op otherwise).
		 */
		nvlist_free(f);
		return;
	}

	/* Apply to the node or, absent one, to the FMRI. */
	if (node != NULL) {
		if (topo_prop_setprop(node, pp->group, nvl, TOPO_PROP_MUTABLE,
		    f, &ret) < 0) {
			(void) fprintf(stderr, "%s: unable to set property "
			    "value for " "%s=%s:%s: %s\n", g_pname, pp->prop,
			    pp->type, pp->value, topo_strerror(ret));
			nvlist_free(nvl);
			nvlist_free(f);
			return;
		}
	} else {
		if (topo_fmri_setprop(thp, fmri, pp->group, nvl,
		    TOPO_PROP_MUTABLE, f, &ret) < 0) {
			(void) fprintf(stderr, "%s: unable to set property "
			    "value for " "%s=%s:%s: %s\n", g_pname, pp->prop,
			    pp->type, pp->value, topo_strerror(ret));
			nvlist_free(nvl);
			nvlist_free(f);
			return;
		}
	}

	nvlist_free(nvl);

	/*
	 * Now, get the property back for printing
	 */
	if (node != NULL) {
		if (topo_prop_getprop(node, pp->group, pp->prop, f, &nvl,
		    &err) < 0) {
			(void) fprintf(stderr, "%s: failed to get %s.%s: %s\n",
			    g_pname, pp->group, pp->prop, topo_strerror(err));
			nvlist_free(f);
			return;
		}
	} else {
		if (topo_fmri_getprop(thp, fmri, pp->group, pp->prop, f, &nvl,
		    &err) < 0) {
			(void) fprintf(stderr, "%s: failed to get %s.%s: %s\n",
			    g_pname, pp->group, pp->prop, topo_strerror(err));
			nvlist_free(f);
			return;
		}
	}

	print_pgroup(thp, node, pp->group, NULL, NULL, 0);
	print_prop_nameval(thp, node, nvl);
	nvlist_free(nvl);
	nvlist_free(f);
}
/*
 * Topo method: report failed sensors beneath a PSU or FAN node.
 *
 * Walks every sensor facility attached to the node, extracts its type,
 * state, units and (optionally) reading from the facility property group,
 * and for each sensor that topo_sensor_failed() deems failed adds a
 * per-sensor nvlist (keyed by the sensor node's name) to the output list.
 *
 * Returns 0 with *out set on success; -1 with the module errno set on
 * failure (NOTSUP for non-PSU/FAN nodes or when the facility list cannot
 * be obtained, NOMEM on allocation failure).  *out is only assigned on
 * success; on error all intermediate nvlists are freed.
 */
/*ARGSUSED*/
int
topo_method_sensor_failure(topo_mod_t *mod, tnode_t *node,
    topo_version_t version, nvlist_t *in, nvlist_t **out)
{
	const char *name = topo_node_name(node);
	topo_faclist_t faclist, *fp;
	int err;
	nvlist_t *nvl, *props, *propval, *tmp;
	int ret = -1;
	uint32_t type, state, units;
	nvpair_t *elem;
	double reading;
	char *propname;
	boolean_t has_reading;
	struct sensor_errinfo seinfo;

	/* Only PSU and FAN nodes support this method. */
	if (strcmp(name, PSU) != 0 && strcmp(name, FAN) != 0)
		return (topo_mod_seterrno(mod, ETOPO_METHOD_NOTSUP));

	if (topo_node_facility(mod->tm_hdl, node, TOPO_FAC_TYPE_SENSOR,
	    TOPO_FAC_TYPE_ANY, &faclist, &err) != 0)
		return (topo_mod_seterrno(mod, ETOPO_METHOD_NOTSUP));

	if (topo_mod_nvalloc(mod, &nvl, NV_UNIQUE_NAME) != 0)
		goto error;

	for (fp = topo_list_next(&faclist.tf_list); fp != NULL;
	    fp = topo_list_next(fp)) {
		if (topo_prop_getpgrp(fp->tf_node, TOPO_PGROUP_FACILITY,
		    &props, &err) != 0) {
			nvlist_free(nvl);
			goto error;
		}
		/* Defaults if a property is absent from the group. */
		type = state = units = 0;
		reading = 0;
		has_reading = B_FALSE;
		elem = NULL;
		/*
		 * The property group is a list of TOPO_PROP_VAL nvlists,
		 * each holding a name/value pair; pick out the ones we use.
		 */
		while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
			if (strcmp(nvpair_name(elem), TOPO_PROP_VAL) != 0 ||
			    nvpair_type(elem) != DATA_TYPE_NVLIST)
				continue;

			(void) nvpair_value_nvlist(elem, &propval);
			if (nvlist_lookup_string(propval, TOPO_PROP_VAL_NAME,
			    &propname) != 0)
				continue;

			if (strcmp(propname, TOPO_FACILITY_TYPE) == 0) {
				(void) nvlist_lookup_uint32(propval,
				    TOPO_PROP_VAL_VAL, &type);
			} else if (strcmp(propname, TOPO_SENSOR_STATE) == 0) {
				(void) nvlist_lookup_uint32(propval,
				    TOPO_PROP_VAL_VAL, &state);
			} else if (strcmp(propname, TOPO_SENSOR_UNITS) == 0) {
				(void) nvlist_lookup_uint32(propval,
				    TOPO_PROP_VAL_VAL, &units);
			} else if (strcmp(propname, TOPO_SENSOR_READING) == 0) {
				has_reading = B_TRUE;
				(void) nvlist_lookup_double(propval,
				    TOPO_PROP_VAL_VAL, &reading);
			}
		}
		if (topo_sensor_failed(type, state, &seinfo)) {
			/*
			 * Build the per-sensor result; on any allocation or
			 * add failure, unwind everything built so far.
			 * The reading is included only when present.
			 */
			tmp = NULL;
			if (topo_mod_nvalloc(mod, &tmp, NV_UNIQUE_NAME) != 0 ||
			    nvlist_add_uint32(tmp, TOPO_FACILITY_TYPE,
			    type) != 0 ||
			    nvlist_add_uint32(tmp, TOPO_SENSOR_STATE,
			    state) != 0 ||
			    nvlist_add_uint32(tmp, TOPO_SENSOR_UNITS,
			    units) != 0 ||
			    nvlist_add_boolean_value(tmp, "nonrecov",
			    seinfo.se_nonrecov) != 0 ||
			    nvlist_add_boolean_value(tmp, "predictive",
			    seinfo.se_predictive) != 0 ||
			    nvlist_add_uint32(tmp, "source",
			    seinfo.se_src) != 0 ||
			    (has_reading && nvlist_add_double(tmp,
			    TOPO_SENSOR_READING, reading) != 0) ||
			    nvlist_add_nvlist(nvl, topo_node_name(fp->tf_node),
			    tmp) != 0) {
				nvlist_free(props);
				nvlist_free(tmp);
				nvlist_free(nvl);
				ret = topo_mod_seterrno(mod,
				    ETOPO_METHOD_NOMEM);
				goto error;
			}
			/* nvlist_add_nvlist copies tmp, so free our copy. */
			nvlist_free(tmp);
		}
		nvlist_free(props);
	}
	*out = nvl;
	ret = 0;
error:
	/* Always release the facility list, on success and failure alike. */
	while ((fp = topo_list_next(&faclist.tf_list)) != NULL) {
		topo_list_delete(&faclist.tf_list, fp);
		topo_mod_free(mod, fp, sizeof (topo_faclist_t));
	}
	return (ret);
}
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	int fd;
	struct stat st;
	char *cachebuf;
	nvlist_t *rawcfg, *result;
	nvlist_t *cfg, *refreshed;
	nvpair_t *pair;
	char *poolnm;
	uint64_t cfg_guid;
	boolean_t is_active;

	/* The caller may filter by name or by guid, never both. */
	verify(poolname == NULL || guid == 0);

	/*
	 * Slurp the packed configuration out of the cache file.
	 */
	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat(fd, &st) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((cachebuf = zfs_alloc(hdl, st.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, cachebuf, st.st_size) != st.st_size) {
		(void) close(fd);
		free(cachebuf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(cachebuf, st.st_size, &rawcfg, 0) != 0) {
		free(cachebuf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(cachebuf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&result, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(rawcfg);
		return (NULL);
	}

	for (pair = nvlist_next_nvpair(rawcfg, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(rawcfg, pair)) {
		cfg = fnvpair_value_nvlist(pair);

		/* Skip pools that don't match the requested name or guid. */
		poolnm = fnvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, poolnm) != 0)
			continue;

		cfg_guid = fnvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != cfg_guid)
			continue;

		/* Already-imported pools are not import candidates. */
		if (pool_active(hdl, poolnm, cfg_guid, &is_active) != 0) {
			nvlist_free(rawcfg);
			nvlist_free(result);
			return (NULL);
		}

		if (is_active)
			continue;

		if (nvlist_add_string(cfg, ZPOOL_CONFIG_CACHEFILE,
		    cachefile) != 0) {
			(void) no_memory(hdl);
			nvlist_free(rawcfg);
			nvlist_free(result);
			return (NULL);
		}

		if ((refreshed = refresh_config(hdl, cfg)) == NULL) {
			nvlist_free(rawcfg);
			nvlist_free(result);
			return (NULL);
		}

		if (nvlist_add_nvlist(result, nvpair_name(pair),
		    refreshed) != 0) {
			(void) no_memory(hdl);
			nvlist_free(refreshed);
			nvlist_free(rawcfg);
			nvlist_free(result);
			return (NULL);
		}

		nvlist_free(refreshed);
	}

	nvlist_free(rawcfg);
	return (result);
}
static int zpool_import_by_guid(uint64_t searchguid) { int err = 0; nvlist_t *pools = NULL; nvpair_t *elem; nvlist_t *config; nvlist_t *found_config = NULL; nvlist_t *policy = NULL; boolean_t first; int flags = ZFS_IMPORT_NORMAL; uint32_t rewind_policy = ZPOOL_NO_REWIND; uint64_t pool_state, txg = -1ULL; importargs_t idata = { 0 }; #ifdef ZFS_AUTOIMPORT_ZPOOL_STATUS_OK_ONLY char *msgid; zpool_status_t reason; zpool_errata_t errata; #endif if ((g_zfs = libzfs_init()) == NULL) return (1); idata.unique = B_TRUE; /* In the future, we can capture further policy and include it here */ if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, txg) != 0 || nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0) goto error; if (!priv_ineffect(PRIV_SYS_CONFIG)) { printf("cannot discover pools: permission denied\n"); nvlist_free(policy); return (1); } idata.guid = searchguid; pools = zpool_search_import(g_zfs, &idata); if (pools == NULL && idata.exists) { printf("cannot import '%llu': a pool with that guid is already " "created/imported\n", searchguid); err = 1; } else if (pools == NULL) { printf("cannot import '%llu': no such pool available\n", searchguid); err = 1; } if (err == 1) { nvlist_free(policy); return (1); } /* * At this point we have a list of import candidate configs. Even though * we were searching by guid, we still need to post-process the list to * deal with pool state. */ err = 0; elem = NULL; first = B_TRUE; while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { verify(nvpair_value_nvlist(elem, &config) == 0); verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &pool_state) == 0); if (pool_state == POOL_STATE_DESTROYED) continue; verify(nvlist_add_nvlist(config, ZPOOL_REWIND_POLICY, policy) == 0); uint64_t guid; /* * Search for a pool by guid. 
*/ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) == 0); if (guid == searchguid) found_config = config; } /* * If we were searching for a specific pool, verify that we found a * pool, and then do the import. */ if (err == 0) { if (found_config == NULL) { printf("cannot import '%llu': no such pool available\n", searchguid); err = B_TRUE; } else { #ifdef ZFS_AUTOIMPORT_ZPOOL_STATUS_OK_ONLY reason = zpool_import_status(config, &msgid, &errata); if (reason == ZPOOL_STATUS_OK) err |= do_import(found_config, NULL, NULL, NULL, flags); else err = 1; #else err |= do_import(found_config, NULL, NULL, NULL, flags); #endif } } error: nvlist_free(pools); nvlist_free(policy); libzfs_fini(g_zfs); return (err ? 1 : 0); }
/* * Synchronize all pools to disk. This must be called with the namespace lock * held. */ void spa_config_sync(void) { spa_t *spa = NULL; nvlist_t *config; size_t buflen; char *buf; vnode_t *vp; int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX; char pathname[128]; char pathname2[128]; ASSERT(MUTEX_HELD(&spa_namespace_lock)); VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0); /* * Add all known pools to the configuration list, ignoring those with * alternate root paths. */ spa = NULL; while ((spa = spa_next(spa)) != NULL) { mutex_enter(&spa->spa_config_cache_lock); if (spa->spa_config && spa->spa_name && spa->spa_root == NULL) VERIFY(nvlist_add_nvlist(config, spa->spa_name, spa->spa_config) == 0); mutex_exit(&spa->spa_config_cache_lock); } /* * Pack the configuration into a buffer. */ VERIFY(nvlist_size(config, &buflen, NV_ENCODE_XDR) == 0); buf = kmem_alloc(buflen, KM_SLEEP); VERIFY(nvlist_pack(config, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP) == 0); /* * Write the configuration to disk. We need to do the traditional * 'write to temporary file, sync, move over original' to make sure we * always have a consistent view of the data. */ (void) snprintf(pathname, sizeof (pathname), "%s/%s", spa_config_dir, ZPOOL_CACHE_TMP); if (vn_open(pathname, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0) != 0) goto out; if (vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, NULL) == 0 && VOP_FSYNC(vp, FSYNC, kcred) == 0) { (void) snprintf(pathname2, sizeof (pathname2), "%s/%s", spa_config_dir, ZPOOL_CACHE_FILE); (void) vn_rename(pathname, pathname2, UIO_SYSSPACE); } (void) VOP_CLOSE(vp, oflags, 1, 0, kcred); VN_RELE(vp); out: (void) vn_remove(pathname, UIO_SYSSPACE, RMFILE); spa_config_generation++; kmem_free(buf, buflen); nvlist_free(config); }
/*
 * Synchronize pool configuration to disk. This must be called with the
 * namespace lock held.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/* Nothing to do before the root is mounted or in read-only mode. */
	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present. When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip the target itself when it is being removed so
			 * its entry drops out of the rewritten cachefile.
			 */
			if (spa == target && removing)
				continue;

			mutex_enter(&spa->spa_props_lock);
			/*
			 * Only pools whose current (head) cachefile matches
			 * this dirent belong in this file.
			 */
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			/* Allocate lazily: only if at least one pool matches. */
			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			VERIFY(nvlist_add_nvlist(nvl, spa->spa_name,
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_props_lock);
		}

		/*
		 * NOTE(review): spa_config_write() is presumably expected to
		 * handle nvl == NULL (no matching pools) — confirm against
		 * its definition, which is outside this view.
		 */
		spa_config_write(dp, nvl);
		nvlist_free(nvl);
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, FM_EREPORT_ZFS_CONFIG_SYNC);
}
/*
 * TAP-style exercise of the libnv nvlist API: for each value type (null,
 * bool, number, string, nvlist, descriptor, binary) verify that the key is
 * absent under every typed existence probe before insertion, present only
 * under its own type afterwards, and gone again after the corresponding
 * typed nvlist_free_*() call.  Emits a "1..232" plan followed by one TAP
 * result per CHECK().
 */
int
main(void)
{
	nvlist_t *nvl;

	printf("1..232\n");

	nvl = nvlist_create(0);

	/* null: absent under every probe before insertion. */
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/null"));
	nvlist_add_null(nvl, "nvlist/null");
	CHECK(nvlist_error(nvl) == 0);
	/* null: present only as the NULL type after insertion. */
	CHECK(nvlist_exists(nvl, "nvlist/null"));
	CHECK(nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/null"));

	/* bool: absent before, present only as bool after. */
	CHECK(!nvlist_exists(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/bool"));
	nvlist_add_bool(nvl, "nvlist/bool", true);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/bool"));
	CHECK(nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/bool"));

	/* number: absent before, present only as number after. */
	CHECK(!nvlist_exists(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/number"));
	nvlist_add_number(nvl, "nvlist/number", 0);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/number"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/number"));

	/* string: absent before, present only as string after. */
	CHECK(!nvlist_exists(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/string"));
	nvlist_add_string(nvl, "nvlist/string", "test");
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/string"));
	CHECK(nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/string"));

	/*
	 * nvlist: absent before, present only as nvlist after.
	 * NOTE(review): nvl is inserted into itself — nvlist_add_nvlist()
	 * presumably clones its argument at call time; confirm against nv(9).
	 */
	CHECK(!nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/nvlist"));
	nvlist_add_nvlist(nvl, "nvlist/nvlist", nvl);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/nvlist"));

	/* descriptor: absent before, present only as descriptor after. */
	CHECK(!nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/descriptor"));
	nvlist_add_descriptor(nvl, "nvlist/descriptor", STDERR_FILENO);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/descriptor"));

	/* binary: absent before, present only as binary after. */
	CHECK(!nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/binary"));
	nvlist_add_binary(nvl, "nvlist/binary", "test", 4);
	CHECK(nvlist_error(nvl) == 0);
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/binary"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	/* All seven keys present, each under its own type. */
	CHECK(nvlist_exists(nvl, "nvlist/null"));
	CHECK(nvlist_exists(nvl, "nvlist/bool"));
	CHECK(nvlist_exists(nvl, "nvlist/number"));
	CHECK(nvlist_exists(nvl, "nvlist/string"));
	CHECK(nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	/* Free each element in turn; only the freed key disappears. */
	nvlist_free_null(nvl, "nvlist/null");
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(nvlist_exists(nvl, "nvlist/bool"));
	CHECK(nvlist_exists(nvl, "nvlist/number"));
	CHECK(nvlist_exists(nvl, "nvlist/string"));
	CHECK(nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	nvlist_free_bool(nvl, "nvlist/bool");
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(!nvlist_exists(nvl, "nvlist/bool"));
	CHECK(nvlist_exists(nvl, "nvlist/number"));
	CHECK(nvlist_exists(nvl, "nvlist/string"));
	CHECK(nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	nvlist_free_number(nvl, "nvlist/number");
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(!nvlist_exists(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists(nvl, "nvlist/number"));
	CHECK(nvlist_exists(nvl, "nvlist/string"));
	CHECK(nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	nvlist_free_string(nvl, "nvlist/string");
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(!nvlist_exists(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists(nvl, "nvlist/number"));
	CHECK(!nvlist_exists(nvl, "nvlist/string"));
	CHECK(nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	nvlist_free_nvlist(nvl, "nvlist/nvlist");
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(!nvlist_exists(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists(nvl, "nvlist/number"));
	CHECK(!nvlist_exists(nvl, "nvlist/string"));
	CHECK(!nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	nvlist_free_descriptor(nvl, "nvlist/descriptor");
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(!nvlist_exists(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists(nvl, "nvlist/number"));
	CHECK(!nvlist_exists(nvl, "nvlist/string"));
	CHECK(!nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(nvlist_exists_binary(nvl, "nvlist/binary"));

	nvlist_free_binary(nvl, "nvlist/binary");
	CHECK(!nvlist_exists(nvl, "nvlist/null"));
	CHECK(!nvlist_exists(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists(nvl, "nvlist/number"));
	CHECK(!nvlist_exists(nvl, "nvlist/string"));
	CHECK(!nvlist_exists(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists(nvl, "nvlist/binary"));
	CHECK(!nvlist_exists_null(nvl, "nvlist/null"));
	CHECK(!nvlist_exists_bool(nvl, "nvlist/bool"));
	CHECK(!nvlist_exists_number(nvl, "nvlist/number"));
	CHECK(!nvlist_exists_string(nvl, "nvlist/string"));
	CHECK(!nvlist_exists_nvlist(nvl, "nvlist/nvlist"));
	CHECK(!nvlist_exists_descriptor(nvl, "nvlist/descriptor"));
	CHECK(!nvlist_exists_binary(nvl, "nvlist/binary"));

	/* Everything freed — the list must be empty again. */
	CHECK(nvlist_empty(nvl));

	nvlist_destroy(nvl);

	return (0);
}
/*
 * Find all 'allow' permissions from a given point and then continue
 * traversing up to the root.
 *
 * This function constructs an nvlist of nvlists.
 * each setpoint is an nvlist composed of an nvlist of an nvlist
 * of the individual * users/groups/everyone/create
 * permissions.
 *
 * The nvlist will look like this.
 *
 * { source fsname -> { whokeys { permissions,...}, ...}}
 *
 * The fsname nvpairs will be arranged in a bottom up order.  For example,
 * if we have the following structure a/b/c then the nvpairs for the fsnames
 * will be ordered a/b/c, a/b, a.
 */
int
dsl_deleg_get(const char *ddname, nvlist_t **nvp)
{
	dsl_dir_t *dd, *startdd;
	dsl_pool_t *dp;
	int error;
	objset_t *mos;

	error = dsl_dir_open(ddname, FTAG, &startdd, NULL);
	if (error)
		return (error);

	dp = startdd->dd_pool;
	mos = dp->dp_meta_objset;

	/* The caller owns *nvp and must nvlist_free() it. */
	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/* Walk from the starting dir up to the pool root. */
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	for (dd = startdd; dd != NULL; dd = dd->dd_parent) {
		zap_cursor_t basezc;
		zap_attribute_t baseza;
		nvlist_t *sp_nvp;
		uint64_t n;
		char source[MAXNAMELEN];

		/*
		 * Skip directories with no delegation ZAP or with an empty
		 * one; sp_nvp is only allocated when there is something to
		 * report for this setpoint.
		 */
		if (dd->dd_phys->dd_deleg_zapobj &&
		    (zap_count(mos, dd->dd_phys->dd_deleg_zapobj,
		    &n) == 0) && n) {
			VERIFY(nvlist_alloc(&sp_nvp,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
		} else {
			continue;
		}

		/*
		 * Outer cursor: one entry per whokey; each entry's value is
		 * the object number of a second ZAP holding the individual
		 * permission names.
		 */
		for (zap_cursor_init(&basezc, mos,
		    dd->dd_phys->dd_deleg_zapobj);
		    zap_cursor_retrieve(&basezc, &baseza) == 0;
		    zap_cursor_advance(&basezc)) {
			zap_cursor_t zc;
			zap_attribute_t za;
			nvlist_t *perms_nvp;

			ASSERT(baseza.za_integer_length == 8);
			ASSERT(baseza.za_num_integers == 1);

			VERIFY(nvlist_alloc(&perms_nvp,
			    NV_UNIQUE_NAME, KM_SLEEP) == 0);
			/* Inner cursor: each permission becomes a boolean. */
			for (zap_cursor_init(&zc, mos,
			    baseza.za_first_integer);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				VERIFY(nvlist_add_boolean(perms_nvp,
				    za.za_name) == 0);
			}
			zap_cursor_fini(&zc);
			/* nvlist_add_nvlist copies, so free our copy. */
			VERIFY(nvlist_add_nvlist(sp_nvp, baseza.za_name,
			    perms_nvp) == 0);
			nvlist_free(perms_nvp);
		}
		zap_cursor_fini(&basezc);

		dsl_dir_name(dd, source);
		VERIFY(nvlist_add_nvlist(*nvp, source, sp_nvp) == 0);
		nvlist_free(sp_nvp);
	}
	rw_exit(&dp->dp_config_rwlock);

	dsl_dir_close(startdd, FTAG);
	return (0);
}