/*
 * Load (or re-load) the pool namespace cache from the kernel.
 *
 * On first use (libzfs_ns_gen == 0) the backing AVL pool and tree are
 * created.  The current set of pool configurations is then fetched via
 * spa_all_configs(), the old cache contents are torn down, and one
 * config_node_t per pool is duplicated into the AVL tree.
 *
 * Returns 0 on success, -1 on any failure.  (All failure paths now
 * uniformly return -1; the uu_avl_create() path previously returned +1,
 * which callers checking for a negative result would have missed.)
 */
static int
namespace_reload(libzfs_handle_t *p_hdl)
{
	nvlist_t *pnv_config;
	nvpair_t *pnv_elem;
	config_node_t *p_cn;
	void *cookie;

	if (p_hdl->libzfs_ns_gen == 0) {
		/*
		 * This is the first time we've accessed the configuration
		 * cache.  Initialize the AVL tree and then fall through to the
		 * common code.
		 */
		if ((p_hdl->libzfs_ns_avlpool = uu_avl_pool_create(
		    "config_pool", sizeof (config_node_t),
		    offsetof(config_node_t, cn_avl),
		    config_node_compare, UU_DEFAULT)) == NULL)
			return (-1);

		if ((p_hdl->libzfs_ns_avl = uu_avl_create(
		    p_hdl->libzfs_ns_avlpool, NULL, UU_DEFAULT)) == NULL)
			return (-1);
	}

	pnv_config = spa_all_configs(&p_hdl->libzfs_ns_gen);
	if (pnv_config == NULL)
		return (-1);

	/*
	 * Clear out any existing configuration information.
	 */
	cookie = NULL;
	while ((p_cn = uu_avl_teardown(p_hdl->libzfs_ns_avl,
	    &cookie)) != NULL) {
		nvlist_free(p_cn->cn_config);
		free(p_cn->cn_name);
		free(p_cn);
	}

	/* Rebuild the cache: one node per pool nvpair in the config list. */
	pnv_elem = NULL;
	while ((pnv_elem = nvlist_next_nvpair(pnv_config,
	    pnv_elem)) != NULL) {
		nvlist_t *child;
		uu_avl_index_t where;

		if ((p_cn = zfs_alloc(p_hdl,
		    sizeof (config_node_t))) == NULL) {
			nvlist_free(pnv_config);
			return (-1);
		}

		if ((p_cn->cn_name = zfs_strdup(p_hdl,
		    nvpair_name(pnv_elem))) == NULL) {
			free(p_cn);
			nvlist_free(pnv_config);
			return (-1);
		}

		verify(nvpair_value_nvlist(pnv_elem, &child) == 0);
		/* Keep our own copy; pnv_config is freed before returning. */
		if (nvlist_dup(child, &p_cn->cn_config, 0) != 0) {
			free(p_cn->cn_name);
			free(p_cn);
			nvlist_free(pnv_config);
			return (-1);
		}

		/* Pool names are unique, so the insert must not collide. */
		verify(uu_avl_find(p_hdl->libzfs_ns_avl, p_cn, NULL,
		    &where) == NULL);
		uu_avl_insert(p_hdl->libzfs_ns_avl, p_cn, where);
	}

	nvlist_free(pnv_config);
	return (0);
}
/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 *
 * Returns an nvlist keyed by pool name (caller frees), or NULL on
 * out-of-memory / lookup failure.  When 'active_ok' is set (zdb), pools
 * that are currently imported are included rather than skipped.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool
		 * configuration from the first one we find, and then go
		 * through the rest and add them as necessary to the 'vdevs'
		 * member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {
				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				/* Drop stale values before re-adding. */
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool txg (if available)
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version, pool_txg;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_TXG, &pool_txg) == 0)
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_POOL_TXG, pool_txg);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				/*
				 * hostid/hostname are carried over only when
				 * the label has a hostid; both are restored
				 * into the refreshed config further below.
				 */
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				/* Grow the array so child[id] is valid. */
				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;

				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We
		 * cannot simply compress the child array, because the kernel
		 * performs certain checks to make sure the vdev IDs match
		 * their location in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/* The root vdev holds copies; release our working array. */
		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		/* Ask the kernel for the authoritative (tryimport) config. */
		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 * NOTE(review): hostid/hostname are taken from the last
		 * !config_seen pass above — verify this is the intended
		 * source when multiple labels disagree.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}
/*
 * Take a snapshot of the current state of processor sets and CPUs,
 * pack it in the exacct format, and attach it to specified exacct record.
 *
 * Caller must hold the pool lock (asserted); cpu_lock is taken here to
 * keep the CPU/pset topology stable while we walk it.  Non-global zones
 * only see their own pset.  Always returns 0.
 */
int
pool_pset_pack(ea_object_t *eo_system)
{
	ea_object_t *eo_pset, *eo_cpu;
	cpupart_t *cpupart;
	psetid_t mypsetid;
	pool_pset_t *pset;
	nvlist_t *nvl;
	size_t bufsz;
	cpu_t *cpu;
	char *buf;
	int ncpu;

	ASSERT(pool_lock_held());

	mutex_enter(&cpu_lock);
	mypsetid = zone_pset_get(curproc->p_zone);
	for (pset = list_head(&pool_pset_list); pset;
	    pset = list_next(&pool_pset_list, pset)) {
		psetid_t psetid = pset->pset_id;

		/* Zones only report the pset they are bound to. */
		if (!INGLOBALZONE(curproc) && mypsetid != psetid)
			continue;
		cpupart = cpupart_find(psetid);
		ASSERT(cpupart != NULL);
		eo_pset = ea_alloc_group(EXT_GROUP |
		    EXC_LOCAL | EXD_GROUP_PSET);
		(void) ea_attach_item(eo_pset, &psetid, sizeof (id_t),
		    EXC_LOCAL | EXD_PSET_PSETID | EXT_UINT32);
		/*
		 * Pack info for all CPUs in this processor set.
		 * cpu_list is circular, hence the do/while walk.
		 */
		ncpu = 0;
		cpu = cpu_list;
		do {
			if (cpu->cpu_part != cpupart)	/* not our pset */
				continue;
			ncpu++;
			eo_cpu = ea_alloc_group(EXT_GROUP
			    | EXC_LOCAL | EXD_GROUP_CPU);
			(void) ea_attach_item(eo_cpu, &cpu->cpu_id,
			    sizeof (processorid_t),
			    EXC_LOCAL | EXD_CPU_CPUID | EXT_UINT32);
			/* Lazily create the per-CPU property list. */
			if (cpu->cpu_props == NULL) {
				(void) nvlist_alloc(&cpu->cpu_props,
				    NV_UNIQUE_NAME, KM_SLEEP);
				(void) nvlist_add_string(cpu->cpu_props,
				    "cpu.comment", "");
			}
			/*
			 * Work on a duplicate so the transient sys_id/status
			 * entries never land in the persistent cpu_props.
			 * KM_SLEEP allocations cannot fail, so the ignored
			 * returns are safe here.
			 */
			(void) nvlist_dup(cpu->cpu_props, &nvl, KM_SLEEP);
			(void) nvlist_add_int64(nvl, "cpu.sys_id",
			    cpu->cpu_id);
			(void) nvlist_add_string(nvl, "cpu.status",
			    (char *)cpu_get_state_str(cpu));
			buf = NULL;
			bufsz = 0;
			(void) nvlist_pack(nvl, &buf, &bufsz,
			    NV_ENCODE_NATIVE, 0);
			(void) ea_attach_item(eo_cpu, buf, bufsz,
			    EXC_LOCAL | EXD_CPU_PROP | EXT_RAW);
			(void) nvlist_free(nvl);
			kmem_free(buf, bufsz);
			(void) ea_attach_to_group(eo_pset, eo_cpu);
		} while ((cpu = cpu->cpu_next) != cpu_list);

		/* Same duplicate-and-pack dance for the pset properties. */
		(void) nvlist_dup(pset->pset_props, &nvl, KM_SLEEP);
		(void) nvlist_add_uint64(nvl, "pset.size", ncpu);
		(void) nvlist_add_uint64(nvl, "pset.load",
		    (uint64_t)PSET_LOAD(cpupart->cp_hp_avenrun[0]));
		buf = NULL;
		bufsz = 0;
		(void) nvlist_pack(nvl, &buf, &bufsz, NV_ENCODE_NATIVE, 0);
		(void) ea_attach_item(eo_pset, buf, bufsz,
		    EXC_LOCAL | EXD_PSET_PROP | EXT_RAW);
		(void) nvlist_free(nvl);
		kmem_free(buf, bufsz);
		(void) ea_attach_to_group(eo_system, eo_pset);
	}
	mutex_exit(&cpu_lock);
	return (0);
}
/*
 * Loads the pool namespace, or re-loads it if the cache has changed.
 *
 * Uses ZFS_IOC_POOL_CONFIGS with the cached generation number as a cookie:
 * EEXIST means the namespace is unchanged (fast path, return 0); ENOMEM
 * means our destination buffer was too small and is grown before retrying.
 * On success the AVL cache is rebuilt from the returned nvlist.
 * Returns 0 on success, -1 (with the handle's error state set) on failure.
 */
static int
namespace_reload(libzfs_handle_t *hdl)
{
	nvlist_t *config;
	config_node_t *cn;
	nvpair_t *elem;
	zfs_cmd_t zc = {"\0"};
	void *cookie;

	if (hdl->libzfs_ns_gen == 0) {
		/*
		 * This is the first time we've accessed the configuration
		 * cache.  Initialize the AVL tree and then fall through to the
		 * common code.
		 */
		if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool",
		    sizeof (config_node_t),
		    offsetof(config_node_t, cn_avl),
		    config_node_compare, UU_DEFAULT)) == NULL)
			return (no_memory(hdl));

		if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool,
		    NULL, UU_DEFAULT)) == NULL)
			return (no_memory(hdl));
	}

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	for (;;) {
		zc.zc_cookie = hdl->libzfs_ns_gen;
		if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
			switch (errno) {
			case EEXIST:
				/*
				 * The namespace hasn't changed.
				 */
				zcmd_free_nvlists(&zc);
				return (0);

			case ENOMEM:
				/* Destination buffer too small; grow it. */
				if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
					zcmd_free_nvlists(&zc);
					return (-1);
				}
				break;

			default:
				zcmd_free_nvlists(&zc);
				return (zfs_standard_error(hdl, errno,
				    dgettext(TEXT_DOMAIN, "failed to read "
				    "pool configuration")));
			}
		} else {
			/* Success: remember the new generation number. */
			hdl->libzfs_ns_gen = zc.zc_cookie;
			break;
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	/*
	 * Clear out any existing configuration information.
	 */
	cookie = NULL;
	while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, &cookie)) != NULL) {
		nvlist_free(cn->cn_config);
		free(cn->cn_name);
		free(cn);
	}

	/* Rebuild the cache: one node per pool in the returned config. */
	elem = NULL;
	while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
		nvlist_t *child;
		uu_avl_index_t where;

		if ((cn = zfs_alloc(hdl, sizeof (config_node_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}

		if ((cn->cn_name = zfs_strdup(hdl,
		    nvpair_name(elem))) == NULL) {
			free(cn);
			nvlist_free(config);
			return (-1);
		}

		verify(nvpair_value_nvlist(elem, &child) == 0);
		if (nvlist_dup(child, &cn->cn_config, 0) != 0) {
			free(cn->cn_name);
			free(cn);
			nvlist_free(config);
			return (no_memory(hdl));
		}

		/* Pool names are unique, so this insert must not collide. */
		verify(uu_avl_find(hdl->libzfs_ns_avl, cn, NULL,
		    &where) == NULL);
		uu_avl_insert(hdl->libzfs_ns_avl, cn, where);
	}

	nvlist_free(config);
	return (0);
}
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace.  It does not actually open or load the pools; it
 * only populates the namespace.
 *
 * All failures (missing file, short read, unpack error) are silently
 * ignored — a missing cache file simply means no pools are pre-registered.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	spa_t *spa;
	char *pathname;
	struct _buf *file;
	uint64_t fsize;

	/*
	 * Open the configuration file.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	/* When rootdir is set we are in a test/alternate-root context. */
	(void) snprintf(pathname, MAXPATHLEN, "%s%s",
	    (rootdir != NULL) ? "./" : "", spa_config_path);

	file = kobj_open_file(pathname);

	kmem_free(pathname, MAXPATHLEN);

	if (file == (struct _buf *)-1)
		return;

	if (kobj_get_filesize(file, &fsize) != 0)
		goto out;

	buf = kmem_alloc(fsize, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, fsize, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		/* Skip pools that already exist in the namespace. */
		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		spa = spa_add(nvpair_name(nvpair), NULL);

		/*
		 * We blindly duplicate the configuration here.  If it's
		 * invalid, we will catch it when the pool is first opened.
		 */
		VERIFY(nvlist_dup(child, &spa->spa_config, 0) == 0);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	if (buf != NULL)
		kmem_free(buf, fsize);

	kobj_close_file(file);
}
/*
 * Function:  it_tgt_setprop()
 *
 * Validate the provided property list and set the properties for
 * the specified target.  If errlist is not NULL, returns detailed
 * errors for each property that failed.  The format for errorlist
 * is key = property, value = error string.
 *
 * Parameters:
 *
 *    cfg		The current iSCSI configuration obtained from
 *			it_config_load()
 *    tgt		Pointer to an iSCSI target structure
 *    proplist		nvlist_t containing properties for this target.
 *    errlist		(optional)  nvlist_t of errors encountered when
 *			validating the properties.
 *
 * Return Values:
 *    0			Success
 *    EINVAL		Invalid property
 *
 * On success the target's old property list is freed and replaced by
 * the merged list; on failure the target is left unchanged.  When
 * errlist is supplied, *errlist is always set (and may hold per-property
 * error strings on validation failure); it is caller-freed.
 */
int
it_tgt_setprop(it_config_t *cfg, it_tgt_t *tgt, nvlist_t *proplist,
    nvlist_t **errlist)
{
	int ret;
	nvlist_t *errs = NULL;
	nvlist_t *tprops = NULL;
	char *val = NULL;

	if (!cfg || !tgt || !proplist) {
		return (EINVAL);
	}

	/* verify the target name in case the target node is renamed */
	if (!validate_iscsi_name(tgt->tgt_name)) {
		return (EINVAL);
	}
	canonical_iscsi_name(tgt->tgt_name);

	if (errlist) {
		(void) nvlist_alloc(&errs, 0, 0);
		*errlist = errs;
	}

	/*
	 * copy the existing properties, merge, then validate
	 * the merged properties before committing them.
	 */
	if (tgt->tgt_properties) {
		ret = nvlist_dup(tgt->tgt_properties, &tprops, 0);
	} else {
		ret = nvlist_alloc(&tprops, NV_UNIQUE_NAME, 0);
	}

	if (ret != 0) {
		return (ret);
	}

	ret = nvlist_merge(tprops, proplist, 0);
	if (ret != 0) {
		nvlist_free(tprops);
		return (ret);
	}

	/* unset chap username or alias if requested */
	val = NULL;
	(void) nvlist_lookup_string(proplist, PROP_TARGET_CHAP_USER, &val);
	if (val && (strcasecmp(val, "none") == 0)) {
		(void) nvlist_remove_all(tprops, PROP_TARGET_CHAP_USER);
	}

	val = NULL;
	(void) nvlist_lookup_string(proplist, PROP_ALIAS, &val);
	if (val && (strcasecmp(val, "none") == 0)) {
		(void) nvlist_remove_all(tprops, PROP_ALIAS);
	}

	/* base64 encode the CHAP secret, if it's changed */
	val = NULL;
	(void) nvlist_lookup_string(proplist, PROP_TARGET_CHAP_SECRET, &val);
	if (val) {
		char bsecret[MAX_BASE64_LEN];

		/* Validate the cleartext secret before encoding it. */
		ret = it_val_pass(PROP_TARGET_CHAP_SECRET, val, errs);

		if (ret == 0) {
			(void) memset(bsecret, 0, MAX_BASE64_LEN);

			ret = iscsi_binary_to_base64_str((uint8_t *)val,
			    strlen(val), bsecret, MAX_BASE64_LEN);

			if (ret == 0) {
				/* replace the value in the nvlist */
				ret = nvlist_add_string(tprops,
				    PROP_TARGET_CHAP_SECRET, bsecret);
			}
		}
	}

	if (ret == 0) {
		ret = it_validate_tgtprops(tprops, errs);
	}

	if (ret != 0) {
		if (tprops) {
			nvlist_free(tprops);
		}
		return (ret);
	}

	/* Commit: swap the merged list in for the old one. */
	if (tgt->tgt_properties) {
		nvlist_free(tgt->tgt_properties);
	}
	tgt->tgt_properties = tprops;

	free_empty_errlist(errlist);

	return (0);
}
/*
 * Function:  it_config_setprop()
 *
 * Validate the provided property list and set the global properties
 * for iSCSI Target.  If errlist is not NULL, returns detailed
 * errors for each property that failed.  The format for errorlist
 * is key = property, value = error string.
 *
 * Parameters:
 *
 *    cfg		The current iSCSI configuration obtained from
 *			it_config_load()
 *    proplist		nvlist_t containing properties for this target.
 *    errlist		(optional)  nvlist_t of errors encountered when
 *			validating the properties.
 *
 * Return Values:
 *    0			Success
 *    EINVAL		Invalid property
 *
 * On success the global property list is replaced by the merged list and
 * the in-core iSNS server list is rebuilt from PROP_ISNS_SERVER (if
 * present); on failure the configuration is left unchanged.
 */
int
it_config_setprop(it_config_t *cfg, nvlist_t *proplist, nvlist_t **errlist)
{
	int ret;
	nvlist_t *errs = NULL;
	it_portal_t *isns = NULL;
	it_portal_t *pnext = NULL;
	it_portal_t *newisnslist = NULL;
	char **arr;
	uint32_t count;
	uint32_t newcount;
	nvlist_t *cprops = NULL;
	char *val = NULL;

	if (!cfg || !proplist) {
		return (EINVAL);
	}

	if (errlist) {
		(void) nvlist_alloc(&errs, 0, 0);
		*errlist = errs;
	}

	/*
	 * copy the existing properties, merge, then validate
	 * the merged properties before committing them.
	 */
	if (cfg->config_global_properties) {
		ret = nvlist_dup(cfg->config_global_properties,
		    &cprops, 0);
	} else {
		ret = nvlist_alloc(&cprops, NV_UNIQUE_NAME, 0);
	}

	if (ret != 0) {
		return (ret);
	}

	ret = nvlist_merge(cprops, proplist, 0);
	if (ret != 0) {
		nvlist_free(cprops);
		return (ret);
	}

	/*
	 * base64 encode the radius secret, if it's changed.
	 */
	val = NULL;
	(void) nvlist_lookup_string(proplist, PROP_RADIUS_SECRET, &val);
	if (val) {
		char bsecret[MAX_BASE64_LEN];

		/* Validate the cleartext secret before encoding it. */
		ret = it_val_pass(PROP_RADIUS_SECRET, val, errs);

		if (ret == 0) {
			(void) memset(bsecret, 0, MAX_BASE64_LEN);

			ret = iscsi_binary_to_base64_str((uint8_t *)val,
			    strlen(val), bsecret, MAX_BASE64_LEN);

			if (ret == 0) {
				/* replace the value in the nvlist */
				ret = nvlist_add_string(cprops,
				    PROP_RADIUS_SECRET, bsecret);
			}
		}
	}

	if (ret != 0) {
		nvlist_free(cprops);
		return (ret);
	}

	/* see if we need to remove the radius server setting */
	val = NULL;
	(void) nvlist_lookup_string(cprops, PROP_RADIUS_SERVER, &val);
	if (val && (strcasecmp(val, "none") == 0)) {
		(void) nvlist_remove_all(cprops, PROP_RADIUS_SERVER);
	}

	/* and/or remove the alias */
	val = NULL;
	(void) nvlist_lookup_string(cprops, PROP_ALIAS, &val);
	if (val && (strcasecmp(val, "none") == 0)) {
		(void) nvlist_remove_all(cprops, PROP_ALIAS);
	}

	ret = it_validate_configprops(cprops, errs);
	if (ret != 0) {
		if (cprops) {
			nvlist_free(cprops);
		}
		return (ret);
	}

	/*
	 * Update iSNS server list, if exists in provided property list.
	 */
	ret = nvlist_lookup_string_array(proplist, PROP_ISNS_SERVER,
	    &arr, &count);
	if (ret == 0) {
		/* special case:  if "none", remove all defined */
		if (strcasecmp(arr[0], "none") != 0) {
			ret = it_array_to_portallist(arr, count,
			    ISNS_DEFAULT_SERVER_PORT, &newisnslist,
			    &newcount);
		} else {
			newisnslist = NULL;
			newcount = 0;
			(void) nvlist_remove_all(cprops, PROP_ISNS_SERVER);
		}

		if (ret == 0) {
			/* Free the old portal list before taking the new. */
			isns = cfg->config_isns_svr_list;
			while (isns) {
				pnext = isns->portal_next;
				free(isns);
				isns = pnext;
			}

			cfg->config_isns_svr_list = newisnslist;
			cfg->config_isns_svr_count = newcount;

			/*
			 * Replace the array in the nvlist to ensure
			 * duplicates are properly removed & port numbers
			 * are added.
			 */
			if (newcount > 0) {
				int i = 0;
				char **newarray;

				newarray = malloc(sizeof (char *) * newcount);
				if (newarray == NULL) {
					ret = ENOMEM;
				} else {
					for (isns = newisnslist; isns != NULL;
					    isns = isns->portal_next) {
						(void) sockaddr_to_str(
						    &(isns->portal_addr),
						    &(newarray[i++]));
					}
					(void) nvlist_add_string_array(cprops,
					    PROP_ISNS_SERVER, newarray,
					    newcount);

					/* nvlist copied; free our strings */
					for (i = 0; i < newcount; i++) {
						if (newarray[i]) {
							free(newarray[i]);
						}
					}
					free(newarray);
				}
			}
		}
	} else if (ret == ENOENT) {
		/* not an error */
		ret = 0;
	}

	if (ret == 0) {
		/* replace the global properties list */
		nvlist_free(cfg->config_global_properties);
		cfg->config_global_properties = cprops;
	} else {
		if (cprops) {
			nvlist_free(cprops);
		}
	}

	if (ret == 0)
		free_empty_errlist(errlist);

	return (ret);
}
/*
 * Function:  it_ini_setprop()
 *
 * Validate the provided property list and set the initiator properties.
 * If errlist is not NULL, returns detailed errors for each property
 * that failed.  The format for errorlist is key = property,
 * value = error string.
 *
 * Parameters:
 *
 *    ini		The initiator being updated.
 *    proplist		nvlist_t containing properties for this target.
 *    errlist		(optional)  nvlist_t of errors encountered when
 *			validating the properties.
 *
 * Return Values:
 *    0			Success
 *    EINVAL		Invalid property
 *
 * On success the initiator's old property list is freed and replaced by
 * the merged list; on failure the initiator is left unchanged.
 */
int
it_ini_setprop(it_ini_t *ini, nvlist_t *proplist, nvlist_t **errlist)
{
	int ret;
	nvlist_t *errs = NULL;
	nvlist_t *iprops = NULL;
	char *val = NULL;

	if (!ini || !proplist) {
		return (EINVAL);
	}

	if (errlist) {
		(void) nvlist_alloc(&errs, 0, 0);
		*errlist = errs;
	}

	/*
	 * copy the existing properties, merge, then validate
	 * the merged properties before committing them.
	 */
	if (ini->ini_properties) {
		ret = nvlist_dup(ini->ini_properties, &iprops, 0);
	} else {
		ret = nvlist_alloc(&iprops, NV_UNIQUE_NAME, 0);
	}

	if (ret != 0) {
		return (ret);
	}

	ret = nvlist_merge(iprops, proplist, 0);
	if (ret != 0) {
		nvlist_free(iprops);
		return (ret);
	}

	/* unset chap username if requested */
	if ((nvlist_lookup_string(proplist, PROP_CHAP_USER, &val)) == 0) {
		if (strcasecmp(val, "none") == 0) {
			(void) nvlist_remove_all(iprops, PROP_CHAP_USER);
		}
	}

	/* base64 encode the CHAP secret, if it's changed */
	if ((nvlist_lookup_string(proplist, PROP_CHAP_SECRET, &val)) == 0) {
		char bsecret[MAX_BASE64_LEN];

		/* Validate the cleartext secret before encoding it. */
		ret = it_val_pass(PROP_CHAP_SECRET, val, errs);

		if (ret == 0) {
			(void) memset(bsecret, 0, MAX_BASE64_LEN);

			ret = iscsi_binary_to_base64_str((uint8_t *)val,
			    strlen(val), bsecret, MAX_BASE64_LEN);

			if (ret == 0) {
				/* replace the value in the nvlist */
				ret = nvlist_add_string(iprops,
				    PROP_CHAP_SECRET, bsecret);
			}
		}
	}

	if (ret == 0) {
		ret = it_validate_iniprops(iprops, errs);
	}

	if (ret != 0) {
		if (iprops) {
			nvlist_free(iprops);
		}
		return (ret);
	}

	/* Commit: swap the merged list in for the old one. */
	if (ini->ini_properties) {
		nvlist_free(ini->ini_properties);
	}
	ini->ini_properties = iprops;

	free_empty_errlist(errlist);

	return (0);
}
/*
 * Retire (or unretire, when 'repair' is set) the memory page named by the
 * fault's ASRU/resource.  The physical address is taken from the resource's
 * hc-specific member when available, falling back to the ASRU's
 * FM_FMRI_MEM_PHYSADDR.  Retire via topo is attempted first, then the
 * legacy FMRI path.  If the kernel reports the retire is asynchronous,
 * the page is queued on cma.cma_pages and re-checked from a timer.
 *
 * Returns CMA_RA_SUCCESS or CMA_RA_FAILURE.
 */
/*ARGSUSED*/
int
cma_page_retire(fmd_hdl_t *hdl, nvlist_t *nvl, nvlist_t *asru,
    const char *uuid, boolean_t repair)
{
	cma_page_t *page;
	uint64_t pageaddr;
	const char *action = repair ? "unretire" : "retire";
	int rc;
	nvlist_t *rsrc = NULL, *asrucp = NULL, *hcsp;

	(void) nvlist_lookup_nvlist(nvl, FM_FAULT_RESOURCE, &rsrc);

	/* Work on a private copy of the ASRU; freed on every early return. */
	if (nvlist_dup(asru, &asrucp, 0) != 0) {
		fmd_hdl_debug(hdl, "page retire nvlist dup failed\n");
		return (CMA_RA_FAILURE);
	}

	/* It should already be expanded, but we'll do it again anyway */
	if (fmd_nvl_fmri_expand(hdl, asrucp) < 0) {
		fmd_hdl_debug(hdl, "failed to expand page asru\n");
		cma_stats.bad_flts.fmds_value.ui64++;
		nvlist_free(asrucp);
		return (CMA_RA_FAILURE);
	}

	/* A page that is no longer present has nothing left to retire. */
	if (!repair && !fmd_nvl_fmri_present(hdl, asrucp)) {
		fmd_hdl_debug(hdl, "page retire overtaken by events\n");
		cma_stats.page_nonent.fmds_value.ui64++;
		nvlist_free(asrucp);
		return (CMA_RA_SUCCESS);
	}

	/* Figure out physaddr from resource or asru */
	if (rsrc == NULL ||
	    nvlist_lookup_nvlist(rsrc, FM_FMRI_HC_SPECIFIC, &hcsp) != 0 ||
	    (nvlist_lookup_uint64(hcsp, "asru-" FM_FMRI_HC_SPECIFIC_PHYSADDR,
	    &pageaddr) != 0 && nvlist_lookup_uint64(hcsp,
	    FM_FMRI_HC_SPECIFIC_PHYSADDR, &pageaddr) != 0)) {
		if (nvlist_lookup_uint64(asrucp, FM_FMRI_MEM_PHYSADDR,
		    &pageaddr) != 0) {
			fmd_hdl_debug(hdl, "mem fault missing 'physaddr'\n");
			cma_stats.bad_flts.fmds_value.ui64++;
			nvlist_free(asrucp);
			return (CMA_RA_FAILURE);
		}
	}

	if (repair) {
		/* Honor the agent's configuration knobs. */
		if (!cma.cma_page_dounretire) {
			fmd_hdl_debug(hdl,
			    "suppressed unretire of page %llx\n",
			    (u_longlong_t)pageaddr);
			cma_stats.page_supp.fmds_value.ui64++;
			nvlist_free(asrucp);
			return (CMA_RA_SUCCESS);
		}
		/* If unretire via topo fails, we fall back to legacy way */
		if (rsrc == NULL ||
		    (rc = fmd_nvl_fmri_unretire(hdl, rsrc)) < 0)
			rc = cma_fmri_page_unretire(hdl, asrucp);
	} else {
		if (!cma.cma_page_doretire) {
			fmd_hdl_debug(hdl,
			    "suppressed retire of page %llx\n",
			    (u_longlong_t)pageaddr);
			cma_stats.page_supp.fmds_value.ui64++;
			nvlist_free(asrucp);
			return (CMA_RA_FAILURE);
		}
		/* If retire via topo fails, we fall back to legacy way */
		if (rsrc == NULL ||
		    (rc = fmd_nvl_fmri_retire(hdl, rsrc)) < 0)
			rc = cma_fmri_page_retire(hdl, asrucp);
	}

	if (rc == FMD_AGENT_RETIRE_DONE) {
		fmd_hdl_debug(hdl, "%sd page 0x%llx\n", action,
		    (u_longlong_t)pageaddr);
		if (repair)
			cma_stats.page_repairs.fmds_value.ui64++;
		else
			cma_stats.page_flts.fmds_value.ui64++;
		nvlist_free(asrucp);
		return (CMA_RA_SUCCESS);
	} else if (repair || rc != FMD_AGENT_RETIRE_ASYNC) {
		fmd_hdl_debug(hdl, "%s of page 0x%llx failed, will not "
		    "retry: %s\n", action, (u_longlong_t)pageaddr,
		    strerror(errno));
		cma_stats.page_fails.fmds_value.ui64++;
		nvlist_free(asrucp);
		return (CMA_RA_FAILURE);
	}

	/*
	 * The page didn't immediately retire.  We'll need to periodically
	 * check to see if it has been retired.
	 */
	fmd_hdl_debug(hdl, "page didn't retire - sleeping\n");

	page = fmd_hdl_zalloc(hdl, sizeof (cma_page_t), FMD_SLEEP);
	page->pg_addr = pageaddr;
	if (rsrc != NULL)
		(void) nvlist_dup(rsrc, &page->pg_rsrc, 0);
	/* Ownership of asrucp transfers to the retry entry. */
	page->pg_asru = asrucp;
	if (uuid != NULL)
		page->pg_uuid = fmd_hdl_strdup(hdl, uuid, FMD_SLEEP);

	page->pg_next = cma.cma_pages;
	cma.cma_pages = page;

	if (cma.cma_page_timerid != 0)
		fmd_timer_remove(hdl, cma.cma_page_timerid);

	cma.cma_page_curdelay = cma.cma_page_mindelay;

	cma.cma_page_timerid =
	    fmd_timer_install(hdl, NULL, NULL, cma.cma_page_curdelay);

	/* Don't free asrucp here.  This FMRI will be needed for retry. */
	return (CMA_RA_FAILURE);
}