/*
 * Validate a delegated-permission nvlist: every top-level entry must name a
 * valid "who", and when a permission sub-list is present it must be non-empty
 * and contain only valid permission names.  An empty top-level list is an
 * error.  Returns 0 if the nvlist is well-formed, -1 otherwise.
 */
int
zfs_deleg_verify_nvlist(nvlist_t *nvp)
{
	nvpair_t *who;
	nvpair_t *perm_name;
	nvlist_t *perms;
	int error;

	if (nvp == NULL)
		return (-1);

	/* An nvlist with no "who" entries at all is invalid. */
	who = nvlist_next_nvpair(nvp, NULL);
	if (who == NULL)
		return (-1);

	for (; who != NULL; who = nvlist_next_nvpair(nvp, who)) {
		if (zfs_validate_who(nvpair_name(who)))
			return (-1);

		error = nvlist_lookup_nvlist(nvp, nvpair_name(who), &perms);
		if (error == ENOENT)
			continue;	/* no permission list for this who */
		if (error != 0)
			return (-1);

		/* A present-but-empty permission list is invalid. */
		perm_name = nvlist_next_nvpair(perms, NULL);
		if (perm_name == NULL)
			return (-1);

		for (; perm_name != NULL;
		    perm_name = nvlist_next_nvpair(perms, perm_name)) {
			error = zfs_valid_permission_name(
			    nvpair_name(perm_name));
			if (error)
				return (-1);
		}
	}

	return (0);
}
/*
 * Print the name of every feature recorded in the pool's label config
 * (spa_label_features), one per line.
 */
static void
dump_mos(spa_t *spa)
{
	nvlist_t *features = spa->spa_label_features;
	nvpair_t *nvp = NULL;

	(void) printf("label config:\n");
	while ((nvp = nvlist_next_nvpair(features, nvp)) != NULL)
		(void) printf("\t%s\n", nvpair_name(nvp));
}
/*
 * Apply control operation "op" with arguments "arg" to the given node.
 * Each plugin gets a chance to translate or consume properties from a
 * private copy of the caller's nvlist; any property left unconsumed after
 * all plugins have run is reported as unsupported.  On success the
 * snapshot's control data is submitted via ses_snap_do_ctl().
 * Returns 0 on success, -1 with the ses errno set on failure.
 */
int
ses_node_ctl(ses_node_t *np, const char *op, nvlist_t *arg)
{
	ses_target_t *tp = np->sn_snapshot->ss_target;
	ses_plugin_t *sp;
	nvlist_t *nvl;
	nvpair_t *nvp;
	int ret;

	/* Duplicate the argument list so the caller's nvlist stays intact. */
	if (nvlist_dup(arg, &nvl, 0) != 0)
		return (ses_set_errno(ESES_NOMEM));

	/*
	 * Technically we could get away with a per-snapshot lock while we fill
	 * the control page contents, but this doesn't take much time and we
	 * want actual control operations to be protected per-target, so we just
	 * take the target lock.
	 */
	(void) pthread_mutex_lock(&tp->st_lock);

	/*
	 * We walk the list of plugins backwards, so that a product-specific
	 * plugin can rewrite the nvlist to control operations in terms of the
	 * standard mechanisms, if desired.
	 */
	for (sp = tp->st_plugin_first; sp != NULL; sp = sp->sp_next) {
		if (sp->sp_node_ctl == NULL)
			continue;

		if (sp->sp_node_ctl(sp, np, op, nvl) != 0) {
			/* Plugin failed: release the copy and the lock. */
			nvlist_free(nvl);
			(void) pthread_mutex_unlock(&tp->st_lock);
			return (-1);
		}
	}

	/* Anything still in the list was not recognized by any plugin. */
	if ((nvp = nvlist_next_nvpair(nvl, NULL)) != NULL) {
		(void) ses_error(ESES_NOTSUP, "property '%s' invalid for "
		    "this node", nvpair_name(nvp));
		nvlist_free(nvl);
		(void) pthread_mutex_unlock(&tp->st_lock);
		return (-1);
	}

	nvlist_free(nvl);

	/* All properties consumed; push the control page(s) to the device. */
	ret = ses_snap_do_ctl(np->sn_snapshot);
	(void) pthread_mutex_unlock(&tp->st_lock);

	return (ret);
}
/*
 * Sync-task check function for creating user holds.  Each requested hold
 * must name a snapshot ("name@snap") and pass dsl_dataset_user_hold_check_one.
 * Valid holds are accumulated in dduha_chkholds; failures are recorded in
 * dduha_errlist.  ENOENT failures are recorded but do not abort the whole
 * operation; any other error fails everything.
 */
static int
dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_hold_arg_t *dduha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	/* User holds require pool support. */
	if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS)
		return (SET_ERROR(ENOTSUP));

	/* Only perform the real checks during the syncing pass. */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dduha->dduha_holds, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
		dsl_dataset_t *ds;
		int error = 0;
		char *htag, *name;

		/* must be a snapshot */
		name = nvpair_name(pair);
		if (strchr(name, '@') == NULL)
			error = SET_ERROR(EINVAL);

		if (error == 0)
			error = nvpair_value_string(pair, &htag);

		if (error == 0)
			error = dsl_dataset_hold(dp, name, FTAG, &ds);

		if (error == 0) {
			error = dsl_dataset_user_hold_check_one(ds, htag,
			    dduha->dduha_minor != 0, tx);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			/* Record the vetted hold for the sync phase. */
			fnvlist_add_string(dduha->dduha_chkholds, name, htag);
		} else {
			/*
			 * We register ENOENT errors so they can be correctly
			 * reported if needed, such as when all holds fail.
			 */
			fnvlist_add_int32(dduha->dduha_errlist, name, error);
			if (error != ENOENT)
				return (error);
		}
	}

	return (0);
}
/*
 * Sync-task check function for releasing user holds.  Each top-level pair
 * maps a snapshot name to an nvlist of hold tags to remove.  The snapshot
 * is located via the configured hold function (ddura_holdfunc) and each
 * release is validated by dsl_dataset_user_release_check_one().  Failures
 * are recorded in ddura_errlist; ENOENT does not abort the overall
 * operation, any other error does.
 */
static int
dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_release_arg_t *ddura;
	dsl_holdfunc_t *holdfunc;
	dsl_pool_t *dp;
	nvpair_t *pair;

	/* Only perform the real checks during the syncing pass. */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	dp = dmu_tx_pool(tx);

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	ddura = arg;
	holdfunc = ddura->ddura_holdfunc;

	for (pair = nvlist_next_nvpair(ddura->ddura_holds, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
		int error;
		dsl_dataset_t *ds;
		nvlist_t *holds;
		const char *snapname = nvpair_name(pair);

		/* The value must itself be an nvlist of hold tags. */
		error = nvpair_value_nvlist(pair, &holds);
		if (error != 0)
			error = (SET_ERROR(EINVAL));
		else
			error = holdfunc(dp, snapname, FTAG, &ds);
		if (error == 0) {
			error = dsl_dataset_user_release_check_one(ddura, ds,
			    holds, snapname);
			dsl_dataset_rele(ds, FTAG);
		}
		if (error != 0) {
			if (ddura->ddura_errlist != NULL) {
				fnvlist_add_int32(ddura->ddura_errlist,
				    snapname, error);
			}
			/*
			 * Non-existent snapshots are put on the errlist,
			 * but don't cause an overall failure.
			 */
			if (error != ENOENT)
				return (error);
		}
	}

	return (0);
}
/*
 * Validate the property list supplied to a key rewrap: only keyformat,
 * keylocation and pbkdf2iters are permitted.  On success, a validated
 * property list produced by zfs_valid_proplist() is returned through
 * props_out (caller frees); on failure an errno value is returned and
 * *props_out is set to NULL.
 *
 * Fix: the previous version pre-allocated new_props with fnvlist_alloc()
 * and then overwrote the pointer with the zfs_valid_proplist() result,
 * leaking the initial allocation.  new_props now starts out NULL.
 */
static int
zfs_crypto_verify_rewrap_nvlist(zfs_handle_t *zhp, nvlist_t *props,
    nvlist_t **props_out, char *errbuf)
{
	int ret = 0;
	nvpair_t *elem = NULL;
	zfs_prop_t prop;
	nvlist_t *new_props = NULL;

	/*
	 * loop through all provided properties, we should only have
	 * keyformat, keylocation and pbkdf2iters. The actual validation of
	 * values is done by zfs_valid_proplist().
	 */
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);
		prop = zfs_name_to_prop(propname);

		switch (prop) {
		case ZFS_PROP_PBKDF2_ITERS:
		case ZFS_PROP_KEYFORMAT:
		case ZFS_PROP_KEYLOCATION:
			break;
		default:
			ret = EINVAL;
			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
			    "Only keyformat, keylocation and pbkdf2iters may "
			    "be set with this command."));
			goto error;
		}
	}

	new_props = zfs_valid_proplist(zhp->zfs_hdl, zhp->zfs_type, props,
	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED), NULL, zhp->zpool_hdl,
	    B_TRUE, errbuf);
	if (new_props == NULL) {
		ret = EINVAL;
		goto error;
	}

	*props_out = new_props;
	return (0);

error:
	/* nvlist_free(NULL) is a no-op, so this is safe on every path. */
	nvlist_free(new_props);
	*props_out = NULL;
	return (ret);
}
/*
 * Iterate over all bookmarks of the given dataset, constructing a handle
 * for each and invoking func(handle, data).  Iteration stops at the first
 * non-zero return from func, which becomes the return value.
 *
 * Fix: the previous version pre-allocated "bmarks" with fnvlist_alloc()
 * and then let lzc_get_bookmarks() overwrite the pointer with its own
 * result list, leaking the initial allocation.  bmarks now stays NULL
 * until lzc_get_bookmarks() fills it in.
 */
int
zfs_iter_bookmarks(zfs_handle_t *zhp, zfs_iter_f func, void *data)
{
	zfs_handle_t *nzhp;
	nvlist_t *props = NULL;
	nvlist_t *bmarks = NULL;
	int err;
	nvpair_t *pair;

	/* Snapshots and bookmarks cannot themselves have bookmarks. */
	if ((zfs_get_type(zhp) & (ZFS_TYPE_SNAPSHOT | ZFS_TYPE_BOOKMARK)) != 0)
		return (0);

	/* Setup the requested properties nvlist. */
	props = fnvlist_alloc();
	fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_GUID));
	fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_CREATETXG));
	fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_CREATION));

	/* lzc_get_bookmarks() allocates the result nvlist for us. */
	if ((err = lzc_get_bookmarks(zhp->zfs_name, props, &bmarks)) != 0)
		goto out;

	for (pair = nvlist_next_nvpair(bmarks, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(bmarks, pair)) {
		char name[ZFS_MAXNAMELEN];
		char *bmark_name;
		nvlist_t *bmark_props;

		bmark_name = nvpair_name(pair);
		bmark_props = fnvpair_value_nvlist(pair);

		(void) snprintf(name, sizeof (name), "%s#%s", zhp->zfs_name,
		    bmark_name);

		nzhp = make_bookmark_handle(zhp, name, bmark_props);
		if (nzhp == NULL)
			continue;

		if ((err = func(nzhp, data)) != 0)
			goto out;
	}

out:
	/* Both free routines tolerate NULL. */
	fnvlist_free(props);
	fnvlist_free(bmarks);

	return (err);
}
static void prof_make_names(struct sdev_node *dir) { char *name; nvpair_t *nvp = NULL; nvlist_t *nvl = dir->sdev_prof.dev_name; int rv; ASSERT(RW_WRITE_HELD(&dir->sdev_contents)); if ((dir->sdev_flags & SDEV_ZONED) != 0) prof_make_names_walk(dir, prof_make_name_zone); if (nvl == NULL) return; if (dir->sdev_prof.has_glob) { prof_make_names_walk(dir, prof_make_name_glob); return; } /* Walk nvlist and lookup corresponding device in global inst */ while (nvp = nvlist_next_nvpair(nvl, nvp)) { int type; rv = nvpair_value_int32(nvp, &type); if (rv != 0) { cmn_err(CE_WARN, sdev_nvp_val_err, rv, nvpair_name(nvp)); break; } if (type == PROFILE_TYPE_EXCLUDE) continue; name = nvpair_name(nvp); (void) prof_lookup_globaldev(dir, dir->sdev_origin, name, name); } }
/* * Create symlinks in the current directory based on profile */ static void prof_make_symlinks(struct sdev_node *dir) { char *tgt, *lnm; nvpair_t *nvp = NULL; nvlist_t *nvl = dir->sdev_prof.dev_symlink; int rv; ASSERT(RW_WRITE_HELD(&dir->sdev_contents)); if (nvl == NULL) return; while (nvp = nvlist_next_nvpair(nvl, nvp)) { lnm = nvpair_name(nvp); rv = nvpair_value_string(nvp, &tgt); if (rv != 0) { cmn_err(CE_WARN, sdev_nvp_val_err, rv, nvpair_name(nvp)); break; } prof_make_sym(dir, lnm, tgt); } }
/*
 * Sync-task function for releasing user holds.  For each snapshot in
 * ddura_holds, releases the listed hold tags; if the snapshot was flagged
 * in ddura_todelete by the check phase, it is destroyed once its last
 * reference is gone.  The check function has already validated every
 * entry, so the dataset holds here must succeed.
 */
static void
dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_release_arg_t *ddura = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(ddura->ddura_holds, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));
		dsl_dataset_user_release_sync_one(ds,
		    fnvpair_value_nvlist(pair), tx);
		if (nvlist_exists(ddura->ddura_todelete,
		    nvpair_name(pair))) {
			/* Deferred-destroy snapshot with no refs left. */
			ASSERT(ds->ds_userrefs == 0 &&
			    ds->ds_phys->ds_num_children == 1 &&
			    DS_IS_DEFER_DESTROY(ds));
			dsl_destroy_snapshot_sync_impl(ds, B_FALSE, tx);
		}
		dsl_dataset_rele(ds, FTAG);
	}
}
/*
 * Release each hold tag in "holds" from the given snapshot: drop the
 * in-core userref count, remove any temporary-hold bookkeeping from the
 * pool (ENOENT is fine — the hold may not have been temporary), delete the
 * tag from the snapshot's userrefs ZAP object, and log the release.
 */
static void
dsl_dataset_user_release_sync_one(dsl_dataset_t *ds, nvlist_t *holds,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;
	int error;
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		ds->ds_userrefs--;
		error = dsl_pool_user_release(dp, ds->ds_object,
		    nvpair_name(pair), tx);
		/* ENOENT: the hold had no temporary-cleanup entry. */
		VERIFY(error == 0 || error == ENOENT);
		zapobj = ds->ds_phys->ds_userrefs_obj;
		VERIFY0(zap_remove(mos, zapobj, nvpair_name(pair), tx));

		spa_history_log_internal_ds(ds, "release", tx,
		    "tag=%s refs=%lld", nvpair_name(pair),
		    (longlong_t)ds->ds_userrefs);
	}
}
/*
 * Release "user holds" on snapshots.  If a snapshot was marked for deferred
 * destroy (lzc_destroy_snaps(defer=B_TRUE)), has no clones, and loses its
 * last hold, it is destroyed.
 *
 * The keys of "holds" are snapshot names (all in one pool); each value is
 * an nvlist whose keys are the hold tags to remove.  Holds that fail to
 * release because they do not exist get an errlist entry but do not cause
 * overall failure.  Returns 0 when the request was empty or every existing
 * hold was removed; otherwise returns the errno of an unspecified failed
 * hold and releases nothing.  errlist always has an entry per failed hold.
 */
int
lzc_release(nvlist_t *holds, nvlist_t **errlist)
{
	nvpair_t *first;
	char pool[MAXNAMELEN];

	/* An empty request trivially succeeds. */
	first = nvlist_next_nvpair(holds, NULL);
	if (first == NULL)
		return (0);

	/* The pool name is the first snapshot name up to '/' or '@'. */
	(void) strlcpy(pool, nvpair_name(first), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
}
/*
 * Create the given bookmarks, which must all be in the same pool.
 * Per-bookmark errors are returned via "errors".
 */
int
dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
{
	dsl_bookmark_create_arg_t dbca;
	nvpair_t *first;

	first = nvlist_next_nvpair(bmarks, NULL);
	if (first == NULL)
		return (0);	/* nothing to create */

	dbca.dbca_bmarks = bmarks;
	dbca.dbca_errors = errors;

	/* Any bookmark name identifies the pool for the sync task. */
	return (dsl_sync_task(nvpair_name(first), dsl_bookmark_create_check,
	    dsl_bookmark_create_sync, &dbca, fnvlist_num_pairs(bmarks)));
}
/*
 * holds maps snapname -> holdname; errlist is filled in with
 * snapname -> error.  If cleanup_minor is non-zero the holds are
 * temporary and cleaned up when the process exits.  If any hold fails,
 * all fail.
 */
int
dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor,
    nvlist_t *errlist)
{
	dsl_dataset_user_hold_arg_t dduha;
	nvpair_t *first;

	first = nvlist_next_nvpair(holds, NULL);
	if (first == NULL)
		return (0);	/* nothing requested */

	dduha.dduha_holds = holds;
	dduha.dduha_errlist = errlist;
	dduha.dduha_minor = cleanup_minor;

	return (dsl_sync_task(nvpair_name(first),
	    dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
	    &dduha, fnvlist_num_pairs(holds)));
}
/*
 * Sync-task function for creating user holds: applies one hold per entry
 * in dduha_holds, all timestamped with the same current time.
 */
static void
dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_hold_arg_t *dduha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	uint64_t now = gethrestime_sec();
	nvpair_t *nvp = NULL;

	while ((nvp = nvlist_next_nvpair(dduha->dduha_holds, nvp)) != NULL) {
		dsl_dataset_t *ds;

		/* The snapshot must exist at sync time. */
		VERIFY0(dsl_dataset_hold(dp, nvpair_name(nvp), FTAG, &ds));
		dsl_dataset_user_hold_sync_one(ds, fnvpair_value_string(nvp),
		    dduha->dduha_minor, now, tx);
		dsl_dataset_rele(ds, FTAG);
	}
}
/*
 * This function is invoked when shares are disabled: it unpacks the list of
 * shares from the ioctl buffer, unexports each one, and queues the cleanup
 * (tree disconnect / file close) for asynchronous processing.  Cleaning up
 * can involve VOP/VFS calls that may block if the file system is wedged, so
 * deferring it lets this call return immediately — which in turn lets a
 * forced unmount proceed and unstick any blocked unshare processing.
 *
 * The path lookup for the VFS root vnode, and its release, happen
 * synchronously before any associated unmount, to avoid spurious EBUSY/EIO
 * races with a forced unmount finishing first.
 */
int
smb_kshare_unexport_list(smb_ioc_share_t *ioc)
{
	smb_server_t *sv = NULL;
	smb_unshare_t *ux;
	nvlist_t *shrlist = NULL;
	nvpair_t *nvp;
	boolean_t unexport = B_FALSE;
	char *shrname;
	int rc;

	if ((rc = smb_server_lookup(&sv)) != 0)
		return (rc);

	rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, 0);
	if (rc != 0)
		goto out;

	nvp = nvlist_next_nvpair(shrlist, NULL);
	for (; nvp != NULL; nvp = nvlist_next_nvpair(shrlist, nvp)) {
		/* Only nvlist-valued entries describe shares. */
		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
			continue;

		shrname = nvpair_name(nvp);
		ASSERT(shrname);

		if ((rc = smb_kshare_unexport(sv, shrname)) != 0)
			continue;

		/* Queue the share for asynchronous cleanup. */
		ux = kmem_cache_alloc(smb_kshare_cache_unexport, KM_SLEEP);
		(void) strlcpy(ux->us_sharename, shrname, MAXNAMELEN);
		smb_slist_insert_tail(&sv->sv_export.e_unexport_list, ux);
		unexport = B_TRUE;
	}

	if (unexport)
		smb_thread_signal(&sv->sv_export.e_unexport_thread);
	rc = 0;

out:
	if (shrlist != NULL)
		nvlist_free(shrlist);
	smb_server_release(sv);
	return (rc);
}
/*
 * Sync-task check function for destroying bookmarks.  Bookmarks that do
 * not exist (ENOENT on the dataset or ESRCH on the bookmark lookup) are
 * silently skipped; existing bookmarks are recorded in dbda_success for
 * the sync phase, and other failures are recorded in dbda_errors.
 * Returns 0 if every named bookmark either exists or is already gone,
 * otherwise the error of one of the failing entries.
 */
static int
dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
{
	dsl_bookmark_destroy_arg_t *dbda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int rv = 0;
	nvpair_t *pair;

	/* Without the feature there can be no bookmarks to destroy. */
	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
		return (0);

	for (pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
		const char *fullname = nvpair_name(pair);
		dsl_dataset_t *ds;
		zfs_bookmark_phys_t bm;
		int error;
		char *shortname;

		error = dsl_bookmark_hold_ds(dp, fullname, &ds,
		    FTAG, &shortname);
		if (error == ENOENT) {
			/* ignore it; the bookmark is "already destroyed" */
			continue;
		}
		if (error == 0) {
			error = dsl_dataset_bmark_lookup(ds, shortname, &bm);
			dsl_dataset_rele(ds, FTAG);
			if (error == ESRCH) {
				/*
				 * ignore it; the bookmark is
				 * "already destroyed"
				 */
				continue;
			}
		}
		if (error == 0) {
			fnvlist_add_boolean(dbda->dbda_success, fullname);
		} else {
			fnvlist_add_int32(dbda->dbda_errors, fullname, error);
			rv = error;
		}
	}
	return (rv);
}
/*
 * Put new CPU property.
 * Handle special case of "cpu.status".
 *
 * Fix: if cpu_get() returned NULL, the old code set ret = EINVAL but then
 * fell through and dereferenced the NULL cpu pointer ("cpu->cpu_props").
 * We now bail out (dropping cpu_lock) as soon as the lookup fails.
 */
int
pool_cpu_propput(processorid_t cpuid, nvpair_t *pair)
{
	int ret = 0;
	cpu_t *cpu;

	ASSERT(pool_lock_held());
	ASSERT(INGLOBALZONE(curproc));

	if (nvpair_type(pair) == DATA_TYPE_STRING &&
	    strcmp(nvpair_name(pair), "cpu.status") == 0) {
		char *val;
		int status;
		int old_status;

		(void) nvpair_value_string(pair, &val);
		/* Translate the textual status into a p_online() state. */
		if (strcmp(val, PS_OFFLINE) == 0)
			status = P_OFFLINE;
		else if (strcmp(val, PS_ONLINE) == 0)
			status = P_ONLINE;
		else if (strcmp(val, PS_NOINTR) == 0)
			status = P_NOINTR;
		else if (strcmp(val, PS_FAULTED) == 0)
			status = P_FAULTED;
		else if (strcmp(val, PS_SPARE) == 0)
			status = P_SPARE;
		else
			return (EINVAL);
		ret = p_online_internal(cpuid, status, &old_status);
	} else {
		mutex_enter(&cpu_lock);
		if ((cpu = cpu_get(cpuid)) == NULL) {
			/* No such CPU: release the lock and fail. */
			mutex_exit(&cpu_lock);
			return (EINVAL);
		}
		if (cpu->cpu_props == NULL) {
			/* Lazily create the per-CPU property list. */
			(void) nvlist_alloc(&cpu->cpu_props,
			    NV_UNIQUE_NAME, KM_SLEEP);
			(void) nvlist_add_string(cpu->cpu_props,
			    "cpu.comment", "");
		}
		ret = pool_propput_common(cpu->cpu_props, pair,
		    pool_cpu_props);
		if (ret == 0)
			pool_cpu_mod = gethrtime();
		mutex_exit(&cpu_lock);
	}
	return (ret);
}
/*
 * Destroy the given bookmarks, which must all be in the same pool.
 * Per-bookmark errors are returned via "errors".
 */
int
dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
{
	dsl_bookmark_destroy_arg_t dbda;
	nvpair_t *first = nvlist_next_nvpair(bmarks, NULL);
	int rv;

	if (first == NULL)
		return (0);	/* nothing to destroy */

	dbda.dbda_bmarks = bmarks;
	dbda.dbda_errors = errors;
	dbda.dbda_success = fnvlist_alloc();

	rv = dsl_sync_task(nvpair_name(first), dsl_bookmark_destroy_check,
	    dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks));
	fnvlist_free(dbda.dbda_success);
	return (rv);
}
/*
 * Destroys bookmarks.
 *
 * The keys in "bmarks" are the bookmarks to destroy, given as <fs>#<bmark>;
 * they must all be in the same pool.  Bookmarks that do not exist are
 * silently ignored.  Returns 0 if all existing bookmarks were destroyed;
 * otherwise returns the errno of an undetermined failing bookmark, destroys
 * nothing, and fills errlist with an (int32) error code per failure.
 */
int
lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
{
	nvpair_t *first;
	char pool[MAXNAMELEN];

	/* An empty request trivially succeeds. */
	first = nvlist_next_nvpair(bmarks, NULL);
	if (first == NULL)
		return (0);

	/* The pool name is the first bookmark name up to '/' or '#'. */
	(void) strlcpy(pool, nvpair_name(first), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	return (lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist));
}
/*
 * Creates bookmarks.
 *
 * "bookmarks" maps each bookmark name (e.g. "pool/fs#bmark") to the name of
 * the snapshot it points at (e.g. "pool/fs@snap"); all must be in the same
 * pool.  errlist receives an (int32) error code per failed bookmark.
 * Returns 0 if every bookmark was created, otherwise the errno of an
 * undetermined failing bookmark.
 */
int
lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
{
	nvpair_t *first;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* An empty request trivially succeeds. */
	first = nvlist_next_nvpair(bookmarks, NULL);
	if (first == NULL)
		return (0);

	/* The pool name is the first bookmark name up to '/' or '#'. */
	(void) strlcpy(pool, nvpair_name(first), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	return (lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist));
}
/* Process a list of key=value pairs from a login request */
static kv_status_t
iser_process_request_nvlist(nvlist_t *request_nvl, nvlist_t *response_nvl,
    nvlist_t *negotiated_nvl)
{
	const idm_kv_xlate_t *ikvx;
	char *key;
	nvpair_t *nvp, *next;
	kv_status_t kvrc = KV_HANDLED;
	boolean_t transit = B_TRUE;

	/*
	 * Walk the request list, fetching each successor before handling
	 * the current pair since iser_handle_key() may alter the list.
	 */
	for (nvp = nvlist_next_nvpair(request_nvl, NULL); nvp != NULL;
	    nvp = next) {
		next = nvlist_next_nvpair(request_nvl, nvp);
		key = nvpair_name(nvp);
		ikvx = idm_lookup_kv_xlate(key, strlen(key));

		kvrc = iser_handle_key(nvp, ikvx, request_nvl, response_nvl,
		    negotiated_nvl);
		if (kvrc == KV_HANDLED_NO_TRANSIT) {
			/* we countered, clear the transit flag */
			transit = B_FALSE;
		} else if (kvrc != KV_HANDLED) {
			/* error, bail out */
			break;
		}
	}

	/*
	 * If the current kv_status_t indicates success, we've handled
	 * the entire list.  Explicitly set kvrc to NO_TRANSIT if we've
	 * cleared the transit flag along the way.
	 */
	if ((kvrc == KV_HANDLED) && (transit == B_FALSE))
		kvrc = KV_HANDLED_NO_TRANSIT;

	return (kvrc);
}
/*
 * Recursively dump an nvlist to stderr, one pair per line, indenting each
 * nesting level by INDENT columns.
 */
static void
__dump(nvlist_t *list, int indent)
{
	nvpair_t *nvp = NULL;

	while ((nvp = nvlist_next_nvpair(list, nvp)) != NULL) {
		fprintf(stderr, "%*sname='%s' type=%d", indent, "",
		    nvpair_name(nvp), nvpair_type(nvp));

		switch (nvpair_type(nvp)) {
		case DATA_TYPE_STRING:
			fprintf(stderr, "\n%*svalue='%s'\n",
			    indent + INDENT, "", pair2str(nvp));
			break;
		case DATA_TYPE_STRING_ARRAY:
			__dump_string_array(nvp, indent + INDENT);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			fprintf(stderr, "\n%*svalue=%s\n",
			    indent + INDENT, "",
			    pair2bool(nvp) ? "true" : "false");
			break;
		case DATA_TYPE_UINT8:
			fprintf(stderr, "\n%*svalue='%c'\n",
			    indent + INDENT, "", pair2char(nvp));
			break;
		case DATA_TYPE_UINT64:
			fprintf(stderr, "\n%*svalue=%"PRIu64"\n",
			    indent + INDENT, "", pair2int(nvp));
			break;
		case DATA_TYPE_NVLIST:
			/* Recurse into the nested list. */
			fprintf(stderr, "\n");
			__dump(pair2nvl(nvp), indent + INDENT);
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			__dump_nvlist_array(nvp, indent + INDENT);
			break;
		default:
			fprintf(stderr, "\n");
			break;
		}
	}
}
nvpair_t * nvpair_clone(const nvpair_t *nvp) { nvpair_t *newnvp; const char *name; const void *data; size_t datasize; NVPAIR_ASSERT(nvp); name = nvpair_name(nvp); switch (nvpair_type(nvp)) { case NV_TYPE_NULL: newnvp = nvpair_create_null(name); break; case NV_TYPE_BOOL: newnvp = nvpair_create_bool(name, nvpair_get_bool(nvp)); break; case NV_TYPE_NUMBER: newnvp = nvpair_create_number(name, nvpair_get_number(nvp)); break; case NV_TYPE_STRING: newnvp = nvpair_create_string(name, nvpair_get_string(nvp)); break; case NV_TYPE_NVLIST: newnvp = nvpair_create_nvlist(name, nvpair_get_nvlist(nvp)); break; #ifndef _KERNEL case NV_TYPE_DESCRIPTOR: newnvp = nvpair_create_descriptor(name, nvpair_get_descriptor(nvp)); break; #endif case NV_TYPE_BINARY: data = nvpair_get_binary(nvp, &datasize); newnvp = nvpair_create_binary(name, data, datasize); break; default: PJDLOG_ABORT("Unknown type: %d.", nvpair_type(nvp)); } return (newnvp); }
/*
 * Sync-task check function for releasing user holds.  Each top-level pair
 * maps a snapshot name to an nvlist of hold tags.  Snapshots whose last
 * reference would be released and that are eligible for deferred destroy
 * are recorded in ddura_todelete for the sync phase.  All failures are
 * recorded in ddura_errlist; the last error seen becomes the return value.
 */
static int
dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_release_arg_t *ddura = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int rv = 0;

	/* Only perform the real checks during the syncing pass. */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(ddura->ddura_holds, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
		const char *name = nvpair_name(pair);
		int error;
		dsl_dataset_t *ds;
		nvlist_t *holds;

		/* The value must itself be an nvlist of hold tags. */
		error = nvpair_value_nvlist(pair, &holds);
		if (error != 0)
			return (EINVAL);

		error = dsl_dataset_hold(dp, name, FTAG, &ds);
		if (error == 0) {
			boolean_t deleteme;
			error = dsl_dataset_user_release_check_one(ds,
			    holds, &deleteme);
			if (error == 0 && deleteme) {
				/* Flag for destruction in the sync phase. */
				fnvlist_add_boolean(ddura->ddura_todelete,
				    name);
			}
			dsl_dataset_rele(ds, FTAG);
		}
		if (error != 0) {
			if (ddura->ddura_errlist != NULL) {
				fnvlist_add_int32(ddura->ddura_errlist,
				    name, error);
			}
			rv = error;
		}
	}
	return (rv);
}
/*
 * The function obtains size of the nvlist after nvlist_pack().
 *
 * Implemented as an iterative depth-first walk: descending into a nested
 * nvlist re-points nvl/nvp at the child, and ascending uses the parent
 * cookie (the nvpair we descended through) to resume where we left off.
 */
size_t
nvlist_size(const nvlist_t *nvl)
{
	const nvlist_t *tmpnvl;
	const nvpair_t *nvp, *tmpnvp;
	void *cookie;
	size_t size;

	NVLIST_ASSERT(nvl);
	PJDLOG_ASSERT(nvl->nvl_error == 0);

	size = sizeof(struct nvlist_header);
	nvp = nvlist_first_nvpair(nvl);
	while (nvp != NULL) {
		/* Every pair is packed as a header plus its NUL'd name. */
		size += nvpair_header_size();
		size += strlen(nvpair_name(nvp)) + 1;
		if (nvpair_type(nvp) == NV_TYPE_NVLIST) {
			/*
			 * A nested list packs its own header plus an
			 * end-marker pair (header + 1 byte).
			 */
			size += sizeof(struct nvlist_header);
			size += nvpair_header_size() + 1;
			tmpnvl = nvpair_get_nvlist(nvp);
			PJDLOG_ASSERT(tmpnvl->nvl_error == 0);
			tmpnvp = nvlist_first_nvpair(tmpnvl);
			if (tmpnvp != NULL) {
				/* Descend into the non-empty child list. */
				nvl = tmpnvl;
				nvp = tmpnvp;
				continue;
			}
		} else {
			size += nvpair_size(nvp);
		}

		/*
		 * Advance; when a list is exhausted, pop back up to the
		 * parent and resume after the pair we descended through.
		 */
		while ((nvp = nvlist_next_nvpair(nvl, nvp)) == NULL) {
			cookie = NULL;
			nvl = nvlist_get_parent(nvl, &cookie);
			if (nvl == NULL)
				goto out;
			nvp = cookie;
		}
	}

out:
	return (size);
}
/*
 * Cookie-based iterator over an nvlist: *cookiep == NULL starts at the
 * first pair, otherwise iteration resumes after the cookie.  Returns the
 * next pair's name (and optionally its type via typep), updating the
 * cookie; returns NULL at the end of the list.
 */
const char *
nvlist_next(const nvlist_t *nvl, int *typep, void **cookiep)
{
	nvpair_t *nvp;

	NVLIST_ASSERT(nvl);
	PJDLOG_ASSERT(cookiep != NULL);

	nvp = (*cookiep == NULL) ? nvlist_first_nvpair(nvl) :
	    nvlist_next_nvpair(nvl, *cookiep);
	if (nvp == NULL)
		return (NULL);

	if (typep != NULL)
		*typep = nvpair_type(nvp);
	*cookiep = nvp;
	return (nvpair_name(nvp));
}
/*
 * holds maps snapname -> { holdname, ... }; errlist is filled in with
 * snapname -> error.  If any release fails, all fail.
 */
int
dsl_dataset_user_release(nvlist_t *holds, nvlist_t *errlist)
{
	dsl_dataset_user_release_arg_t ddura;
	nvpair_t *first;
	int error;

	first = nvlist_next_nvpair(holds, NULL);
	if (first == NULL)
		return (0);	/* nothing requested */

	ddura.ddura_holds = holds;
	ddura.ddura_errlist = errlist;
	ddura.ddura_todelete = fnvlist_alloc();

	error = dsl_sync_task(nvpair_name(first),
	    dsl_dataset_user_release_check, dsl_dataset_user_release_sync,
	    &ddura, fnvlist_num_pairs(holds));
	fnvlist_free(ddura.ddura_todelete);
	return (error);
}
/*
 * Move an unattached nvpair into the list, transferring ownership.  The
 * pair is consumed (freed) rather than inserted when the list is already
 * in an error state or when a pair with the same name already exists; in
 * both cases errno is set accordingly.
 */
void
nvlist_move_nvpair(nvlist_t *nvl, nvpair_t *nvp)
{
	NVPAIR_ASSERT(nvp);
	PJDLOG_ASSERT(nvpair_nvlist(nvp) == NULL);

	/* A list already in error consumes and drops the pair. */
	if (nvlist_error(nvl) != 0) {
		nvpair_free(nvp);
		errno = nvlist_error(nvl);
		return;
	}

	/* Duplicate names are rejected; the pair is still consumed. */
	if (nvlist_exists(nvl, nvpair_name(nvp))) {
		nvpair_free(nvp);
		nvl->nvl_error = errno = EEXIST;
		return;
	}

	nvpair_insert(&nvl->nvl_head, nvp, nvl);
}
static int osd_sa_xattr_list(const struct lu_env *env, struct osd_object *obj, struct lu_buf *lb) { nvpair_t *nvp = NULL; int len, counted = 0, remain = lb->lb_len; int rc = 0; if (obj->oo_sa_xattr == NULL) { rc = __osd_xattr_cache(env, obj); if (rc) return rc; } LASSERT(obj->oo_sa_xattr); while ((nvp = nvlist_next_nvpair(obj->oo_sa_xattr, nvp)) != NULL) { const char *name = nvpair_name(nvp); if (!osd_obj2dev(obj)->od_posix_acl && (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0 || strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0)) continue; len = strlen(nvpair_name(nvp)); if (lb->lb_buf != NULL) { if (len + 1 > remain) return -ERANGE; memcpy(lb->lb_buf, name, len); lb->lb_buf += len; *((char *)lb->lb_buf) = '\0'; lb->lb_buf++; remain -= len + 1; } counted += len + 1; } return counted; }