/*
 * Iterate over root datasets, calling the given function for each.  The zfs
 * handle passed each time must be explicitly closed by the callback.
 */
int
zfs_iter_root(libzfs_handle_t *hdl, zfs_iter_f func, void *data)
{
	config_node_t *cn;
	zfs_handle_t *zhp;
	int ret;

	if (namespace_reload(hdl) != 0)
		return (-1);

	for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
	    cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {

		if (check_restricted(cn->cn_name))
			continue;

		if ((zhp = make_dataset_handle(hdl, cn->cn_name)) == NULL)
			continue;

		if ((ret = func(zhp, data)) != 0)
			return (ret);
	}

	return (0);
}
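/*
 * A minimal usage sketch for zfs_iter_root().  Because the iterator hands
 * ownership of each handle to the callback (see the comment above), the
 * callback must close it.  The names print_root_cb and print_root_datasets
 * are illustrative; libzfs_init(), libzfs_fini(), zfs_get_name() and
 * zfs_close() are assumed from the public libzfs API.
 */
static int
print_root_cb(zfs_handle_t *zhp, void *data)
{
	/* Print the dataset name, then release the handle we now own. */
	(void) printf("%s\n", zfs_get_name(zhp));
	zfs_close(zhp);
	return (0);
}

static void
print_root_datasets(void)
{
	libzfs_handle_t *hdl;

	if ((hdl = libzfs_init()) == NULL)
		return;
	(void) zfs_iter_root(hdl, print_root_cb, NULL);
	libzfs_fini(hdl);
}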
/**
 * Iterate over root datasets
 * @param p_libzfshd: the libzfs handle
 * @param func: the function to call for each zfs
 * @param data: opaque data to pass along to the callback function
 * @param ppsz_error: the error message if any
 * @return 0 in case of success, the error code otherwise
 */
int libzfs_zfs_iter(libzfs_handle_t *p_libzfshd, zfs_iter_f func, void *data,
		    const char **ppsz_error)
{
	config_node_t *p_cn;
	zfs_handle_t *p_zfs;
	int i_error;

	if(namespace_reload(p_libzfshd))
	{
		*ppsz_error = "Unable to reload the namespace";
		return 1;
	}

	for(p_cn = uu_avl_first(p_libzfshd->libzfs_ns_avl); p_cn;
	    p_cn = uu_avl_next(p_libzfshd->libzfs_ns_avl, p_cn))
	{
		if(!(p_zfs = libzfs_make_dataset_handle(p_libzfshd, p_cn->cn_name)))
		{
			*ppsz_error = "Unable to create the zfs_handle for the zfs object";
			return 1;
		}

		if((i_error = func(p_zfs, data)))
		{
			*ppsz_error = "Error in the callback function";
			libzfs_zfs_close(p_zfs);
			return i_error;
		}
		libzfs_zfs_close(p_zfs);
	}

	return 0;
}
/*
 * Iterate over all pools in the list, executing the callback for each
 */
int
pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
    void *data)
{
	zpool_node_t *node, *next_node;
	int ret = 0;

	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
		next_node = uu_avl_next(zlp->zl_avl, node);
		if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
		    unavail)
			ret |= func(node->zn_handle, data);
	}

	return (ret);
}
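/*
 * A usage sketch for pool_list_iter().  The loop above only walks handles
 * that already live in the zpool_list_t, so this sketch assumes the list,
 * not the callback, owns the handles and therefore does not close them.
 * count_pools_cb is an illustrative name, and the zpool_iter_f signature
 * (handle, user data) is taken from the call above.
 */
static int
count_pools_cb(zpool_handle_t *zhp, void *data)
{
	int *count = data;

	(*count)++;
	return (0);
}

/*
 * A caller holding a zpool_list_t *zlp could then count the healthy pools
 * with:
 *
 *	int n = 0;
 *	(void) pool_list_iter(zlp, B_FALSE, count_pools_cb, &n);
 *
 * Passing B_FALSE for unavail skips pools in POOL_STATE_UNAVAIL, matching
 * the check in the loop above.
 */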
/**
 * Iterate over the zpools
 * @param p_libzfshd: the libzfs handle
 * @param func: the function to call for each zpool
 * @param data: opaque data to pass along to the callback function
 * @param ppsz_error: the error message if any
 * @return 0 in case of success, the error code otherwise
 */
int libzfs_zpool_iter(libzfs_handle_t *p_libzfshd, zpool_iter_f func,
		      void *data, const char **ppsz_error)
{
	config_node_t *p_config_node;

	/*
	 * If someone makes a recursive call to zpool_iter(), we want to avoid
	 * refreshing the namespace because that will invalidate the parent
	 * context.  We allow recursive calls, but simply re-use the same
	 * namespace AVL tree.
	 */
	if(!p_libzfshd->libzfs_pool_iter && namespace_reload(p_libzfshd))
	{
		*ppsz_error = "unable to reload the namespace";
		return -1;
	}

	p_libzfshd->libzfs_pool_iter++;
	for(p_config_node = uu_avl_first(p_libzfshd->libzfs_ns_avl); p_config_node;
	    p_config_node = uu_avl_next(p_libzfshd->libzfs_ns_avl, p_config_node))
	{
		zpool_handle_t *p_zpool = libzfs_zpool_open_canfail(p_libzfshd,
		    p_config_node->cn_name, ppsz_error);
		if(!p_zpool)
			continue;

		/* Call the callback; a non-zero return aborts the iteration */
		int i_ret = func(p_zpool, data);
		libzfs_zpool_close(p_zpool);

		if(i_ret)
		{
			*ppsz_error = "error when calling the callback function";
			p_libzfshd->libzfs_pool_iter--;
			return i_ret;
		}
	}
	p_libzfshd->libzfs_pool_iter--;

	return 0;
}
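/*
 * A usage sketch for the wrapper above, showing the ppsz_error
 * out-parameter pattern.  Only functions visible in this file are assumed;
 * count_zpools_cb and count_zpools are illustrative names.  Unlike the
 * upstream iterators, libzfs_zpool_iter() closes each handle itself after
 * the callback returns, so the callback must not close it.
 */
static int count_zpools_cb(zpool_handle_t *p_zpool, void *data)
{
	int *pi_count = data;

	(*pi_count)++;
	return 0;
}

static int count_zpools(libzfs_handle_t *p_libzfshd)
{
	const char *psz_error = NULL;
	int i_count = 0;

	if(libzfs_zpool_iter(p_libzfshd, count_zpools_cb, &i_count, &psz_error))
	{
		fprintf(stderr, "libzfs_zpool_iter: %s\n", psz_error);
		return -1;
	}
	return i_count;
}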
/*
 * Iterate over all pools in the system.
 */
int
zpool_iter(libzfs_handle_t *hdl, zpool_iter_f func, void *data)
{
	config_node_t *cn;
	zpool_handle_t *zhp;
	int ret;

	/*
	 * If someone makes a recursive call to zpool_iter(), we want to avoid
	 * refreshing the namespace because that will invalidate the parent
	 * context.  We allow recursive calls, but simply re-use the same
	 * namespace AVL tree.
	 */
	if (!hdl->libzfs_pool_iter && namespace_reload(hdl) != 0)
		return (-1);

	hdl->libzfs_pool_iter++;
	for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
	    cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {

		if (check_restricted(cn->cn_name))
			continue;

		if (zpool_open_silent(hdl, cn->cn_name, &zhp) != 0) {
			hdl->libzfs_pool_iter--;
			return (-1);
		}

		if (zhp == NULL)
			continue;

		if ((ret = func(zhp, data)) != 0) {
			hdl->libzfs_pool_iter--;
			return (ret);
		}
	}
	hdl->libzfs_pool_iter--;

	return (0);
}
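/*
 * A minimal zpool_iter_f callback sketch for zpool_iter().  As the loop
 * above shows, zpool_iter() never closes the handles it opens, so the
 * callback closes each one.  print_pool_cb is an illustrative name;
 * zpool_get_name() and zpool_close() are assumed from the public libzfs
 * API.
 */
static int
print_pool_cb(zpool_handle_t *zhp, void *data)
{
	(void) printf("%s\n", zpool_get_name(zhp));
	zpool_close(zhp);
	return (0);
}

/*
 * A caller that already holds a libzfs_handle_t from libzfs_init() can then
 * simply do: (void) zpool_iter(hdl, print_pool_cb, NULL);
 */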
int
zfs_for_each(int argc, char **argv, int flags, zfs_type_t types,
    zfs_sort_column_t *sortcol, zprop_list_t **proplist, int limit,
    zfs_iter_cb callback, void *data)
{
	callback_data_t cb = {0};
	int ret = 0;
	zfs_node_t *node;
	uu_avl_walk_t *walk;

	avl_pool = uu_avl_pool_create("zfs_pool", sizeof (zfs_node_t),
	    offsetof(zfs_node_t, zn_avlnode), zfs_sort, UU_DEFAULT);

	if (avl_pool == NULL)
		nomem();

	cb.cb_sortcol = sortcol;
	cb.cb_flags = flags;
	cb.cb_proplist = proplist;
	cb.cb_types = types;
	cb.cb_depth_limit = limit;
	/*
	 * If cb_proplist is provided then in the zfs_handles created we
	 * retain only those properties listed in cb_proplist and sortcol.
	 * The rest are pruned.  So, the caller should make sure that no
	 * properties other than those listed in cb_proplist/sortcol are
	 * accessed.
	 *
	 * If cb_proplist is NULL then we retain all the properties.  We
	 * always retain the zoned property, which some other properties
	 * need (userquota & friends), and the createtxg property, which
	 * we need to sort snapshots.
	 */
	if (cb.cb_proplist && *cb.cb_proplist) {
		zprop_list_t *p = *cb.cb_proplist;

		while (p) {
			if (p->pl_prop >= ZFS_PROP_TYPE &&
			    p->pl_prop < ZFS_NUM_PROPS) {
				cb.cb_props_table[p->pl_prop] = B_TRUE;
			}
			p = p->pl_next;
		}

		while (sortcol) {
			if (sortcol->sc_prop >= ZFS_PROP_TYPE &&
			    sortcol->sc_prop < ZFS_NUM_PROPS) {
				cb.cb_props_table[sortcol->sc_prop] = B_TRUE;
			}
			sortcol = sortcol->sc_next;
		}

		cb.cb_props_table[ZFS_PROP_ZONED] = B_TRUE;
		cb.cb_props_table[ZFS_PROP_CREATETXG] = B_TRUE;
	} else {
		(void) memset(cb.cb_props_table, B_TRUE,
		    sizeof (cb.cb_props_table));
	}

	if ((cb.cb_avl = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
		nomem();

	if (argc == 0) {
		/*
		 * If given no arguments, iterate over all datasets.
		 */
		cb.cb_flags |= ZFS_ITER_RECURSE;
		ret = zfs_iter_root(g_zfs, zfs_callback, &cb);
	} else {
		int i;
		zfs_handle_t *zhp;
		zfs_type_t argtype;

		/*
		 * If we're recursive, then we always allow filesystems as
		 * arguments.  If we also are interested in snapshots, then we
		 * can take volumes as well.
		 */
		argtype = types;
		if (flags & ZFS_ITER_RECURSE) {
			argtype |= ZFS_TYPE_FILESYSTEM;
			if (types & ZFS_TYPE_SNAPSHOT)
				argtype |= ZFS_TYPE_VOLUME;
		}

		for (i = 0; i < argc; i++) {
			if (flags & ZFS_ITER_ARGS_CAN_BE_PATHS) {
				zhp = zfs_path_to_zhandle(g_zfs, argv[i],
				    argtype);
			} else {
				zhp = zfs_open(g_zfs, argv[i], argtype);
			}
			if (zhp != NULL)
				ret |= zfs_callback(zhp, &cb);
			else
				ret = 1;
		}
	}

	/*
	 * At this point we've got our AVL tree full of zfs handles, so iterate
	 * over each one and execute the real user callback.
	 */
	for (node = uu_avl_first(cb.cb_avl); node != NULL;
	    node = uu_avl_next(cb.cb_avl, node))
		ret |= callback(node->zn_handle, node->zn_depth, data);

	/*
	 * Finally, clean up the AVL tree.
	 */
	if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
		nomem();

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(cb.cb_avl, node);
		zfs_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(cb.cb_avl);
	uu_avl_pool_destroy(avl_pool);

	return (ret);
}
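/*
 * A usage sketch for zfs_for_each(), assuming the zfs_iter_cb signature
 * implied by the walk above: (handle, depth, user data).  The callback must
 * not close the handle, since zfs_for_each() closes every handle in its
 * cleanup walk.  list_names_cb and the argument choices are illustrative
 * only.
 */
static int
list_names_cb(zfs_handle_t *zhp, int depth, void *data)
{
	/* Indent two spaces per level of depth below the named roots. */
	(void) printf("%*s%s\n", depth * 2, "", zfs_get_name(zhp));
	return (0);
}

/*
 * Recurse over the named filesystems (or all datasets if argc == 0), with
 * the default sort order and no property pruning:
 *
 *	ret = zfs_for_each(argc, argv, ZFS_ITER_RECURSE, ZFS_TYPE_FILESYSTEM,
 *	    NULL, NULL, 0, list_names_cb, NULL);
 */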
int
zfs_for_each(int argc, char **argv, boolean_t recurse, zfs_type_t types,
    zfs_sort_column_t *sortcol, zprop_list_t **proplist, zfs_iter_f callback,
    void *data, boolean_t args_can_be_paths)
{
	callback_data_t cb;
	int ret = 0;
	zfs_node_t *node;
	uu_avl_walk_t *walk;

	avl_pool = uu_avl_pool_create("zfs_pool", sizeof (zfs_node_t),
	    offsetof(zfs_node_t, zn_avlnode), zfs_sort, UU_DEFAULT);

	if (avl_pool == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		exit(1);
	}

	cb.cb_sortcol = sortcol;
	cb.cb_recurse = recurse;
	cb.cb_proplist = proplist;
	cb.cb_types = types;
	if ((cb.cb_avl = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		exit(1);
	}

	if (argc == 0) {
		/*
		 * If given no arguments, iterate over all datasets.
		 */
		cb.cb_recurse = 1;
		ret = zfs_iter_root(g_zfs, zfs_callback, &cb);
	} else {
		int i;
		zfs_handle_t *zhp;
		zfs_type_t argtype;

		/*
		 * If we're recursive, then we always allow filesystems as
		 * arguments.  If we also are interested in snapshots, then we
		 * can take volumes as well.
		 */
		argtype = types;
		if (recurse) {
			argtype |= ZFS_TYPE_FILESYSTEM;
			if (types & ZFS_TYPE_SNAPSHOT)
				argtype |= ZFS_TYPE_VOLUME;
		}

		for (i = 0; i < argc; i++) {
			if (args_can_be_paths) {
				zhp = zfs_path_to_zhandle(g_zfs, argv[i],
				    argtype);
			} else {
				zhp = zfs_open(g_zfs, argv[i], argtype);
			}
			if (zhp != NULL)
				ret |= zfs_callback(zhp, &cb);
			else
				ret = 1;
		}
	}

	/*
	 * At this point we've got our AVL tree full of zfs handles, so iterate
	 * over each one and execute the real user callback.
	 */
	for (node = uu_avl_first(cb.cb_avl); node != NULL;
	    node = uu_avl_next(cb.cb_avl, node))
		ret |= callback(node->zn_handle, data);

	/*
	 * Finally, clean up the AVL tree.
	 */
	if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory"));
		exit(1);
	}

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(cb.cb_avl, node);
		zfs_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(cb.cb_avl);
	uu_avl_pool_destroy(avl_pool);

	return (ret);
}