/*
 * Get ZFS handle for the specified mount point.
 */
static zfs_handle_t *
mount2zhandle(char *mountpoint)
{
	zfs_mount_data_t cb;

	cb.match_name = mountpoint;
	cb.match_handle = NULL;
	(void) zfs_iter_root(g_zfs, match_mountpoint, &cb);
	return (cb.match_handle);
}
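For context, mount2zhandle leaves the real matching work to a match_mountpoint callback defined elsewhere. The following is a minimal sketch of such a callback, assuming only the zfs_mount_data_t fields used above (match_name, match_handle); the actual implementation may differ:

static int
match_mountpoint(zfs_handle_t *zhp, void *data)
{
	zfs_mount_data_t *cbp = data;
	char mp[ZFS_MAXPROPLEN];

	/* Only mounted filesystems can match a mount point. */
	if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM ||
	    !zfs_is_mounted(zhp, NULL)) {
		zfs_close(zhp);
		return (0);
	}

	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mp, sizeof (mp),
	    NULL, NULL, 0, B_FALSE) == 0 &&
	    strcmp(mp, cbp->match_name) == 0) {
		/* Hand the still-open handle back to mount2zhandle. */
		cbp->match_handle = zhp;
		return (1);
	}

	/* Recurse into child filesystems, then release this handle. */
	(void) zfs_iter_filesystems(zhp, match_mountpoint, data);
	zfs_close(zhp);
	return (0);
}

Note the ownership convention: every handle passed to the callback is closed here except the matched one, which is returned to the caller through the cookie.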
static int
update_zfs_shares(sa_handle_impl_t impl_handle, const char *proto)
{
	update_cookie_t udata;

	if (impl_handle->zfs_libhandle == NULL)
		return (SA_SYSTEM_ERR);

	udata.handle = impl_handle;
	udata.proto = proto;
	(void) zfs_iter_root(impl_handle->zfs_libhandle,
	    update_zfs_shares_cb, &udata);

	return (SA_OK);
}
static void
get_all_filesystems(sa_handle_impl_t impl_handle,
    zfs_handle_t ***fslist, size_t *count)
{
	get_all_cbdata_t cb = { 0 };

	cb.cb_types = ZFS_TYPE_FILESYSTEM;

	if (impl_handle->zfs_list != NULL) {
		*fslist = impl_handle->zfs_list;
		*count = impl_handle->zfs_list_count;
		return;
	}

	(void) zfs_iter_root(impl_handle->zfs_libhandle,
	    get_one_filesystem, &cb);

	impl_handle->zfs_list = *fslist = cb.cb_handles;
	impl_handle->zfs_list_count = *count = cb.cb_used;
}
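get_all_filesystems caches the handle list on first call; the collection itself happens in the get_one_filesystem callback. Below is a sketch of such a collecting callback. It assumes get_all_cbdata_t carries cb_handles, cb_used, and cb_types as used above, plus a cb_alloc capacity field, which is not shown in this section and is an assumption:

static int
get_one_filesystem(zfs_handle_t *zhp, void *data)
{
	get_all_cbdata_t *cbp = data;

	/* Skip datasets of types the caller did not ask for. */
	if (!(zfs_get_type(zhp) & cbp->cb_types)) {
		zfs_close(zhp);
		return (0);
	}

	/* Grow the handle array as needed (cb_alloc is assumed). */
	if (cbp->cb_used == cbp->cb_alloc) {
		size_t newsz = cbp->cb_alloc ? cbp->cb_alloc * 2 : 64;
		void *p = realloc(cbp->cb_handles,
		    newsz * sizeof (zfs_handle_t *));

		if (p == NULL) {
			zfs_close(zhp);
			return (1);
		}
		cbp->cb_handles = p;
		cbp->cb_alloc = newsz;
	}
	cbp->cb_handles[cbp->cb_used++] = zhp;

	/* Recurse into children; collected handles stay open. */
	return (zfs_iter_filesystems(zhp, get_one_filesystem, data));
}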
int
zfs_for_each(int argc, char **argv, int flags, zfs_type_t types,
    zfs_sort_column_t *sortcol, zprop_list_t **proplist, int limit,
    zfs_iter_cb callback, void *data)
{
	callback_data_t cb = {0};
	int ret = 0;
	zfs_node_t *node;
	uu_avl_walk_t *walk;

	avl_pool = uu_avl_pool_create("zfs_pool", sizeof (zfs_node_t),
	    offsetof(zfs_node_t, zn_avlnode), zfs_sort, UU_DEFAULT);

	if (avl_pool == NULL)
		nomem();

	cb.cb_sortcol = sortcol;
	cb.cb_flags = flags;
	cb.cb_proplist = proplist;
	cb.cb_types = types;
	cb.cb_depth_limit = limit;
	/*
	 * If cb_proplist is provided, then the zfs_handles created retain
	 * only the properties listed in cb_proplist and sortcol; the rest
	 * are pruned. The caller must therefore not access any properties
	 * other than those listed in cb_proplist/sortcol.
	 *
	 * If cb_proplist is NULL, then we retain all the properties. We
	 * always retain the zoned property, which some other properties
	 * need (userquota & friends), and the createtxg property, which
	 * we need to sort snapshots.
	 */
	if (cb.cb_proplist && *cb.cb_proplist) {
		zprop_list_t *p = *cb.cb_proplist;

		while (p) {
			if (p->pl_prop >= ZFS_PROP_TYPE &&
			    p->pl_prop < ZFS_NUM_PROPS) {
				cb.cb_props_table[p->pl_prop] = B_TRUE;
			}
			p = p->pl_next;
		}

		while (sortcol) {
			if (sortcol->sc_prop >= ZFS_PROP_TYPE &&
			    sortcol->sc_prop < ZFS_NUM_PROPS) {
				cb.cb_props_table[sortcol->sc_prop] = B_TRUE;
			}
			sortcol = sortcol->sc_next;
		}

		cb.cb_props_table[ZFS_PROP_ZONED] = B_TRUE;
		cb.cb_props_table[ZFS_PROP_CREATETXG] = B_TRUE;
	} else {
		(void) memset(cb.cb_props_table, B_TRUE,
		    sizeof (cb.cb_props_table));
	}

	if ((cb.cb_avl = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
		nomem();

	if (argc == 0) {
		/*
		 * If given no arguments, iterate over all datasets.
		 */
		cb.cb_flags |= ZFS_ITER_RECURSE;
		ret = zfs_iter_root(g_zfs, zfs_callback, &cb);
	} else {
		int i;
		zfs_handle_t *zhp;
		zfs_type_t argtype;

		/*
		 * If we're recursive, then we always allow filesystems as
		 * arguments. If we also are interested in snapshots, then we
		 * can take volumes as well.
		 */
		argtype = types;
		if (flags & ZFS_ITER_RECURSE) {
			argtype |= ZFS_TYPE_FILESYSTEM;
			if (types & ZFS_TYPE_SNAPSHOT)
				argtype |= ZFS_TYPE_VOLUME;
		}

		for (i = 0; i < argc; i++) {
			if (flags & ZFS_ITER_ARGS_CAN_BE_PATHS) {
				zhp = zfs_path_to_zhandle(g_zfs, argv[i],
				    argtype);
			} else {
				zhp = zfs_open(g_zfs, argv[i], argtype);
			}
			if (zhp != NULL)
				ret |= zfs_callback(zhp, &cb);
			else
				ret = 1;
		}
	}

	/*
	 * At this point we've got our AVL tree full of zfs handles, so iterate
	 * over each one and execute the real user callback.
	 */
	for (node = uu_avl_first(cb.cb_avl); node != NULL;
	    node = uu_avl_next(cb.cb_avl, node))
		ret |= callback(node->zn_handle, node->zn_depth, data);

	/*
	 * Finally, clean up the AVL tree.
	 */
	if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
		nomem();

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(cb.cb_avl, node);
		zfs_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(cb.cb_avl);
	uu_avl_pool_destroy(avl_pool);

	return (ret);
}
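A hypothetical caller, for illustration only (print_dataset_cb is not part of this section): iterate recursively over all filesystems and volumes, printing each dataset name indented by its depth. Passing NULL for proplist is safe because the code above only dereferences cb_proplist when it is non-NULL; the sketch assumes a limit of 0 means no depth restriction.

static int
print_dataset_cb(zfs_handle_t *zhp, int depth, void *data)
{
	(void) printf("%*s%s\n", depth * 2, "", zfs_get_name(zhp));
	return (0);
}

/* ... */
ret = zfs_for_each(argc, argv, ZFS_ITER_RECURSE,
    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, NULL, NULL, 0,
    print_dataset_cb, NULL);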
/*
 * sa_zfs_get_info(libzfs, path, mountpoint, dataset)
 *
 * Find the ZFS dataset and mountpoint for a given path
 */
int
sa_zfs_get_info(libzfs_handle_t *libzfs, char *path, char *mountpointp,
    char *datasetp)
{
	get_all_cbdata_t cb = { 0 };
	int i;
	char mountpoint[ZFS_MAXPROPLEN];
	char dataset[ZFS_MAXPROPLEN];
	char canmount[ZFS_MAXPROPLEN];
	char *dp;
	int count;
	int ret = 0;

	cb.cb_types = ZFS_TYPE_FILESYSTEM;

	if (libzfs == NULL)
		return (0);

	(void) zfs_iter_root(libzfs, get_one_filesystem, &cb);
	count = cb.cb_used;

	qsort(cb.cb_handles, count, sizeof (void *), mountpoint_compare);
	for (i = 0; i < count; i++) {
		/* must have a mountpoint */
		if (zfs_prop_get(cb.cb_handles[i], ZFS_PROP_MOUNTPOINT,
		    mountpoint, sizeof (mountpoint),
		    NULL, NULL, 0, B_FALSE) != 0) {
			/* no mountpoint */
			continue;
		}

		/* mountpoint must be a path */
		if (strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) == 0 ||
		    strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) == 0) {
			/*
			 * Search mnttab for mountpoint
			 */
			if (get_legacy_mountpoint(path, dataset,
			    ZFS_MAXPROPLEN, mountpoint,
			    ZFS_MAXPROPLEN) == 0) {
				ret = 1;
				break;
			}
			continue;
		}

		/* canmount must be set */
		canmount[0] = '\0';
		if (zfs_prop_get(cb.cb_handles[i], ZFS_PROP_CANMOUNT, canmount,
		    sizeof (canmount), NULL, NULL, 0, B_FALSE) != 0 ||
		    strcmp(canmount, "off") == 0)
			continue;

		/*
		 * At this point we have a mountable handle; datasets marked
		 * "none" or "legacy" were already skipped above.
		 */
		if (strcmp(mountpoint, path) == 0) {
			dp = (char *)zfs_get_name(cb.cb_handles[i]);
			if (dp != NULL) {
				if (datasetp != NULL)
					(void) strcpy(datasetp, dp);
				if (mountpointp != NULL)
					(void) strcpy(mountpointp, mountpoint);
				ret = 1;
			}
			break;
		}
	}

	return (ret);
}
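Hypothetical usage of sa_zfs_get_info, assuming libzfs is an already-initialized libzfs_handle_t pointer and "/export/home" is an illustrative path. Because the function copies results with strcpy, the output buffers should be at least ZFS_MAXPROPLEN bytes:

char mp[ZFS_MAXPROPLEN];
char ds[ZFS_MAXPROPLEN];

if (sa_zfs_get_info(libzfs, "/export/home", mp, ds))
	(void) printf("dataset %s is mounted at %s\n", ds, mp);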
int
main(int argc, char *argv[])
{
	config_t cnf;
	zstatus_t zstat;

	if (geteuid() != 0) {
		// Tell user to run app as root, then exit.
		fprintf(stderr, "%s must be run as root\n", argv[0]);
		exit(1);
	}

	cnf.zname[0] = '\0';
	zstat.err_message[0] = '\0';
	zstat.name = cnf.zname;

	// libzfs_init() returns NULL on failure.
	g_zfs = libzfs_init();
	if (g_zfs == NULL) {
		fprintf(stderr, "could not initialize libzfs\n");
		exit(1);
	}

	init_config(&cnf);
	init_devlist(&zstat.d);
	get_config(argc, argv, &cnf);

	zpool_iter(g_zfs, zpool_get_stats, (void *)&cnf);
	zfs_iter_root(g_zfs, zfs_get_stats, (void *)&cnf);
	zpool_iter(g_zfs, zpool_print_vdev, (void *)&zstat);

	if (cnf.sw == SW_UNDEF) {
		fprintf(stderr, "show type is not defined\n");
		return 1;
	} else if (cnf.sw == SW_POOLS) {
		if (cnf.ft == TP_UNDEF) {
			fprintf(stderr, "undefined format type\n");
			return 1;
		} else if (cnf.ft == TP_TEXT)
			show_zpools(g_zfs);
		else if (cnf.ft == TP_JSON)
			show_zpools_json(g_zfs);
		return 0;
	} else if (cnf.sw == SW_DEVSTATE) {
		find_state_in_devlist(&zstat.d, cnf.vdev);
		free_devlist(&zstat.d);
		return 0;
	} else if (cnf.sw == SW_DEVICES) {
		if (cnf.ft == TP_UNDEF) {
			fprintf(stderr, "undefined format type\n");
			return 1;
		} else if (cnf.ft == TP_TEXT)
			print_devlist_text(&zstat.d);
		else if (cnf.ft == TP_JSON)
			print_devlist_json(&zstat.d);
		return 0;
	}

	if (cnf.zpool.name == NULL || cnf.zfs.name == NULL) {
		fprintf(stderr, "could not find zpool: %s\n", cnf.zname);
		return 1;
	}

	if (cnf.sw == SW_ALL)
		print_stats(&cnf);
	else if (cnf.sw == SW_READ_OPS)
		print_stats_read_ops(&cnf);
	else if (cnf.sw == SW_WRITE_OPS)
		print_stats_write_ops(&cnf);
	else if (cnf.sw == SW_READ_BTS)
		print_stats_read_bts(&cnf);
	else if (cnf.sw == SW_WRITE_BTS)
		print_stats_write_bts(&cnf);
	else if (cnf.sw == SW_HEALTH)
		print_stats_health_bool(&cnf);
	else if (cnf.sw == SW_LOGICAL)
		print_stats_logical(&cnf);
	else if (cnf.sw == SW_COMPRESS)
		print_stats_compress(&cnf);
	else if (cnf.sw == SW_USED)
		print_stats_used(&cnf);
	else if (cnf.sw == SW_REAL_USED)
		print_stats_real_used(&cnf);
	else if (cnf.sw == SW_AVAILABLE)
		print_stats_available(&cnf);
	else if (cnf.sw == SW_DEDUPRATIO)
		print_stats_dedupratio(&cnf);
	else if (cnf.sw == SW_DDT)
		print_stats_ddt_memory(&cnf);
	else if (cnf.sw == SW_ERR_MESSAGE)
		print_status_message(&zstat);

	free_devlist(&zstat.d);
	libzfs_fini(g_zfs);
	return 0;
}
/*
 * Unshare and unmount all datasets within the given pool. We don't want to
 * rely on traversing the DSL to discover the filesystems within the pool,
 * because this may be expensive (if not all of them are mounted), and can
 * fail arbitrarily (on I/O error, for example). Instead, we walk /etc/mtab
 * and gather all the filesystems that are currently mounted.
 */
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
	int used, alloc;
	struct mnttab entry;
	size_t namelen;
	char **mountpoints = NULL;
	zfs_handle_t **datasets = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int i;
	int ret = -1;
	int flags = (force ? MS_FORCE : 0);

	namelen = strlen(zhp->zpool_name);

	/* Reopen MNTTAB to prevent reading stale data from open file */
	if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
		return (ENOENT);

	used = alloc = 0;
	while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
		/*
		 * Ignore filesystems not within this pool.
		 */
		if (entry.mnt_fstype == NULL ||
		    strncmp(entry.mnt_special, zhp->zpool_name,
		    namelen) != 0 ||
		    (entry.mnt_special[namelen] != '/' &&
#ifdef __APPLE__
		    /*
		     * On OS X, '@' is possible too since we're temporarily
		     * allowing manual snapshot mounting.
		     */
		    entry.mnt_special[namelen] != '@' &&
#endif /* __APPLE__ */
		    entry.mnt_special[namelen] != '\0'))
			continue;

		/*
		 * At this point we've found a filesystem within our pool. Add
		 * it to our growing list.
		 */
		if (used == alloc) {
			if (alloc == 0) {
				if ((mountpoints = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				if ((datasets = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				alloc = 8;
			} else {
				void *ptr;

				if ((ptr = zfs_realloc(hdl, mountpoints,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				mountpoints = ptr;

				if ((ptr = zfs_realloc(hdl, datasets,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				datasets = ptr;

				alloc *= 2;
			}
		}

		if ((mountpoints[used] = zfs_strdup(hdl,
		    entry.mnt_mountp)) == NULL)
			goto out;

		/*
		 * This is allowed to fail, in case there is some I/O error. It
		 * is only used to determine if we need to remove the
		 * underlying mountpoint, so failure is not fatal.
		 */
		datasets[used] = make_dataset_handle(hdl, entry.mnt_special);

		used++;
	}

	/*
	 * At this point, we have the entire list of filesystems, so sort it by
	 * mountpoint.
	 */
	qsort(mountpoints, used, sizeof (char *), mountpoint_compare);

	/*
	 * Walk through and first unshare everything.
	 */
	for (i = 0; i < used; i++) {
		zfs_share_proto_t *curr_proto;
		for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
		    curr_proto++) {
			if (is_shared(hdl, mountpoints[i], *curr_proto) &&
			    unshare_one(hdl, mountpoints[i],
			    mountpoints[i], *curr_proto) != 0)
				goto out;
		}
	}

	/*
	 * Now unmount everything, removing the underlying directories as
	 * appropriate.
	 */
	for (i = 0; i < used; i++) {
		if (unmount_one(hdl, mountpoints[i], flags) != 0)
			goto out;
	}

	for (i = 0; i < used; i++) {
		if (datasets[i])
			remove_mountpoint(datasets[i]);
	}

	// Surely there exists a better way to iterate a POOL to find its ZVOLs?
	zfs_iter_root(hdl, zpool_disable_volumes,
	    (void *)zpool_get_name(zhp));

	ret = 0;
out:
	for (i = 0; i < used; i++) {
		if (datasets[i])
			zfs_close(datasets[i]);
		free(mountpoints[i]);
	}
	free(datasets);
	free(mountpoints);

	return (ret);
}
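The final zfs_iter_root call above hands every pool's root dataset to a zpool_disable_volumes callback that is not shown in this section. Purely as a sketch of its plausible shape, under the assumption that it walks the dataset tree and tears down volumes belonging to the named pool (the real callback is platform-specific and may differ substantially):

static int
zpool_disable_volumes(zfs_handle_t *zhp, void *data)
{
	const char *poolname = data;
	size_t len = strlen(poolname);
	const char *name = zfs_get_name(zhp);

	/* Ignore datasets outside the target pool. */
	if (strncmp(name, poolname, len) != 0 ||
	    (name[len] != '/' && name[len] != '@' && name[len] != '\0')) {
		zfs_close(zhp);
		return (0);
	}

	if (zfs_get_type(zhp) == ZFS_TYPE_VOLUME) {
		/* Platform-specific zvol teardown would go here. */
	}

	/* Recurse into child datasets, then release this handle. */
	(void) zfs_iter_children(zhp, zpool_disable_volumes, data);
	zfs_close(zhp);
	return (0);
}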
int
zfs_for_each(int argc, char **argv, boolean_t recurse, zfs_type_t types,
    zfs_sort_column_t *sortcol, zprop_list_t **proplist,
    zfs_iter_f callback, void *data, boolean_t args_can_be_paths)
{
	callback_data_t cb;
	int ret = 0;
	zfs_node_t *node;
	uu_avl_walk_t *walk;

	avl_pool = uu_avl_pool_create("zfs_pool", sizeof (zfs_node_t),
	    offsetof(zfs_node_t, zn_avlnode), zfs_sort, UU_DEFAULT);

	if (avl_pool == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		exit(1);
	}

	cb.cb_sortcol = sortcol;
	cb.cb_recurse = recurse;
	cb.cb_proplist = proplist;
	cb.cb_types = types;
	if ((cb.cb_avl = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		exit(1);
	}

	if (argc == 0) {
		/*
		 * If given no arguments, iterate over all datasets.
		 */
		cb.cb_recurse = 1;
		ret = zfs_iter_root(g_zfs, zfs_callback, &cb);
	} else {
		int i;
		zfs_handle_t *zhp;
		zfs_type_t argtype;

		/*
		 * If we're recursive, then we always allow filesystems as
		 * arguments. If we also are interested in snapshots, then we
		 * can take volumes as well.
		 */
		argtype = types;
		if (recurse) {
			argtype |= ZFS_TYPE_FILESYSTEM;
			if (types & ZFS_TYPE_SNAPSHOT)
				argtype |= ZFS_TYPE_VOLUME;
		}

		for (i = 0; i < argc; i++) {
			if (args_can_be_paths) {
				zhp = zfs_path_to_zhandle(g_zfs, argv[i],
				    argtype);
			} else {
				zhp = zfs_open(g_zfs, argv[i], argtype);
			}
			if (zhp != NULL)
				ret |= zfs_callback(zhp, &cb);
			else
				ret = 1;
		}
	}

	/*
	 * At this point we've got our AVL tree full of zfs handles, so iterate
	 * over each one and execute the real user callback.
	 */
	for (node = uu_avl_first(cb.cb_avl); node != NULL;
	    node = uu_avl_next(cb.cb_avl, node))
		ret |= callback(node->zn_handle, data);

	/*
	 * Finally, clean up the AVL tree.
	 */
	if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		exit(1);
	}

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(cb.cb_avl, node);
		zfs_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(cb.cb_avl);
	uu_avl_pool_destroy(avl_pool);

	return (ret);
}