void
internal_init()
{
        if ((entity_pool = uu_list_pool_create("entities", sizeof (entity_t),
            offsetof(entity_t, sc_node), entity_cmp, 0)) == NULL)
                uu_die(gettext("entity list pool creation failed: %s\n"),
                    uu_strerror(uu_error()));

        if ((pgroup_pool = uu_list_pool_create("property_groups",
            sizeof (pgroup_t), offsetof(pgroup_t, sc_node), pgroup_cmp,
            0)) == NULL)
                uu_die(
                    gettext("property group list pool creation failed: %s\n"),
                    uu_strerror(uu_error()));

        if ((property_pool = uu_list_pool_create("properties",
            sizeof (property_t), offsetof(property_t, sc_node), property_cmp,
            0)) == NULL)
                uu_die(gettext("property list pool creation failed: %s\n"),
                    uu_strerror(uu_error()));

        if ((value_pool = uu_list_pool_create("property_values",
            sizeof (value_t), offsetof(value_t, sc_node), value_cmp,
            0)) == NULL)
                uu_die(
                    gettext("property value list pool creation failed: %s\n"),
                    uu_strerror(uu_error()));
}
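/*
 * Illustrative sketch (not part of the original source): the pools created
 * above follow the usual libuutil pattern.  Each element type embeds a
 * uu_list_node_t at the offset given to uu_list_pool_create(), and every
 * element must be passed through uu_list_node_init() before it is inserted
 * into a list created from that pool.  The struct and helper below are
 * hypothetical; <libuutil.h>, <stdlib.h> and <string.h> are assumed.
 */
typedef struct example_ent {
        uu_list_node_t  en_node;        /* offset registered with the pool */
        char            en_name[64];
} example_ent_t;

static example_ent_t *
example_ent_add(uu_list_pool_t *pool, uu_list_t *list, const char *name)
{
        example_ent_t *ep;

        if ((ep = calloc(1, sizeof (example_ent_t))) == NULL)
                return (NULL);
        (void) strlcpy(ep->en_name, name, sizeof (ep->en_name));

        /* Tie the embedded node to its pool, then append to an unsorted list. */
        uu_list_node_init(ep, &ep->en_node, pool);
        (void) uu_list_insert_after(list, uu_list_last(list), ep);
        return (ep);
}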
void
wait_init()
{
        struct rlimit fd_new;

        (void) getrlimit(RLIMIT_NOFILE, &init_fd_rlimit);
        (void) getrlimit(RLIMIT_NOFILE, &fd_new);

        fd_new.rlim_max = fd_new.rlim_cur = WAIT_FILES;

        (void) setrlimit(RLIMIT_NOFILE, &fd_new);

        if ((port_fd = port_create()) == -1)
                uu_die("wait_init couldn't port_create");

        wait_info_pool = uu_list_pool_create("wait_info", sizeof (wait_info_t),
            offsetof(wait_info_t, wi_link), NULL, UU_LIST_POOL_DEBUG);
        if (wait_info_pool == NULL)
                uu_die("wait_init couldn't create wait_info_pool");

        wait_info_list = uu_list_create(wait_info_pool, wait_info_list, 0);
        if (wait_info_list == NULL)
                uu_die("wait_init couldn't create wait_info_list");

        (void) pthread_mutex_init(&wait_info_lock, &mutex_attrs);
}
/*
 * Add the proto list contained in array 'plist' to entry 'entry', storing
 * aside the scf_value_t's created and added to the entry in a list that the
 * pointer referenced by sv_list is made to point at.
 */
static void
add_proto_list(scf_transaction_entry_t *entry, scf_handle_t *hdl,
    char **plist, uu_list_t **sv_list)
{
        scf_val_el_t    *sv_el;
        int             i;

        static uu_list_pool_t *sv_pool = NULL;

        if ((sv_pool == NULL) &&
            ((sv_pool = uu_list_pool_create("sv_pool", sizeof (scf_val_el_t),
            offsetof(scf_val_el_t, link), NULL, UU_LIST_POOL_DEBUG)) == NULL))
                uu_die(gettext("Error: %s.\n"), uu_strerror(uu_error()));

        if ((*sv_list = uu_list_create(sv_pool, NULL, 0)) == NULL)
                uu_die(gettext("Error: %s.\n"), uu_strerror(uu_error()));

        for (i = 0; plist[i] != NULL; i++) {
                if ((sv_el = malloc(sizeof (scf_val_el_t))) == NULL)
                        uu_die(gettext("Error:"));

                if (((sv_el->val = scf_value_create(hdl)) == NULL) ||
                    (scf_value_set_astring(sv_el->val, plist[i]) != 0) ||
                    (scf_entry_add_value(entry, sv_el->val) != 0))
                        scfdie();

                uu_list_node_init(sv_el, &sv_el->link, sv_pool);
                (void) uu_list_insert_after(*sv_list, NULL, sv_el);
        }
}
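/*
 * Sketch of a plausible companion clean-up routine (assumed, not taken from
 * the original source): once the transaction is finished, the scf_value_t's
 * stashed in sv_list need to be destroyed and the list itself torn down.
 * The function name is hypothetical; the scf_val_el_t members come from the
 * code above.
 */
static void
remove_proto_list(scf_transaction_entry_t *entry, uu_list_t *sv_list)
{
        scf_val_el_t    *sv_el;
        void            *cookie = NULL;

        /* Detach any values still associated with the entry. */
        scf_entry_reset(entry);

        /* Drain the list, destroying each value and freeing its node. */
        while ((sv_el = uu_list_teardown(sv_list, &cookie)) != NULL) {
                scf_value_destroy(sv_el->val);
                free(sv_el);
        }

        uu_list_destroy(sv_list);
}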
int
tlx_init(void)
{
        if ((conn_ind_pool = uu_list_pool_create("conn_ind_pool",
            sizeof (tlx_conn_ind_t), offsetof(tlx_conn_ind_t, link),
            NULL, UU_LIST_POOL_DEBUG)) == NULL) {
                error_msg("%s: %s", gettext("Failed to create uu pool"),
                    uu_strerror(uu_error()));
                return (-1);
        }

        return (0);
}
int
config_init(void)
{
        if ((rep_handle = scf_handle_create(SCF_VERSION)) == NULL) {
                error_msg("%s: %s",
                    gettext("Failed to create repository handle"),
                    scf_strerror(scf_error()));
                return (-1);
        } else if (make_handle_bound(rep_handle) == -1) {
                /* let config_fini clean-up */
                return (-1);
        }

        if ((proto_info_pool = uu_list_pool_create("proto_info_pool",
            sizeof (proto_info_t), offsetof(proto_info_t, link),
            proto_info_compare, UU_LIST_POOL_DEBUG)) == NULL) {
                error_msg(gettext("Failed to create uu list pool: %s"),
                    uu_strerror(uu_error()));
                return (-1);
        }

        return (0);
}
/*
 * Setup structures used for method termination monitoring.
 * Returns -1 if an allocation failure occurred, else 0.
 */
int
method_init(void)
{
        struct rlimit rl;

        /*
         * Save aside the old file limit and impose one large enough to
         * support all the /proc file handles we could have open.
         */
        (void) getrlimit(RLIMIT_NOFILE, &saved_file_limit);

        rl.rlim_cur = rl.rlim_max = INETD_NOFILE_LIMIT;
        if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
                error_msg("Failed to set file limit: %s", strerror(errno));
                return (-1);
        }

        if ((method_pool = uu_list_pool_create("method_pool",
            sizeof (method_el_t), offsetof(method_el_t, link), NULL,
            UU_LIST_POOL_DEBUG)) == NULL) {
                error_msg("%s: %s", gettext("Failed to create method pool"),
                    uu_strerror(uu_error()));
                return (-1);
        }

        if ((method_list = uu_list_create(method_pool, NULL, 0)) == NULL) {
                error_msg("%s: %s", gettext("Failed to create method list"),
                    uu_strerror(uu_error()));
                /* let method_fini() clean-up */
                return (-1);
        }

        return (0);
}
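/*
 * Hedged sketch (assumed counterpart, not reproduced from the original
 * source): the method_fini() clean-up path referenced above would undo
 * method_init() by tearing down the list and pool and restoring the saved
 * file limit.  Freeing remaining entries with free() assumes they were
 * heap-allocated.
 */
void
method_fini(void)
{
        if (method_list != NULL) {
                void            *cookie = NULL;
                method_el_t     *me;

                /* Free any remaining entries before destroying the list. */
                while ((me = uu_list_teardown(method_list, &cookie)) != NULL)
                        free(me);
                uu_list_destroy(method_list);
                method_list = NULL;
        }

        if (method_pool != NULL) {
                uu_list_pool_destroy(method_pool);
                method_pool = NULL;
        }

        /* Restore the file limit saved in method_init(). */
        (void) setrlimit(RLIMIT_NOFILE, &saved_file_limit);
}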
/*
 * Given a ZFS handle and a property, construct a complete list of datasets
 * that need to be modified as part of this process.  For anything but the
 * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
 * Otherwise, we iterate over all children and look for any datasets that
 * inherit the property.  For each such dataset, we add it to the list and
 * mark whether it was shared beforehand.
 */
prop_changelist_t *
changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
    int mnt_flags)
{
        prop_changelist_t *clp;
        prop_changenode_t *cn;
        zfs_handle_t *temp;
        char property[ZFS_MAXPROPLEN];
        uu_compare_fn_t *compare = NULL;
        boolean_t legacy = B_FALSE;

        if ((clp = zfs_alloc(zhp->zfs_hdl,
            sizeof (prop_changelist_t))) == NULL)
                return (NULL);

        /*
         * For mountpoint-related tasks, we want to sort everything by
         * mountpoint, so that we mount and unmount them in the appropriate
         * order, regardless of their position in the hierarchy.
         */
        if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
            prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS ||
            prop == ZFS_PROP_SHARESMB) {

                if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property,
                    sizeof (property), NULL, NULL, 0, B_FALSE) == 0 &&
                    (strcmp(property, "legacy") == 0 ||
                    strcmp(property, "none") == 0)) {
                        legacy = B_TRUE;
                }
                if (!legacy) {
                        compare = compare_mountpoints;
                        clp->cl_sorted = B_TRUE;
                }
        }

        clp->cl_pool = uu_list_pool_create("changelist_pool",
            sizeof (prop_changenode_t),
            offsetof(prop_changenode_t, cn_listnode), compare, 0);
        if (clp->cl_pool == NULL) {
                assert(uu_error() == UU_ERROR_NO_MEMORY);
                (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
                changelist_free(clp);
                return (NULL);
        }

        clp->cl_list = uu_list_create(clp->cl_pool, NULL,
            clp->cl_sorted ? UU_LIST_SORTED : 0);
        clp->cl_gflags = gather_flags;
        clp->cl_mflags = mnt_flags;

        if (clp->cl_list == NULL) {
                assert(uu_error() == UU_ERROR_NO_MEMORY);
                (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
                changelist_free(clp);
                return (NULL);
        }

        /*
         * If this is a rename or the 'zoned' property, we pretend we're
         * changing the mountpoint and flag it so we can catch all children in
         * change_one().
         *
         * Flag cl_alldependents to catch all children plus the dependents
         * (clones) that are not in the hierarchy.
         */
        if (prop == ZFS_PROP_NAME) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
                clp->cl_alldependents = B_TRUE;
        } else if (prop == ZFS_PROP_ZONED) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
                clp->cl_allchildren = B_TRUE;
        } else if (prop == ZFS_PROP_CANMOUNT) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
        } else if (prop == ZFS_PROP_VOLSIZE) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
        } else {
                clp->cl_prop = prop;
        }
        clp->cl_realprop = prop;

        if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
            clp->cl_prop != ZFS_PROP_SHARENFS &&
            clp->cl_prop != ZFS_PROP_SHARESMB)
                return (clp);

        /*
         * If watching SHARENFS or SHARESMB then also watch its companion
         * property.
         */
        if (clp->cl_prop == ZFS_PROP_SHARENFS)
                clp->cl_shareprop = ZFS_PROP_SHARESMB;
        else if (clp->cl_prop == ZFS_PROP_SHARESMB)
                clp->cl_shareprop = ZFS_PROP_SHARENFS;

        if (clp->cl_alldependents) {
                if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
                        changelist_free(clp);
                        return (NULL);
                }
        } else if (zfs_iter_children(zhp, change_one, clp) != 0) {
                changelist_free(clp);
                return (NULL);
        }

        /*
         * We have to re-open ourselves because we auto-close all the handles
         * and can't tell the difference.
         */
        if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
            ZFS_TYPE_DATASET)) == NULL) {
                changelist_free(clp);
                return (NULL);
        }

        /*
         * Always add ourself to the list.  We add ourselves to the end so
         * that we're the last to be unmounted.
         */
        if ((cn = zfs_alloc(zhp->zfs_hdl,
            sizeof (prop_changenode_t))) == NULL) {
                zfs_close(temp);
                changelist_free(clp);
                return (NULL);
        }

        cn->cn_handle = temp;
        cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
            zfs_is_mounted(temp, NULL);
        cn->cn_shared = zfs_is_shared(temp);
        cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
        cn->cn_needpost = B_TRUE;

        uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
        if (clp->cl_sorted) {
                uu_list_index_t idx;

                (void) uu_list_find(clp->cl_list, cn, NULL, &idx);
                uu_list_insert(clp->cl_list, cn, idx);
        } else {
                /*
                 * Add the target dataset to the end of the list.
                 * The list is not really unsorted. The list will be
                 * in reverse dataset name order. This is necessary
                 * when the original mountpoint is legacy or none.
                 */
                verify(uu_list_insert_after(clp->cl_list,
                    uu_list_last(clp->cl_list), cn) == 0);
        }

        /*
         * If the mountpoint property was previously 'legacy', or 'none',
         * record it as the behavior of changelist_postfix() will be
         * different.
         */
        if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) && legacy) {
                /*
                 * do not automatically mount ex-legacy datasets if
                 * we specifically set canmount to noauto
                 */
                if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) !=
                    ZFS_CANMOUNT_NOAUTO)
                        clp->cl_waslegacy = B_TRUE;
        }

        return (clp);
}
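/*
 * Usage sketch (assumption, not from the original source): callers in libzfs
 * typically bracket a property change with changelist_prefix() to unmount or
 * unshare the gathered datasets and changelist_postfix() to remount or
 * reshare them afterwards.  The helper below is hypothetical, and
 * apply_mountpoint() stands in for whatever call actually changes the
 * property.
 */
static int
example_change_mountpoint(zfs_handle_t *zhp, const char *newmp)
{
        prop_changelist_t *clp;
        int err;

        if ((clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, 0)) == NULL)
                return (-1);

        if ((err = changelist_prefix(clp)) == 0) {
                err = apply_mountpoint(zhp, newmp);     /* hypothetical */
                (void) changelist_postfix(clp);
        }

        changelist_free(clp);
        return (err);
}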
/*
 * Given a ZFS handle and a property, construct a complete list of datasets
 * that need to be modified as part of this process.  For anything but the
 * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
 * Otherwise, we iterate over all children and look for any datasets that
 * inherit the property.  For each such dataset, we add it to the list and
 * mark whether it was shared beforehand.
 */
prop_changelist_t *
changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
{
        prop_changelist_t *clp;
        prop_changenode_t *cn;
        zfs_handle_t *temp;
        char property[ZFS_MAXPROPLEN];
        uu_compare_fn_t *compare = NULL;

        if ((clp = zfs_alloc(zhp->zfs_hdl,
            sizeof (prop_changelist_t))) == NULL)
                return (NULL);

        /*
         * For mountpoint-related tasks, we want to sort everything by
         * mountpoint, so that we mount and unmount them in the appropriate
         * order, regardless of their position in the hierarchy.
         */
        if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
            prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS) {
                compare = compare_mountpoints;
                clp->cl_sorted = B_TRUE;
        }

        clp->cl_pool = uu_list_pool_create("changelist_pool",
            sizeof (prop_changenode_t),
            offsetof(prop_changenode_t, cn_listnode), compare, 0);
        if (clp->cl_pool == NULL) {
                assert(uu_error() == UU_ERROR_NO_MEMORY);
                (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
                changelist_free(clp);
                return (NULL);
        }

        clp->cl_list = uu_list_create(clp->cl_pool, NULL,
            clp->cl_sorted ? UU_LIST_SORTED : 0);
        clp->cl_flags = flags;

        if (clp->cl_list == NULL) {
                assert(uu_error() == UU_ERROR_NO_MEMORY);
                (void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
                changelist_free(clp);
                return (NULL);
        }

        /*
         * If this is a rename or the 'zoned' property, we pretend we're
         * changing the mountpoint and flag it so we can catch all children in
         * change_one().
         *
         * Flag cl_alldependents to catch all children plus the dependents
         * (clones) that are not in the hierarchy.
         */
        if (prop == ZFS_PROP_NAME) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
                clp->cl_alldependents = B_TRUE;
        } else if (prop == ZFS_PROP_ZONED) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
                clp->cl_allchildren = B_TRUE;
        } else if (prop == ZFS_PROP_CANMOUNT) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
        } else if (prop == ZFS_PROP_VOLSIZE) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
        } else if (prop == ZFS_PROP_VERSION) {
                clp->cl_prop = ZFS_PROP_MOUNTPOINT;
        } else {
                clp->cl_prop = prop;
        }
        clp->cl_realprop = prop;

        if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
            clp->cl_prop != ZFS_PROP_SHARENFS &&
            clp->cl_prop != ZFS_PROP_SHAREISCSI)
                return (clp);

        if (clp->cl_alldependents) {
                if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
                        changelist_free(clp);
                        return (NULL);
                }
        } else if (zfs_iter_children(zhp, change_one, clp) != 0) {
                changelist_free(clp);
                return (NULL);
        }

        /*
         * We have to re-open ourselves because we auto-close all the handles
         * and can't tell the difference.
         */
        if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
            ZFS_TYPE_ANY)) == NULL) {
                changelist_free(clp);
                return (NULL);
        }

        /*
         * Always add ourself to the list.  We add ourselves to the end so
         * that we're the last to be unmounted.
         */
        if ((cn = zfs_alloc(zhp->zfs_hdl,
            sizeof (prop_changenode_t))) == NULL) {
                zfs_close(temp);
                changelist_free(clp);
                return (NULL);
        }

        cn->cn_handle = temp;
        cn->cn_mounted = zfs_is_mounted(temp, NULL);
        cn->cn_shared = zfs_is_shared(temp);
#ifndef __APPLE__
        cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
#endif /* !__APPLE__ */

        uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
        if (clp->cl_sorted) {
                uu_list_index_t idx;

                (void) uu_list_find(clp->cl_list, cn, NULL, &idx);
                uu_list_insert(clp->cl_list, cn, idx);
        } else {
                verify(uu_list_insert_after(clp->cl_list,
                    uu_list_last(clp->cl_list), cn) == 0);
        }

        /*
         * If the mountpoint property was previously 'legacy', or 'none',
         * record it as the behavior of changelist_postfix() will be
         * different.
         */
        if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) &&
            (zfs_prop_get(zhp, prop, property, sizeof (property),
            NULL, NULL, 0, B_FALSE) == 0 &&
            (strcmp(property, "legacy") == 0 ||
            strcmp(property, "none") == 0)))
                clp->cl_waslegacy = B_TRUE;

        return (clp);
}
int
main(int argc, char *argv[])
{
        int c;
        scf_walk_callback callback;
        int flags;
        int err;

        (void) setlocale(LC_ALL, "");
        (void) textdomain(TEXT_DOMAIN);

        return_code = UU_EXIT_OK;

        (void) uu_setpname(argv[0]);

        prop_pool = uu_list_pool_create("properties",
            sizeof (svcprop_prop_node_t),
            offsetof(svcprop_prop_node_t, spn_list_node), NULL, 0);
        if (prop_pool == NULL)
                uu_die("%s\n", uu_strerror(uu_error()));

        prop_list = uu_list_create(prop_pool, NULL, 0);

        hndl = scf_handle_create(SCF_VERSION);
        if (hndl == NULL)
                scfdie();

        while ((c = getopt(argc, argv, "Ccfp:qs:tvwz:")) != -1) {
                switch (c) {
                case 'C':
                        if (cflag || sflag || wait)
                                usage();        /* Not with -c, -s or -w */
                        Cflag++;
                        snapshot = NULL;
                        break;

                case 'c':
                        if (Cflag || sflag || wait)
                                usage();        /* Not with -C, -s or -w */
                        cflag++;
                        snapshot = NULL;
                        break;

                case 'f':
                        types = 1;
                        fmris = 1;
                        break;

                case 'p':
                        add_prop(optarg);
                        break;

                case 'q':
                        quiet = 1;
                        warn = quiet_warn;
                        die = quiet_die;
                        break;

                case 's':
                        if (Cflag || cflag || wait)
                                usage();        /* Not with -C, -c or -w */
                        snapshot = optarg;
                        sflag++;
                        break;

                case 't':
                        types = 1;
                        break;

                case 'v':
                        verbose = 1;
                        break;

                case 'w':
                        if (Cflag || cflag || sflag)
                                usage();        /* Not with -C, -c or -s */
                        wait = 1;
                        break;

                case 'z': {
                        scf_value_t *zone;
                        scf_handle_t *h = hndl;

                        if (getzoneid() != GLOBAL_ZONEID)
                                uu_die(gettext("svcprop -z may only be used "
                                    "from the global zone\n"));

                        if ((zone = scf_value_create(h)) == NULL)
                                scfdie();

                        if (scf_value_set_astring(zone, optarg) != SCF_SUCCESS)
                                scfdie();

                        if (scf_handle_decorate(h, "zone", zone) !=
                            SCF_SUCCESS)
                                uu_die(gettext("invalid zone '%s'\n"), optarg);

                        scf_value_destroy(zone);
                        break;
                }

                case '?':
                        switch (optopt) {
                        case 'p':
                                usage();

                        default:
                                break;
                        }
                        /* FALLTHROUGH */

                default:
                        usage();
                }
        }

        if (optind == argc)
                usage();

        max_scf_name_length = scf_limit(SCF_LIMIT_MAX_NAME_LENGTH);
        max_scf_value_length = scf_limit(SCF_LIMIT_MAX_VALUE_LENGTH);
        max_scf_fmri_length = scf_limit(SCF_LIMIT_MAX_FMRI_LENGTH);
        if (max_scf_name_length == -1 || max_scf_value_length == -1 ||
            max_scf_fmri_length == -1)
                scfdie();

        if (scf_handle_bind(hndl) == -1)
                die(gettext("Could not connect to configuration repository: "
                    "%s.\n"), scf_strerror(scf_error()));

        flags = SCF_WALK_PROPERTY | SCF_WALK_SERVICE | SCF_WALK_EXPLICIT;

        if (wait) {
                if (uu_list_numnodes(prop_list) > 1)
                        usage();

                if (argc - optind > 1)
                        usage();

                callback = do_wait;
        } else {
                callback = process_fmri;
                flags |= SCF_WALK_MULTIPLE;
        }

        if ((err = scf_walk_fmri(hndl, argc - optind, argv + optind, flags,
            callback, NULL, &return_code, warn)) != 0) {
                warn(gettext("failed to iterate over instances: %s\n"),
                    scf_strerror(err));
                return_code = UU_EXIT_FATAL;
        }

        scf_handle_destroy(hndl);

        return (return_code);
}