entity_t *
internal_service_new(const char *name)
{
	entity_t *s;

	if ((s = uu_zalloc(sizeof (entity_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(s, &s->sc_node, entity_pool);

	s->sc_name = name;
	s->sc_fmri = uu_msprintf("svc:/%s", name);
	if (s->sc_fmri == NULL)
		uu_die(gettext("couldn't allocate memory"));

	s->sc_etype = SVCCFG_SERVICE_OBJECT;
	s->sc_pgroups = uu_list_create(pgroup_pool, s, 0);
	s->sc_dependents = uu_list_create(pgroup_pool, s, 0);

	s->sc_u.sc_service.sc_service_type = SVCCFG_UNKNOWN_SERVICE;
	s->sc_u.sc_service.sc_service_instances =
	    uu_list_create(entity_pool, s, 0);

	return (s);
}
/*
 * Read back the persistent representation of an active case.
 */
static zfs_case_t *
zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
{
	zfs_case_t *zcp;

	zcp = fmd_hdl_zalloc(hdl, sizeof (zfs_case_t), FMD_SLEEP);
	zcp->zc_case = cp;

	fmd_buf_read(hdl, cp, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));

	if (zcp->zc_data.zc_version > CASE_DATA_VERSION_SERD) {
		fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
		return (NULL);
	}

	/*
	 * fmd_buf_read() will have already zeroed out the remainder of the
	 * buffer, so we don't have to do anything special if the version
	 * doesn't include the SERD engine name.
	 */

	if (zcp->zc_data.zc_has_remove_timer)
		zcp->zc_remove_timer = fmd_timer_install(hdl, zcp,
		    NULL, zfs_remove_timeout);

	uu_list_node_init(zcp, &zcp->zc_node, zfs_case_pool);
	(void) uu_list_insert_before(zfs_cases, NULL, zcp);

	fmd_case_setspecific(hdl, cp, zcp);

	return (zcp);
}
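/*
 * Illustrative sketch only (not the module's actual routine): the write-side
 * counterpart to zfs_case_unserialize() above.  It assumes the CASE_DATA
 * buffer was created when the case was opened, stamps the current data
 * version, and persists the in-memory case data so a later restart of the
 * diagnosis module can read it back with fmd_buf_read().  The name
 * zfs_case_serialize_sketch is hypothetical.
 */
static void
zfs_case_serialize_sketch(fmd_hdl_t *hdl, zfs_case_t *zcp)
{
	/* record the newest on-disk layout we know how to write */
	zcp->zc_data.zc_version = CASE_DATA_VERSION_SERD;

	/* persist the case data alongside the fmd case */
	fmd_buf_write(hdl, zcp->zc_case, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));
}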
/*
 * Add the proto list contained in array 'plist' to entry 'entry', storing
 * aside the scf_value_t's created and added to the entry in a list that the
 * pointer referenced by sv_list is made to point at.
 */
static void
add_proto_list(scf_transaction_entry_t *entry, scf_handle_t *hdl,
    char **plist, uu_list_t **sv_list)
{
	scf_val_el_t	*sv_el;
	int		i;

	static uu_list_pool_t	*sv_pool = NULL;

	if ((sv_pool == NULL) &&
	    ((sv_pool = uu_list_pool_create("sv_pool",
	    sizeof (scf_val_el_t), offsetof(scf_val_el_t, link), NULL,
	    UU_LIST_POOL_DEBUG)) == NULL))
		uu_die(gettext("Error: %s.\n"), uu_strerror(uu_error()));

	if ((*sv_list = uu_list_create(sv_pool, NULL, 0)) == NULL)
		uu_die(gettext("Error: %s.\n"), uu_strerror(uu_error()));

	for (i = 0; plist[i] != NULL; i++) {
		if ((sv_el = malloc(sizeof (scf_val_el_t))) == NULL)
			uu_die(gettext("Error:"));

		if (((sv_el->val = scf_value_create(hdl)) == NULL) ||
		    (scf_value_set_astring(sv_el->val, plist[i]) != 0) ||
		    (scf_entry_add_value(entry, sv_el->val) != 0))
			scfdie();

		uu_list_node_init(sv_el, &sv_el->link, sv_pool);
		(void) uu_list_insert_after(*sv_list, NULL, sv_el);
	}
}
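/*
 * Self-contained sketch of the libuutil list idiom that add_proto_list()
 * and most of the functions in this section rely on: a pool describes the
 * node layout once (element size plus the offset of an embedded
 * uu_list_node_t), lists are created from the pool, and each element's node
 * is initialized before insertion.  The item_t type and all names here are
 * hypothetical; this is a minimal standalone example, not code from any of
 * the surrounding programs.
 */
#include <libuutil.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct item {
	uu_list_node_t	it_node;	/* linkage; offset handed to the pool */
	char		it_name[32];
} item_t;

int
main(void)
{
	uu_list_pool_t *pool;
	uu_list_t *list;
	item_t *it;
	void *cookie = NULL;

	pool = uu_list_pool_create("item_pool", sizeof (item_t),
	    offsetof(item_t, it_node), NULL, UU_LIST_POOL_DEBUG);
	if (pool == NULL)
		uu_die("pool: %s\n", uu_strerror(uu_error()));

	if ((list = uu_list_create(pool, NULL, 0)) == NULL)
		uu_die("list: %s\n", uu_strerror(uu_error()));

	if ((it = uu_zalloc(sizeof (item_t))) == NULL)
		uu_die("couldn't allocate memory\n");
	(void) strlcpy(it->it_name, "example", sizeof (it->it_name));

	/* initialize the embedded node, then append to the list */
	uu_list_node_init(it, &it->it_node, pool);
	(void) uu_list_insert_after(list, NULL, it);

	for (it = uu_list_first(list); it != NULL;
	    it = uu_list_next(list, it))
		(void) printf("%s\n", it->it_name);

	/* uu_list_destroy() requires an empty list, so tear it down first */
	while ((it = uu_list_teardown(list, &cookie)) != NULL)
		uu_free(it);
	uu_list_destroy(list);
	uu_list_pool_destroy(pool);

	return (0);
}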
/*
 * int wait_register(pid_t, char *, int, int)
 *   wait_register is called after we have called fork(2), and know which pid
 *   we wish to monitor.  However, since the child may have already exited by
 *   the time we are called, we must handle the error cases from open(2)
 *   appropriately.  The am_parent flag is recorded to handle waitpid(2)
 *   behaviour on removal; similarly, the direct flag is passed through to a
 *   potential call to wait_remove() to govern its behaviour in different
 *   contexts.
 *
 *   Returns 0 if registration successful, 1 if child pid did not exist, and
 *   -1 if a different error occurred.
 */
int
wait_register(pid_t pid, const char *inst_fmri, int am_parent, int direct)
{
	char *fname = uu_msprintf("/proc/%ld/psinfo", pid);
	int fd;
	wait_info_t *wi;

	assert(pid != 0);

	if (fname == NULL)
		return (-1);

	wi = startd_alloc(sizeof (wait_info_t));

	uu_list_node_init(wi, &wi->wi_link, wait_info_pool);

	wi->wi_fd = -1;
	wi->wi_pid = pid;
	wi->wi_fmri = inst_fmri;
	wi->wi_parent = am_parent;
	wi->wi_ignore = 0;

	MUTEX_LOCK(&wait_info_lock);
	(void) uu_list_insert_before(wait_info_list, NULL, wi);
	MUTEX_UNLOCK(&wait_info_lock);

	if ((fd = open(fname, O_RDONLY)) == -1) {
		if (errno == ENOENT) {
			/*
			 * Child has already exited.
			 */
			wait_remove(wi, direct);
			uu_free(fname);
			return (1);
		} else {
			log_error(LOG_WARNING,
			    "open %s failed; not monitoring %s: %s\n",
			    fname, inst_fmri, strerror(errno));
			uu_free(fname);
			return (-1);
		}
	}

	uu_free(fname);

	wi->wi_fd = fd;

	if (port_associate(port_fd, PORT_SOURCE_FD, fd, 0, wi)) {
		log_error(LOG_WARNING,
		    "initial port_association of %d / %s failed: %s\n",
		    fd, inst_fmri, strerror(errno));
		return (-1);
	}

	log_framework(LOG_DEBUG, "monitoring PID %ld on fd %d (%s)\n",
	    pid, fd, inst_fmri);

	return (0);
}
value_t *
internal_value_new()
{
	value_t *v;

	if ((v = uu_zalloc(sizeof (value_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(v, &v->sc_node, value_pool);

	return (v);
}
static void
add_prop(char *property)
{
	svcprop_prop_node_t *p, *last;
	char *slash;

	const char * const invalid_component_emsg =
	    gettext("Invalid component name `%s'.\n");

	/* FMRI syntax knowledge. */
	slash = strchr(property, '/');
	if (slash != NULL) {
		if (strchr(slash + 1, '/') != NULL) {
			uu_warn(gettext("-p argument `%s' has too many "
			    "components.\n"), property);
			usage();
		}
	}

	if (slash != NULL)
		*slash = '\0';

	p = safe_malloc(sizeof (svcprop_prop_node_t));
	uu_list_node_init(p, &p->spn_list_node, prop_pool);

	p->spn_comp1 = property;
	p->spn_comp2 = (slash == NULL) ? NULL : slash + 1;

	if (uu_check_name(p->spn_comp1, UU_NAME_DOMAIN) == -1)
		uu_xdie(UU_EXIT_USAGE, invalid_component_emsg, p->spn_comp1);

	if (p->spn_comp2 != NULL &&
	    uu_check_name(p->spn_comp2, UU_NAME_DOMAIN) == -1)
		uu_xdie(UU_EXIT_USAGE, invalid_component_emsg, p->spn_comp2);

	last = uu_list_last(prop_list);
	if (last != NULL) {
		if ((last->spn_comp2 == NULL) ^ (p->spn_comp2 == NULL)) {
			/*
			 * The -p options have mixed numbers of components.
			 * If they both turn out to be valid, then the
			 * single-component ones will specify property groups,
			 * so we need to turn on types to keep the output of
			 * display_prop() consistent with display_pg().
			 */
			types = 1;
		}
	}

	(void) uu_list_insert_after(prop_list, NULL, p);
}
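/*
 * Hypothetical sketch (not part of the svcprop source) showing how a list
 * built by add_prop() could be traversed with a libuutil walk handle instead
 * of explicit first/next calls.  It assumes svcprop's usual headers and the
 * svcprop_prop_node_t layout used above; the function name is made up.
 */
static void
walk_props_sketch(uu_list_t *props)
{
	uu_list_walk_t *walk;
	svcprop_prop_node_t *p;

	if ((walk = uu_list_walk_start(props, UU_DEFAULT)) == NULL)
		uu_die(gettext("Couldn't walk property list.\n"));

	while ((p = uu_list_walk_next(walk)) != NULL) {
		if (p->spn_comp2 == NULL)
			(void) printf("%s\n", p->spn_comp1);
		else
			(void) printf("%s/%s\n", p->spn_comp1, p->spn_comp2);
	}

	uu_list_walk_end(walk);
}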
property_t *
internal_property_new()
{
	property_t *p;

	if ((p = uu_zalloc(sizeof (property_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(p, &p->sc_node, property_pool);

	p->sc_property_values = uu_list_create(value_pool, p, UU_LIST_SORTED);
	p->sc_property_name = "<unset>";

	return (p);
}
entity_t *
internal_template_new()
{
	entity_t *t;

	if ((t = uu_zalloc(sizeof (entity_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(t, &t->sc_node, entity_pool);

	t->sc_etype = SVCCFG_TEMPLATE_OBJECT;
	t->sc_pgroups = uu_list_create(pgroup_pool, t, 0);

	return (t);
}
/* Add 'call' to the connection indication queue 'queue'. */
int
queue_conind(uu_list_t *queue, struct t_call *call)
{
	tlx_conn_ind_t *ci;

	if ((ci = malloc(sizeof (tlx_conn_ind_t))) == NULL) {
		error_msg(strerror(errno));
		return (-1);
	}

	ci->call = call;
	uu_list_node_init(ci, &ci->link, conn_ind_pool);
	(void) uu_list_insert_after(queue, NULL, ci);

	return (0);
}
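/*
 * Hypothetical companion sketch to queue_conind() above (not the actual
 * inetd dequeue routine): pop the oldest connection indication off 'queue',
 * or return NULL if the queue is empty.  It assumes the tlx_conn_ind_t
 * layout used above; the function name is made up.
 */
static struct t_call *
dequeue_conind_sketch(uu_list_t *queue)
{
	struct t_call	*ret;
	tlx_conn_ind_t	*ci = uu_list_first(queue);

	if (ci == NULL)
		return (NULL);

	ret = ci->call;
	uu_list_remove(queue, ci);
	free(ci);

	return (ret);
}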
entity_t *
internal_instance_new(const char *name)
{
	entity_t *i;

	if ((i = uu_zalloc(sizeof (entity_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(i, &i->sc_node, entity_pool);

	i->sc_name = name;
	/* Can't set i->sc_fmri until we're attached to a service. */
	i->sc_etype = SVCCFG_INSTANCE_OBJECT;
	i->sc_pgroups = uu_list_create(pgroup_pool, i, 0);
	i->sc_dependents = uu_list_create(pgroup_pool, i, 0);

	return (i);
}
pgroup_t *
internal_pgroup_new()
{
	pgroup_t *p;

	if ((p = uu_zalloc(sizeof (pgroup_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(p, &p->sc_node, pgroup_pool);

	p->sc_pgroup_props = uu_list_create(property_pool, p, UU_LIST_SORTED);
	if (p->sc_pgroup_props == NULL) {
		uu_die(gettext("Unable to create list for properties. %s\n"),
		    uu_strerror(uu_error()));
	}

	p->sc_pgroup_name = "<unset>";
	p->sc_pgroup_type = "<unset>";

	return (p);
}
entity_t *
internal_entity_new(entity_type_t entity)
{
	entity_t *e;

	if ((e = uu_zalloc(sizeof (entity_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(e, &e->sc_node, entity_pool);

	e->sc_etype = entity;
	e->sc_pgroups = uu_list_create(pgroup_pool, e, 0);
	e->sc_op = SVCCFG_OP_NONE;
	if (e->sc_pgroups == NULL) {
		uu_die(gettext("Unable to create list for entity property "
		    "groups. %s\n"), uu_strerror(uu_error()));
	}

	return (e);
}
property_t *
internal_property_new()
{
	property_t *p;

	if ((p = uu_zalloc(sizeof (property_t))) == NULL)
		uu_die(gettext("couldn't allocate memory"));

	uu_list_node_init(p, &p->sc_node, property_pool);

	p->sc_property_values = uu_list_create(value_pool, p, 0);
	if (p->sc_property_values == NULL) {
		uu_die(gettext("Unable to create list for property values. "
		    "%s\n"), uu_strerror(uu_error()));
	}

	p->sc_property_name = "<unset>";

	tmpl_property_init(p);

	return (p);
}
/*
 * Given a ZFS handle and a property, construct a complete list of datasets
 * that need to be modified as part of this process.  For anything but the
 * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
 * Otherwise, we iterate over all children and look for any datasets that
 * inherit the property.  For each such dataset, we add it to the list and
 * mark whether it was shared beforehand.
 */
prop_changelist_t *
changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
    int mnt_flags)
{
	prop_changelist_t *clp;
	prop_changenode_t *cn;
	zfs_handle_t *temp;
	char property[ZFS_MAXPROPLEN];
	uu_compare_fn_t *compare = NULL;
	boolean_t legacy = B_FALSE;

	if ((clp = zfs_alloc(zhp->zfs_hdl, sizeof (prop_changelist_t))) == NULL)
		return (NULL);

	/*
	 * For mountpoint-related tasks, we want to sort everything by
	 * mountpoint, so that we mount and unmount them in the appropriate
	 * order, regardless of their position in the hierarchy.
	 */
	if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
	    prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS ||
	    prop == ZFS_PROP_SHARESMB) {

		if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property,
		    sizeof (property), NULL, NULL, 0, B_FALSE) == 0 &&
		    (strcmp(property, "legacy") == 0 ||
		    strcmp(property, "none") == 0)) {
			legacy = B_TRUE;
		}
		if (!legacy) {
			compare = compare_mountpoints;
			clp->cl_sorted = B_TRUE;
		}
	}

	clp->cl_pool = uu_list_pool_create("changelist_pool",
	    sizeof (prop_changenode_t),
	    offsetof(prop_changenode_t, cn_listnode),
	    compare, 0);
	if (clp->cl_pool == NULL) {
		assert(uu_error() == UU_ERROR_NO_MEMORY);
		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
		changelist_free(clp);
		return (NULL);
	}

	clp->cl_list = uu_list_create(clp->cl_pool, NULL,
	    clp->cl_sorted ? UU_LIST_SORTED : 0);
	clp->cl_gflags = gather_flags;
	clp->cl_mflags = mnt_flags;

	if (clp->cl_list == NULL) {
		assert(uu_error() == UU_ERROR_NO_MEMORY);
		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * If this is a rename or the 'zoned' property, we pretend we're
	 * changing the mountpoint and flag it so we can catch all children in
	 * change_one().
	 *
	 * Flag cl_alldependents to catch all children plus the dependents
	 * (clones) that are not in the hierarchy.
	 */
	if (prop == ZFS_PROP_NAME) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
		clp->cl_alldependents = B_TRUE;
	} else if (prop == ZFS_PROP_ZONED) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
		clp->cl_allchildren = B_TRUE;
	} else if (prop == ZFS_PROP_CANMOUNT) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
	} else if (prop == ZFS_PROP_VOLSIZE) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
	} else {
		clp->cl_prop = prop;
	}
	clp->cl_realprop = prop;

	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
	    clp->cl_prop != ZFS_PROP_SHARENFS &&
	    clp->cl_prop != ZFS_PROP_SHARESMB)
		return (clp);

	/*
	 * If watching SHARENFS or SHARESMB then
	 * also watch its companion property.
	 */
	if (clp->cl_prop == ZFS_PROP_SHARENFS)
		clp->cl_shareprop = ZFS_PROP_SHARESMB;
	else if (clp->cl_prop == ZFS_PROP_SHARESMB)
		clp->cl_shareprop = ZFS_PROP_SHARENFS;

	if (clp->cl_alldependents) {
		if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
			changelist_free(clp);
			return (NULL);
		}
	} else if (zfs_iter_children(zhp, change_one, clp) != 0) {
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * We have to re-open ourselves because we auto-close all the handles
	 * and can't tell the difference.
	 */
	if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
	    ZFS_TYPE_DATASET)) == NULL) {
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * Always add ourself to the list.  We add ourselves to the end so
	 * that we're the last to be unmounted.
	 */
	if ((cn = zfs_alloc(zhp->zfs_hdl,
	    sizeof (prop_changenode_t))) == NULL) {
		zfs_close(temp);
		changelist_free(clp);
		return (NULL);
	}

	cn->cn_handle = temp;
	cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
	    zfs_is_mounted(temp, NULL);
	cn->cn_shared = zfs_is_shared(temp);
	cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
	cn->cn_needpost = B_TRUE;

	uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
	if (clp->cl_sorted) {
		uu_list_index_t idx;

		(void) uu_list_find(clp->cl_list, cn, NULL, &idx);
		uu_list_insert(clp->cl_list, cn, idx);
	} else {
		/*
		 * Add the target dataset to the end of the list.
		 * The list is not really unsorted. The list will be
		 * in reverse dataset name order. This is necessary
		 * when the original mountpoint is legacy or none.
		 */
		verify(uu_list_insert_after(clp->cl_list,
		    uu_list_last(clp->cl_list), cn) == 0);
	}

	/*
	 * If the mountpoint property was previously 'legacy', or 'none',
	 * record it as the behavior of changelist_postfix() will be
	 * different.
	 */
	if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) && legacy) {
		/*
		 * do not automatically mount ex-legacy datasets if
		 * we specifically set canmount to noauto
		 */
		if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) !=
		    ZFS_CANMOUNT_NOAUTO)
			clp->cl_waslegacy = B_TRUE;
	}

	return (clp);
}
static int
change_one(zfs_handle_t *zhp, void *data)
{
	prop_changelist_t *clp = data;
	char property[ZFS_MAXPROPLEN];
	char where[64];
	prop_changenode_t *cn;
	zprop_source_t sourcetype;
	zprop_source_t share_sourcetype;

	/*
	 * We only want to unmount/unshare those filesystems that may inherit
	 * from the target filesystem.  If we find any filesystem with a
	 * locally set mountpoint, we ignore any children since changing the
	 * property will not affect them.  If this is a rename, we iterate
	 * over all children regardless, since we need them unmounted in
	 * order to do the rename.  Also, if this is a volume and we're doing
	 * a rename, then always add it to the changelist.
	 */
	if (!(ZFS_IS_VOLUME(zhp) && clp->cl_realprop == ZFS_PROP_NAME) &&
	    zfs_prop_get(zhp, clp->cl_prop, property,
	    sizeof (property), &sourcetype, where, sizeof (where),
	    B_FALSE) != 0) {
		zfs_close(zhp);
		return (0);
	}

	/*
	 * If we are "watching" sharenfs or sharesmb
	 * then check out the companion property which is tracked
	 * in cl_shareprop
	 */
	if (clp->cl_shareprop != ZPROP_INVAL &&
	    zfs_prop_get(zhp, clp->cl_shareprop, property,
	    sizeof (property), &share_sourcetype, where, sizeof (where),
	    B_FALSE) != 0) {
		zfs_close(zhp);
		return (0);
	}

	if (clp->cl_alldependents || clp->cl_allchildren ||
	    sourcetype == ZPROP_SRC_DEFAULT ||
	    sourcetype == ZPROP_SRC_INHERITED ||
	    (clp->cl_shareprop != ZPROP_INVAL &&
	    (share_sourcetype == ZPROP_SRC_DEFAULT ||
	    share_sourcetype == ZPROP_SRC_INHERITED))) {
		if ((cn = zfs_alloc(zfs_get_handle(zhp),
		    sizeof (prop_changenode_t))) == NULL) {
			zfs_close(zhp);
			return (-1);
		}

		cn->cn_handle = zhp;
		cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
		    zfs_is_mounted(zhp, NULL);
		cn->cn_shared = zfs_is_shared(zhp);
		cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
		cn->cn_needpost = B_TRUE;

		/* Indicate if any child is exported to a local zone. */
		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
			clp->cl_haszonedchild = B_TRUE;

		uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);

		if (clp->cl_sorted) {
			uu_list_index_t idx;

			(void) uu_list_find(clp->cl_list, cn, NULL, &idx);
			uu_list_insert(clp->cl_list, cn, idx);
		} else {
			/*
			 * Add this child to beginning of the list. Children
			 * below this one in the hierarchy will get added above
			 * this one in the list. This produces a list in
			 * reverse dataset name order.
			 * This is necessary when the original mountpoint
			 * is legacy or none.
			 */
			verify(uu_list_insert_before(clp->cl_list,
			    uu_list_first(clp->cl_list), cn) == 0);
		}

		if (!clp->cl_alldependents)
			return (zfs_iter_children(zhp, change_one, data));
	} else {
		zfs_close(zhp);
	}

	return (0);
}
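/*
 * Isolated sketch of the sorted-insert idiom used by changelist_gather() and
 * change_one() above: when a list was created with UU_LIST_SORTED,
 * uu_list_find() computes the insertion index via the pool's compare
 * function and uu_list_insert() places the element there.  The srec_t type,
 * the compare function, and all names here are hypothetical; only the
 * libuutil calls are real.
 */
#include <libuutil.h>
#include <stddef.h>
#include <string.h>

typedef struct srec {
	uu_list_node_t	sr_node;
	char		sr_key[64];
} srec_t;

/* ARGSUSED */
static int
srec_compare(const void *l, const void *r, void *private)
{
	/* ordering contract required by uu_list_pool_create(): <0, 0, >0 */
	return (strcmp(((const srec_t *)l)->sr_key,
	    ((const srec_t *)r)->sr_key));
}

static void
sorted_insert(uu_list_t *list, uu_list_pool_t *pool, srec_t *sr)
{
	uu_list_index_t idx;

	uu_list_node_init(sr, &sr->sr_node, pool);

	/* find where the element belongs, then insert it at that index */
	(void) uu_list_find(list, sr, NULL, &idx);
	uu_list_insert(list, sr, idx);
}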
/*
 * Given a ZFS handle and a property, construct a complete list of datasets
 * that need to be modified as part of this process.  For anything but the
 * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
 * Otherwise, we iterate over all children and look for any datasets that
 * inherit the property.  For each such dataset, we add it to the list and
 * mark whether it was shared beforehand.
 */
prop_changelist_t *
changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
{
	prop_changelist_t *clp;
	prop_changenode_t *cn;
	zfs_handle_t *temp;
	char property[ZFS_MAXPROPLEN];
	uu_compare_fn_t *compare = NULL;

	if ((clp = zfs_alloc(zhp->zfs_hdl, sizeof (prop_changelist_t))) == NULL)
		return (NULL);

	/*
	 * For mountpoint-related tasks, we want to sort everything by
	 * mountpoint, so that we mount and unmount them in the appropriate
	 * order, regardless of their position in the hierarchy.
	 */
	if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
	    prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS) {
		compare = compare_mountpoints;
		clp->cl_sorted = B_TRUE;
	}

	clp->cl_pool = uu_list_pool_create("changelist_pool",
	    sizeof (prop_changenode_t),
	    offsetof(prop_changenode_t, cn_listnode),
	    compare, 0);
	if (clp->cl_pool == NULL) {
		assert(uu_error() == UU_ERROR_NO_MEMORY);
		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
		changelist_free(clp);
		return (NULL);
	}

	clp->cl_list = uu_list_create(clp->cl_pool, NULL,
	    clp->cl_sorted ? UU_LIST_SORTED : 0);
	clp->cl_flags = flags;

	if (clp->cl_list == NULL) {
		assert(uu_error() == UU_ERROR_NO_MEMORY);
		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * If this is a rename or the 'zoned' property, we pretend we're
	 * changing the mountpoint and flag it so we can catch all children in
	 * change_one().
	 *
	 * Flag cl_alldependents to catch all children plus the dependents
	 * (clones) that are not in the hierarchy.
	 */
	if (prop == ZFS_PROP_NAME) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
		clp->cl_alldependents = B_TRUE;
	} else if (prop == ZFS_PROP_ZONED) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
		clp->cl_allchildren = B_TRUE;
	} else if (prop == ZFS_PROP_CANMOUNT) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
	} else if (prop == ZFS_PROP_VOLSIZE) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
	} else if (prop == ZFS_PROP_VERSION) {
		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
	} else {
		clp->cl_prop = prop;
	}
	clp->cl_realprop = prop;

	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
	    clp->cl_prop != ZFS_PROP_SHARENFS &&
	    clp->cl_prop != ZFS_PROP_SHAREISCSI)
		return (clp);

	if (clp->cl_alldependents) {
		if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
			changelist_free(clp);
			return (NULL);
		}
	} else if (zfs_iter_children(zhp, change_one, clp) != 0) {
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * We have to re-open ourselves because we auto-close all the handles
	 * and can't tell the difference.
	 */
	if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
	    ZFS_TYPE_ANY)) == NULL) {
		changelist_free(clp);
		return (NULL);
	}

	/*
	 * Always add ourself to the list.  We add ourselves to the end so
	 * that we're the last to be unmounted.
	 */
	if ((cn = zfs_alloc(zhp->zfs_hdl,
	    sizeof (prop_changenode_t))) == NULL) {
		zfs_close(temp);
		changelist_free(clp);
		return (NULL);
	}

	cn->cn_handle = temp;
	cn->cn_mounted = zfs_is_mounted(temp, NULL);
	cn->cn_shared = zfs_is_shared(temp);
#ifndef __APPLE__
	cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
#endif /*!__APPLE__*/

	uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
	if (clp->cl_sorted) {
		uu_list_index_t idx;

		(void) uu_list_find(clp->cl_list, cn, NULL, &idx);
		uu_list_insert(clp->cl_list, cn, idx);
	} else {
		verify(uu_list_insert_after(clp->cl_list,
		    uu_list_last(clp->cl_list), cn) == 0);
	}

	/*
	 * If the mountpoint property was previously 'legacy', or 'none',
	 * record it as the behavior of changelist_postfix() will be
	 * different.
	 */
	if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) &&
	    (zfs_prop_get(zhp, prop, property, sizeof (property),
	    NULL, NULL, 0, B_FALSE) == 0 &&
	    (strcmp(property, "legacy") == 0 ||
	    strcmp(property, "none") == 0)))
		clp->cl_waslegacy = B_TRUE;

	return (clp);
}
static int
change_one(zfs_handle_t *zhp, void *data)
{
	prop_changelist_t *clp = data;
	char property[ZFS_MAXPROPLEN];
	char where[64];
	prop_changenode_t *cn;
	zfs_source_t sourcetype;

	/*
	 * We only want to unmount/unshare those filesystems that may inherit
	 * from the target filesystem.  If we find any filesystem with a
	 * locally set mountpoint, we ignore any children since changing the
	 * property will not affect them.  If this is a rename, we iterate
	 * over all children regardless, since we need them unmounted in
	 * order to do the rename.  Also, if this is a volume and we're doing
	 * a rename, then always add it to the changelist.
	 */
	if (!(ZFS_IS_VOLUME(zhp) && clp->cl_realprop == ZFS_PROP_NAME) &&
	    zfs_prop_get(zhp, clp->cl_prop, property,
	    sizeof (property), &sourcetype, where, sizeof (where),
	    B_FALSE) != 0) {
		zfs_close(zhp);
		return (0);
	}

	if (clp->cl_alldependents || clp->cl_allchildren ||
	    sourcetype == ZFS_SRC_DEFAULT || sourcetype == ZFS_SRC_INHERITED) {
		if ((cn = zfs_alloc(zfs_get_handle(zhp),
		    sizeof (prop_changenode_t))) == NULL) {
			zfs_close(zhp);
			return (-1);
		}

		cn->cn_handle = zhp;
		cn->cn_mounted = zfs_is_mounted(zhp, NULL);
		cn->cn_shared = zfs_is_shared(zhp);
#ifndef __APPLE__
		cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);

		/* Indicate if any child is exported to a local zone. */
		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
			clp->cl_haszonedchild = B_TRUE;
#endif /*!__APPLE__*/

		uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);

		if (clp->cl_sorted) {
			uu_list_index_t idx;

			(void) uu_list_find(clp->cl_list, cn, NULL, &idx);
			uu_list_insert(clp->cl_list, cn, idx);
		} else {
			ASSERT(!clp->cl_alldependents);
			verify(uu_list_insert_before(clp->cl_list,
			    uu_list_first(clp->cl_list), cn) == 0);
		}

		if (!clp->cl_alldependents)
			return (zfs_iter_children(zhp, change_one, data));
	} else {
		zfs_close(zhp);
	}

	return (0);
}
/*
 * valid_props validates all the properties in an array of inetd_prop_t's,
 * marking each property as valid or invalid.  If any properties are invalid,
 * it returns B_FALSE, otherwise it returns B_TRUE.  Note that some properties
 * are interdependent, so if one is invalid, it leaves others in an
 * indeterminate state (such as ISRPC and SVC_NAME).  In this case, the
 * indeterminate property will be marked valid.  I.e., the only properties
 * marked invalid are those that are KNOWN to be invalid.
 *
 * Piggy-backed onto this validation if 'fmri' is non-NULL is the construction
 * of a structured configuration, a basic_cfg_t, which is used by inetd.
 * If 'fmri' is set then the latter three parameters need to be set to
 * non-NULL values, and if the configuration is valid, the storage referenced
 * by cfgpp is set to point at an initialized basic_cfg_t.
 */
boolean_t
valid_props(inetd_prop_t *prop, const char *fmri, basic_cfg_t **cfgpp,
    uu_list_pool_t *proto_info_pool, uu_list_pool_t *tlx_ci_pool)
{
	char		*bufp, *cp;
	boolean_t	ret = B_TRUE;
	int		i;
	long		uidl;
	boolean_t	isrpc;
	int		sock_type_id;
	int		rpc_pnum;
	int		rpc_lv, rpc_hv;
	basic_cfg_t	*cfg;
	char		*proto = NULL;
	int		pi;
	char		**netids = NULL;
	int		ni = 0;

	if (fmri != NULL)
		assert((cfgpp != NULL) && (proto_info_pool != NULL) &&
		    (tlx_ci_pool != NULL));

	/*
	 * Set all checkable properties to valid as a baseline.  We'll be
	 * marking all invalid properties.
	 */
	for (i = 0; prop[i].ip_name != NULL; i++) {
		if (prop[i].ip_error != IVE_UNSET)
			prop[i].ip_error = IVE_VALID;
	}

	if (((cfg = calloc(1, sizeof (basic_cfg_t))) == NULL) ||
	    ((fmri != NULL) &&
	    ((cfg->proto_list = uu_list_create(proto_info_pool, NULL, 0)) ==
	    NULL))) {
		free(cfg);
		return (B_FALSE);
	}

	/* Check a service name was supplied */
	if ((prop[PT_SVC_NAME_INDEX].ip_error == IVE_UNSET) ||
	    ((cfg->svc_name =
	    strdup(prop[PT_SVC_NAME_INDEX].ip_value.iv_string)) == NULL))
		prop[PT_SVC_NAME_INDEX].ip_error = IVE_INVALID;

	/* Check that iswait and isrpc have valid boolean values */

	if ((prop[PT_ISWAIT_INDEX].ip_error == IVE_UNSET) ||
	    (((cfg->iswait = prop[PT_ISWAIT_INDEX].ip_value.iv_boolean) !=
	    B_TRUE) && (cfg->iswait != B_FALSE)))
		prop[PT_ISWAIT_INDEX].ip_error = IVE_INVALID;

	if ((prop[PT_ISRPC_INDEX].ip_error == IVE_UNSET) ||
	    (((isrpc = prop[PT_ISRPC_INDEX].ip_value.iv_boolean) != B_TRUE) &&
	    (isrpc != B_FALSE))) {
		prop[PT_ISRPC_INDEX].ip_error = IVE_INVALID;
	} else if (isrpc) {
		/*
		 * This is an RPC service, so ensure that the RPC version
		 * numbers are zero or greater, that the low version isn't
		 * greater than the high version and a valid program name
		 * is supplied.
		 */

		if ((prop[PT_RPC_LW_VER_INDEX].ip_error == IVE_UNSET) ||
		    ((rpc_lv = prop[PT_RPC_LW_VER_INDEX].ip_value.iv_int) < 0))
			prop[PT_RPC_LW_VER_INDEX].ip_error = IVE_INVALID;

		if ((prop[PT_RPC_HI_VER_INDEX].ip_error == IVE_UNSET) ||
		    ((rpc_hv = prop[PT_RPC_HI_VER_INDEX].ip_value.iv_int) < 0))
			prop[PT_RPC_HI_VER_INDEX].ip_error = IVE_INVALID;

		if ((prop[PT_RPC_LW_VER_INDEX].ip_error != IVE_INVALID) &&
		    (prop[PT_RPC_HI_VER_INDEX].ip_error != IVE_INVALID) &&
		    (rpc_lv > rpc_hv)) {
			prop[PT_RPC_LW_VER_INDEX].ip_error = IVE_INVALID;
			prop[PT_RPC_HI_VER_INDEX].ip_error = IVE_INVALID;
		}

		if ((cfg->svc_name != NULL) &&
		    ((rpc_pnum = get_rpc_prognum(cfg->svc_name)) == -1))
			prop[PT_SVC_NAME_INDEX].ip_error = IVE_INVALID;
	}

	/* Check that the socket type is one of the acceptable values. */
	cfg->istlx = B_FALSE;
	if ((prop[PT_SOCK_TYPE_INDEX].ip_error == IVE_UNSET) ||
	    ((sock_type_id = get_sock_type_id(
	    prop[PT_SOCK_TYPE_INDEX].ip_value.iv_string)) == -1) &&
	    !(cfg->istlx = is_tlx_service(prop)))
		prop[PT_SOCK_TYPE_INDEX].ip_error = IVE_INVALID;

	/* Get the bind address */
	if (!cfg->istlx && prop[PT_BIND_ADDR_INDEX].ip_error != IVE_UNSET &&
	    (cfg->bind_addr =
	    strdup(prop[PT_BIND_ADDR_INDEX].ip_value.iv_string)) == NULL)
		prop[PT_BIND_ADDR_INDEX].ip_error = IVE_INVALID;

	/*
	 * Iterate through all the different protos/netids resulting from the
	 * proto property and check that they're valid and perform checks on
	 * other fields that are tied-in with the proto.
	 */

	pi = 0;
	do {
		socket_info_t		*si = NULL;
		tlx_info_t		*ti = NULL;
		proto_info_t		*p_inf = NULL;
		boolean_t		v6only = B_FALSE;
		char			*only;
		boolean_t		invalid_proto = B_FALSE;
		char			**protos;
		struct protoent		pe;
		char			gpbuf[1024];
		struct netconfig	*nconf = NULL;

		/*
		 * If we don't know whether it's an rpc service or its
		 * endpoint type, we can't do any of the proto checks as we
		 * have no context; break out.
		 */
		if ((prop[PT_ISRPC_INDEX].ip_error != IVE_VALID) ||
		    (prop[PT_SOCK_TYPE_INDEX].ip_error != IVE_VALID))
			break;

		/* skip proto specific processing if the proto isn't set. */
		if (prop[PT_PROTO_INDEX].ip_error == IVE_UNSET) {
			invalid_proto = B_TRUE;
			goto past_proto_processing;
		}
		protos = prop[PT_PROTO_INDEX].ip_value.iv_string_list;

		/*
		 * Get the next netid/proto.
		 */
		if (!cfg->istlx || !isrpc) {
			proto = protos[pi++];
		/*
		 * This is a TLI/RPC service, so get the next netid, expanding
		 * any supplied nettype.
		 */
		} else if ((netids == NULL) ||
		    ((proto = netids[ni++]) == NULL)) {
			/*
			 * Either this is the first time around or
			 * we've exhausted the last set of netids, so
			 * try and get the next set using the currently
			 * indexed proto entry.
			 */

			if (netids != NULL) {
				destroy_strings(netids);
				netids = NULL;
			}

			if (protos[pi] != NULL) {
				if ((netids = get_netids(protos[pi++])) ==
				    NULL) {
					invalid_proto = B_TRUE;
					proto = protos[pi - 1];
				} else {
					ni = 0;
					proto = netids[ni++];
				}
			} else {
				proto = NULL;
			}
		}

		if (proto == NULL)
			break;

		if (invalid_proto)
			goto past_proto_processing;

		/* strip a trailing only to simplify further processing */
		only = proto + strlen(proto) - (sizeof ("6only") - 1);
		if ((only > proto) && (strcmp(only, "6only") == 0)) {
			*++only = '\0';
			v6only = B_TRUE;
		}

		/* validate the proto/netid */
		if (!cfg->istlx) {
			if (!valid_socket_proto(proto))
				invalid_proto = B_TRUE;
		} else {
			/*
			 * Check if we've got a valid netid.  If
			 * getnetconfigent() fails, we check to see whether
			 * we've got a v6 netid that may have been rejected
			 * because no IPv6 interface was configured before
			 * flagging 'proto' as invalid.  If the latter
			 * condition holds, we don't flag the proto as
			 * invalid, and leave inetd to handle the value
			 * appropriately when it tries to listen on behalf
			 * of the service.
			 */
			if (((nconf = getnetconfigent(proto)) == NULL) &&
			    !v6_proto(proto))
				invalid_proto = B_TRUE;
		}
		if (invalid_proto)
			goto past_proto_processing;

		/*
		 * disallow datagram type nowait services
		 */
		if ((prop[PT_ISWAIT_INDEX].ip_error == IVE_VALID) &&
		    !cfg->iswait) {
			if (strncmp(proto, SOCKET_PROTO_UDP,
			    sizeof (SOCKET_PROTO_UDP) - 1) == 0) {
				invalid_proto = B_TRUE;
			} else if (cfg->istlx && (nconf != NULL) &&
			    (nconf->nc_semantics == NC_TPI_CLTS)) {
				invalid_proto = B_TRUE;
			}
			if (invalid_proto) {
				prop[PT_ISWAIT_INDEX].ip_error = IVE_INVALID;
				goto past_proto_processing;
			}
		}

		/*
		 * We're running in validate only mode.  Don't bother creating
		 * any proto structures (they don't do any further validation).
		 */
		if (fmri == NULL)
			goto past_proto_processing;

		/*
		 * Create the appropriate transport info structure.
		 */
		if (cfg->istlx) {
			if ((ti = create_tlx_info(proto, tlx_ci_pool)) != NULL)
				p_inf = (proto_info_t *)ti;
		} else {
			struct sockaddr_storage *ss;

			if ((si = calloc(1, sizeof (socket_info_t))) != NULL) {
				p_inf = (proto_info_t *)si;
				si->type = sock_type_id;
				ss = &si->local_addr;
				if (v6_socket_proto(proto)) {
					ss->ss_family = AF_INET6;
					/* already in network order */
					((struct sockaddr_in6 *)ss)->sin6_addr =
					    in6addr_any;
				} else {
					ss->ss_family = AF_INET;
					((struct sockaddr_in *)ss)->sin_addr.
					    s_addr = htonl(INADDR_ANY);
				}
				if (set_bind_addr(ss, cfg->bind_addr) != 0) {
					prop[PT_BIND_ADDR_INDEX].ip_error =
					    IVE_INVALID;
				}
			}
		}
		if (p_inf == NULL) {
			invalid_proto = B_TRUE;
			goto past_proto_processing;
		}

		p_inf->v6only = v6only;

		/*
		 * Store the supplied proto string for error reporting,
		 * re-attaching the 'only' suffix if one was taken off.
		 */
		if ((p_inf->proto = malloc(strlen(proto) + 5)) == NULL) {
			invalid_proto = B_TRUE;
			goto past_proto_processing;
		} else {
			(void) strlcpy(p_inf->proto, proto, strlen(proto) + 5);
			if (v6only)
				(void) strlcat(p_inf->proto, "only",
				    strlen(proto) + 5);
		}

		/*
		 * Validate and setup RPC/non-RPC specifics.
		 */

		if (isrpc) {
			rpc_info_t *ri;

			if ((rpc_pnum != -1) && (rpc_lv != -1) &&
			    (rpc_hv != -1)) {
				if ((ri = create_rpc_info(proto, rpc_pnum,
				    rpc_lv, rpc_hv)) == NULL) {
					invalid_proto = B_TRUE;
				} else {
					p_inf->ri = ri;
				}
			}
		}

past_proto_processing:
		/* validate non-RPC service name */
		if (!isrpc && (cfg->svc_name != NULL)) {
			struct servent	se;
			char		gsbuf[NSS_BUFLEN_SERVICES];
			char		*gsproto = proto;

			if (invalid_proto) {
				/*
				 * Make getservbyname_r do its lookup without a
				 * proto.
				 */
				gsproto = NULL;
			} else if (gsproto != NULL) {
				/*
				 * Since getservbyname & getprotobyname don't
				 * support tcp6, udp6 or sctp6 take off the 6
				 * digit from protocol.
				 */
				if (v6_socket_proto(gsproto))
					gsproto[strlen(gsproto) - 1] = '\0';
			}

			if (getservbyname_r(cfg->svc_name, gsproto, &se,
			    gsbuf, sizeof (gsbuf)) == NULL) {
				if (gsproto != NULL)
					invalid_proto = B_TRUE;
				prop[PT_SVC_NAME_INDEX].ip_error = IVE_INVALID;
			} else if (cfg->istlx && (ti != NULL)) {
				/* LINTED E_BAD_PTR_CAST_ALIGN */
				SS_SETPORT(*(struct sockaddr_storage *)
				    ti->local_addr.buf, se.s_port);
			} else if (!cfg->istlx && (si != NULL)) {
				if ((gsproto != NULL) &&
				    getprotobyname_r(gsproto, &pe, gpbuf,
				    sizeof (gpbuf)) == NULL) {
					invalid_proto = B_TRUE;
				} else {
					si->protocol = pe.p_proto;
				}
				SS_SETPORT(si->local_addr, se.s_port);
			}
		}

		if (p_inf != NULL) {
			p_inf->listen_fd = -1;

			/* add new proto entry to proto_list */
			uu_list_node_init(p_inf, &p_inf->link,
			    proto_info_pool);
			(void) uu_list_insert_after(cfg->proto_list, NULL,
			    p_inf);
		}

		if (nconf != NULL)
			freenetconfigent(nconf);
		if (invalid_proto)
			prop[PT_PROTO_INDEX].ip_error = IVE_INVALID;
	} while (proto != NULL);	/* while just processed a proto */

	/*
	 * Check that the exec string for the start method actually exists and
	 * that the user is either a valid username or uid.  Note we don't
	 * mandate the setting of these fields, and don't do any checks
	 * for arg0, hence its absence.
	 */

	if (prop[PT_EXEC_INDEX].ip_error != IVE_UNSET) {
		/* Don't pass any arguments to access() */
		if ((bufp = strdup(
		    prop[PT_EXEC_INDEX].ip_value.iv_string)) == NULL) {
			prop[PT_EXEC_INDEX].ip_error = IVE_INVALID;
		} else {
			if ((cp = strpbrk(bufp, " \t")) != NULL)
				*cp = '\0';
			if ((access(bufp, F_OK) == -1) && (errno == ENOENT))
				prop[PT_EXEC_INDEX].ip_error = IVE_INVALID;
			free(bufp);
		}
	}

	if (prop[PT_USER_INDEX].ip_error != IVE_UNSET) {
		char		pw_buf[NSS_BUFLEN_PASSWD];
		struct passwd	pw;

		if (getpwnam_r(prop[PT_USER_INDEX].ip_value.iv_string, &pw,
		    pw_buf, NSS_BUFLEN_PASSWD) == NULL) {
			errno = 0;
			uidl = strtol(prop[PT_USER_INDEX].ip_value.iv_string,
			    &bufp, 10);
			if ((errno != 0) || (*bufp != '\0') ||
			    (getpwuid_r(uidl, &pw, pw_buf,
			    NSS_BUFLEN_PASSWD) == NULL))
				prop[PT_USER_INDEX].ip_error = IVE_INVALID;
		}
	}

	/*
	 * Iterate through the properties in the array verifying that any
	 * default properties are valid, and setting the return boolean
	 * according to whether any properties were marked invalid.
	 */

	for (i = 0; prop[i].ip_name != NULL; i++) {
		if (prop[i].ip_error == IVE_UNSET)
			continue;

		if (prop[i].ip_default &&
		    !valid_default_prop(prop[i].ip_name, &prop[i].ip_value))
			prop[i].ip_error = IVE_INVALID;

		if (prop[i].ip_error == IVE_INVALID)
			ret = B_FALSE;
	}

	/* pass back the basic_cfg_t if requested and it's a valid config */
	if ((cfgpp != NULL) && ret) {
		*cfgpp = cfg;
	} else {
		destroy_basic_cfg(cfg);
	}

	return (ret);
}
/*
 * Registers the attributes of a running method passed as arguments so that
 * the method's termination is noticed and any further processing of the
 * associated instance is carried out.  The function also sets up any
 * necessary timers so we can detect hung methods.
 * Returns -1 if either it failed to open the /proc psinfo file which is used
 * to monitor the method process, it failed to set up a required timer or
 * memory allocation failed; else 0.
 */
int
register_method(instance_t *ins, pid_t pid, ctid_t cid,
    instance_method_t mthd, char *proto_name)
{
	char		path[MAXPATHLEN];
	int		fd;
	method_el_t	*me;

	/* open /proc psinfo file of process to listen for POLLHUP events on */
	(void) snprintf(path, sizeof (path), "/proc/%u/psinfo", pid);
	for (;;) {
		if ((fd = open(path, O_RDONLY)) >= 0) {
			break;
		} else if (errno != EINTR) {
			/*
			 * Don't output an error for ENOENT; we get this
			 * if a method has gone away whilst we were stopped,
			 * and we're now trying to re-listen for it.
			 */
			if (errno != ENOENT) {
				error_msg(gettext("Failed to open %s: %s"),
				    path, strerror(errno));
			}
			return (-1);
		}
	}

	/* add method record to in-memory list */
	if ((me = calloc(1, sizeof (method_el_t))) == NULL) {
		error_msg(strerror(errno));
		(void) close(fd);
		return (-1);
	}
	me->fd = fd;
	me->inst = (instance_t *)ins;
	me->method = mthd;
	me->pid = pid;
	me->cid = cid;

	if (proto_name != NULL) {
		if ((me->proto_name = strdup(proto_name)) == NULL) {
			error_msg(strerror(errno));
			free(me);
			(void) close(fd);
			return (-1);
		}
	} else
		me->proto_name = NULL;

	/* register a timeout for the method, if required */
	if (mthd != IM_START) {
		method_info_t *mi = ins->config->methods[mthd];

		if (mi->timeout > 0) {
			assert(ins->timer_id == -1);
			ins->timer_id = iu_schedule_timer(timer_queue,
			    mi->timeout, method_timeout, me);
			if (ins->timer_id == -1) {
				error_msg(gettext(
				    "Failed to schedule method timeout"));
				if (me->proto_name != NULL)
					free(me->proto_name);
				free(me);
				(void) close(fd);
				return (-1);
			}
		}
	}

	/*
	 * Add fd of psinfo file to poll set, but pass 0 for events to
	 * poll for, so we should only get a POLLHUP event on the fd.
	 */
	if (set_pollfd(fd, 0) == -1) {
		cancel_inst_timer(ins);
		if (me->proto_name != NULL)
			free(me->proto_name);
		free(me);
		(void) close(fd);
		return (-1);
	}

	uu_list_node_init(me, &me->link, method_pool);
	(void) uu_list_insert_after(method_list, NULL, me);

	return (0);
}