/*ARGSUSED*/
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid;
	uint64_t *tod;
	er_timeval_t loaded = { 0 };
	nvlist_t *config, *vd;
	uint_t nelem = 0;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	(void) nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_LOADED_TIME,
	    &tod, &nelem);
	if (nelem == 2) {
		loaded.ertv_sec = tod[0];
		loaded.ertv_nsec = tod[1];
		for (zcp = uu_list_first(zfs_cases); zcp != NULL;
		    zcp = uu_list_next(zfs_cases, zcp)) {
			if (zcp->zc_data.zc_pool_guid == pool_guid &&
			    zcp->zc_data.zc_vdev_guid == 0) {
				zcp->zc_when = loaded;
			}
		}
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	if (ret) {
		zpool_close(zhp);
		return (-1);
	}

	zfs_mark_vdev(pool_guid, vd, &loaded);

	zpool_close(zhp);

	return (0);
}
void
wait_postfork(pid_t pid)
{
	wait_info_t *wi;

	MUTEX_UNLOCK(&wait_info_lock);

	if (pid != 0)
		return;

	/*
	 * Close all of the child's wait-related fds.  The wait_thread() is
	 * gone, so no need to worry about returning events.  We always exec(2)
	 * after a fork request, so we needn't free the list elements
	 * themselves.
	 */
	for (wi = uu_list_first(wait_info_list); wi != NULL;
	    wi = uu_list_next(wait_info_list, wi)) {
		if (wi->wi_fd != -1)
			startd_close(wi->wi_fd);
	}

	startd_close(port_fd);

	(void) setrlimit(RLIMIT_NOFILE, &init_fd_rlimit);
}
/*
 * Returns B_TRUE if the bind configuration of the two instance_cfg_t
 * structures are equivalent, else B_FALSE.
 */
boolean_t
bind_config_equal(const basic_cfg_t *c1, const basic_cfg_t *c2)
{
	proto_info_t *pi;

	if ((c1->iswait != c2->iswait) || (c1->istlx != c2->istlx))
		return (B_FALSE);

	if (uu_list_numnodes(c1->proto_list) !=
	    uu_list_numnodes(c2->proto_list))
		return (B_FALSE);

	/*
	 * For each element in the first configuration's socket/tlx list,
	 * check there's a matching one in the other list.
	 */
	for (pi = uu_list_first(c1->proto_list); pi != NULL;
	    pi = uu_list_next(c1->proto_list, pi)) {
		uu_list_index_t idx;

		if (uu_list_find(c2->proto_list, pi, (void *)&c1->istlx,
		    &idx) == NULL)
			return (B_FALSE);
	}

	return (B_TRUE);
}
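/*
 * A minimal sketch -- not inetd's actual comparator -- of the shape of the
 * compare callback that the uu_list_find() call above depends on.  libuutil
 * comparators take (const void *, const void *, void *); the third argument
 * is whatever 'private' pointer the caller handed to uu_list_find(), which
 * is how the call above passes &c1->istlx so the comparator can vary its
 * checks for TLI/XTI versus socket-based entries.  The element type and its
 * fields here are hypothetical stand-ins.
 */
typedef struct sketch_elem {
	uu_list_node_t	se_node;
	const char	*se_proto;	/* hypothetical protocol-name field */
} sketch_elem_t;

static int
sketch_compare(const void *lv, const void *rv, void *private)
{
	const sketch_elem_t *l = lv;
	const sketch_elem_t *r = rv;
	boolean_t istlx = *(boolean_t *)private;	/* from uu_list_find() */
	int res;

	res = strcmp(l->se_proto, r->se_proto);
	if (res != 0)
		return ((res > 0) ? 1 : -1);

	/*
	 * A real comparator could apply extra transport-specific checks here
	 * when istlx is set; this sketch treats a name match as equality.
	 */
	(void) istlx;
	return (0);
}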
/*ARGSUSED*/
static int
zfs_mark_pool(zpool_handle_t *zhp, void *unused)
{
	zfs_case_t *zcp;
	uint64_t pool_guid;
	nvlist_t *config, *vd;
	int ret;

	pool_guid = zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL);
	/*
	 * Mark any cases associated with just this pool.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == 0)
			zcp->zc_present = B_TRUE;
	}

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		zpool_close(zhp);
		return (-1);
	}

	ret = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vd);
	assert(ret == 0);

	zfs_mark_vdev(pool_guid, vd);

	zpool_close(zhp);

	return (0);
}
/*
 * If we rename a filesystem, child filesystem handles are no longer valid
 * since we identify each dataset by its name in the ZFS namespace.  As a
 * result, we have to go through and fix up all the names appropriately.  We
 * could do this automatically if libzfs kept track of all open handles, but
 * this is a lot less work.
 */
void
changelist_rename(prop_changelist_t *clp, const char *src, const char *dst)
{
	prop_changenode_t *cn;
	char newname[ZFS_MAXNAMELEN];

	for (cn = uu_list_first(clp->cl_list); cn != NULL;
	    cn = uu_list_next(clp->cl_list, cn)) {
		/*
		 * Do not rename a clone that's not in the source hierarchy.
		 */
		if (!isa_child_of(cn->cn_handle->zfs_name, src))
			continue;

		/*
		 * Destroy the previous mountpoint if needed.
		 */
		remove_mountpoint(cn->cn_handle);

		(void) strlcpy(newname, dst, sizeof (newname));
		(void) strcat(newname, cn->cn_handle->zfs_name + strlen(src));

		(void) strlcpy(cn->cn_handle->zfs_name, newname,
		    sizeof (cn->cn_handle->zfs_name));
	}
}
/*
 * Unregister all methods associated with instance 'inst'.
 */
void
unregister_instance_methods(const instance_t *inst)
{
	method_el_t *me = uu_list_first(method_list);

	while (me != NULL) {
		if (me->inst == inst) {
			method_el_t *tmp = me;

			me = uu_list_next(method_list, me);
			unregister_method(tmp);
		} else {
			me = uu_list_next(method_list, me);
		}
	}
}
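/*
 * The function above advances 'me' past the current element before
 * unregister_method() removes and frees it.  Below is a sketch of the same
 * cleanup written with libuutil's robust walker, which explicitly tolerates
 * removal of the element most recently returned -- the same idiom that
 * zfs_purge_cases() uses further down.  The _walk suffix is ours; this is an
 * illustrative variant, not a function from the source.
 */
static void
unregister_instance_methods_walk(const instance_t *inst)
{
	uu_list_walk_t *walk;
	method_el_t *me;

	if ((walk = uu_list_walk_start(method_list, UU_WALK_ROBUST)) == NULL)
		return;

	while ((me = uu_list_walk_next(walk)) != NULL) {
		if (me->inst == inst)
			unregister_method(me);	/* removes 'me' from the list */
	}

	uu_list_walk_end(walk);
}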
/*
 * If the property is 'mountpoint', go through and unmount filesystems as
 * necessary.  We don't do the same for 'sharenfs', because we can just
 * re-share with different options without interrupting service.  We do handle
 * 'sharesmb' since there may be old resource names that need to be removed.
 */
int
changelist_prefix(prop_changelist_t *clp)
{
	prop_changenode_t *cn;
	int ret = 0;

	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
	    clp->cl_prop != ZFS_PROP_SHARESMB)
		return (0);

	for (cn = uu_list_first(clp->cl_list); cn != NULL;
	    cn = uu_list_next(clp->cl_list, cn)) {
		/* if a previous loop failed, set the remaining to false */
		if (ret == -1) {
			cn->cn_needpost = B_FALSE;
			continue;
		}

		/*
		 * If we are in the global zone, but this dataset is exported
		 * to a local zone, do nothing.
		 */
		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
			continue;

		if (!ZFS_IS_VOLUME(cn->cn_handle)) {
			/*
			 * Do the property specific processing.
			 */
			switch (clp->cl_prop) {
			case ZFS_PROP_MOUNTPOINT:
				if (zfs_unmount(cn->cn_handle, NULL,
				    clp->cl_mflags) != 0) {
					ret = -1;
					cn->cn_needpost = B_FALSE;
				}
				break;
			case ZFS_PROP_SHARESMB:
				(void) zfs_unshare_smb(cn->cn_handle, NULL);
				break;
			default:
				break;
			}
		}
	}

	if (ret == -1) {
#if defined(HAVE_ZPL)
		(void) changelist_postfix(clp);
#else
		ret = changelist_postfix(clp);
#endif
	}

	return (ret);
}
property_t *
internal_property_find(pgroup_t *pg, const char *name)
{
	property_t *p;

	for (p = uu_list_first(pg->sc_pgroup_props); p != NULL;
	    p = uu_list_next(pg->sc_pgroup_props, p))
		if (strcmp(p->sc_property_name, name) == 0)
			return (p);

	return (NULL);
}
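/*
 * Every uu_list_first()/uu_list_next() loop in these examples presumes the
 * usual libuutil setup: a pool describing the element type and the offset of
 * its embedded uu_list_node_t, a list created from that pool, and node
 * initialization before insertion.  A minimal, self-contained sketch follows;
 * the my_elem_t type and "my_elems" pool name are illustrative, not taken
 * from the sources above.
 */
#include <stddef.h>
#include <libuutil.h>

typedef struct my_elem {
	uu_list_node_t	me_node;	/* embedded list linkage */
	const char	*me_name;
} my_elem_t;

static uu_list_pool_t *my_pool;
static uu_list_t *my_list;

static int
my_list_init(void)
{
	/* NULL comparator: unordered list; uu_list_find() is unavailable */
	my_pool = uu_list_pool_create("my_elems", sizeof (my_elem_t),
	    offsetof(my_elem_t, me_node), NULL, UU_LIST_POOL_DEBUG);
	if (my_pool == NULL)
		return (-1);

	if ((my_list = uu_list_create(my_pool, NULL, 0)) == NULL)
		return (-1);

	return (0);
}

static void
my_list_append(my_elem_t *e)
{
	uu_list_node_init(e, &e->me_node, my_pool);
	/* appending after the last element; NULL target means an empty list */
	(void) uu_list_insert_after(my_list, uu_list_last(my_list), e);
}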
/*
 * If the property is 'mountpoint', go through and unmount filesystems as
 * necessary.  We don't do the same for 'sharenfs', because we can just
 * re-share with different options without interrupting service.
 */
int
changelist_prefix(prop_changelist_t *clp)
{
	prop_changenode_t *cn;
	int ret = 0;

	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT)
		return (0);

	for (cn = uu_list_first(clp->cl_list); cn != NULL;
	    cn = uu_list_next(clp->cl_list, cn)) {
#ifndef __APPLE__
		/*
		 * If we are in the global zone, but this dataset is exported
		 * to a local zone, do nothing.
		 */
		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
			continue;
#endif /* !__APPLE__ */

		if (ZFS_IS_VOLUME(cn->cn_handle)) {
			switch (clp->cl_realprop) {
			case ZFS_PROP_NAME:
				/*
				 * If this was a rename, unshare the zvol, and
				 * remove the /dev/zvol links.
				 */
				(void) zfs_unshare_iscsi(cn->cn_handle);

				if (zvol_remove_link(cn->cn_handle->zfs_hdl,
				    cn->cn_handle->zfs_name) != 0)
					ret = -1;
				break;

			case ZFS_PROP_VOLSIZE:
				/*
				 * If this was a change to the volume size, we
				 * need to unshare and reshare the volume.
				 */
				(void) zfs_unshare_iscsi(cn->cn_handle);
				break;
			}
		} else if (zfs_unmount(cn->cn_handle, NULL,
		    clp->cl_flags) != 0) {
			ret = -1;
		}
	}

	return (ret);
}
/*
 * Remove a node from a gathered list.
 */
void
changelist_remove(prop_changelist_t *clp, const char *name)
{
	prop_changenode_t *cn;

	for (cn = uu_list_first(clp->cl_list); cn != NULL;
	    cn = uu_list_next(clp->cl_list, cn)) {
		if (strcmp(cn->cn_handle->zfs_name, name) == 0) {
			uu_list_remove(clp->cl_list, cn);
			zfs_close(cn->cn_handle);
			free(cn);
			return;
		}
	}
}
/*
 * Given a gathered changelist for the 'sharenfs' property, unshare all the
 * datasets in the list.
 */
int
changelist_unshare(prop_changelist_t *clp)
{
	prop_changenode_t *cn;
	int ret = 0;

	if (clp->cl_prop != ZFS_PROP_SHARENFS)
		return (0);

	for (cn = uu_list_first(clp->cl_list); cn != NULL;
	    cn = uu_list_next(clp->cl_list, cn)) {
		if (zfs_unshare_nfs(cn->cn_handle, NULL) != 0)
			ret = -1;
	}

	return (ret);
}
static void
zfs_purge_cases(fmd_hdl_t *hdl)
{
	zfs_case_t *zcp;
	uu_list_walk_t *walk;
	libzfs_handle_t *zhdl = fmd_hdl_getspecific(hdl);

	/*
	 * There is no way to open a pool by GUID, or lookup a vdev by GUID.
	 * No matter what we do, we're going to have to stomach an
	 * O(vdevs * cases) algorithm.  In reality, both quantities are likely
	 * so small that neither will matter.  Given that iterating over pools
	 * is more expensive than iterating over the in-memory case list, we
	 * opt for a 'present' flag in each case that starts off cleared.  We
	 * then iterate over all pools, marking those that are still present,
	 * and removing those that aren't found.
	 *
	 * Note that we could also construct an FMRI and rely on
	 * fmd_nvl_fmri_present(), but this would end up doing the same search.
	 */

	/*
	 * Mark the cases as not present.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp))
		zcp->zc_present = B_FALSE;

	/*
	 * Iterate over all pools and mark the pools and vdevs found.  If this
	 * fails (most probably because we're out of memory), then don't close
	 * any of the cases and we cannot be sure they are accurate.
	 */
	if (zpool_iter(zhdl, zfs_mark_pool, NULL) != 0)
		return;

	/*
	 * Remove those cases which were not found.
	 */
	walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);
	while ((zcp = uu_list_walk_next(walk)) != NULL) {
		if (!zcp->zc_present)
			fmd_case_close(hdl, zcp->zc_case);
	}
	uu_list_walk_end(walk);
}
/*
 * Iterate over any active cases.  If any cases are associated with a pool or
 * vdev which is no longer present on the system, close the associated case.
 */
static void
zfs_mark_vdev(uint64_t pool_guid, nvlist_t *vd, er_timeval_t *loaded)
{
	uint64_t vdev_guid;
	uint_t c, children;
	nvlist_t **child;
	zfs_case_t *zcp;
	int ret;

	ret = nvlist_lookup_uint64(vd, ZPOOL_CONFIG_GUID, &vdev_guid);
	assert(ret == 0);

	/*
	 * Mark any cases associated with this (pool, vdev) pair.
	 */
	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_vdev_guid == vdev_guid) {
			zcp->zc_present = B_TRUE;
			zcp->zc_when = *loaded;
		}
	}

	/*
	 * Iterate over all children.
	 */
	if (nvlist_lookup_nvlist_array(vd, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_mark_vdev(pool_guid, child[c], loaded);
	}

	if (nvlist_lookup_nvlist_array(vd, ZPOOL_CONFIG_L2CACHE, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_mark_vdev(pool_guid, child[c], loaded);
	}

	if (nvlist_lookup_nvlist_array(vd, ZPOOL_CONFIG_SPARES, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_mark_vdev(pool_guid, child[c], loaded);
	}
}
/*
 * void wait_ignore_by_fmri(const char *)
 *   wait_ignore_by_fmri is called when svc.startd is going to stop the
 *   instance.  Since we need to wait on the process and close the utmpx
 *   record, we're going to set the wi_ignore flag, so that when the process
 *   exits we clean up, but don't tell the restarter to stop it.
 */
void
wait_ignore_by_fmri(const char *fmri)
{
	wait_info_t *wi;

	MUTEX_LOCK(&wait_info_lock);

	for (wi = uu_list_first(wait_info_list); wi != NULL;
	    wi = uu_list_next(wait_info_list, wi)) {
		if (strcmp(wi->wi_fmri, fmri) == 0)
			break;
	}

	if (wi != NULL)
		wi->wi_ignore = 1;

	MUTEX_UNLOCK(&wait_info_lock);
}
static pgroup_t *
find_pgroup(uu_list_t *list, const char *name, const char *type)
{
	pgroup_t *pg;

	for (pg = uu_list_first(list); pg != NULL;
	    pg = uu_list_next(list, pg)) {
		if (strcmp(pg->sc_pgroup_name, name) != 0)
			continue;

		if (type == NULL)
			return (pg);

		if (strcmp(pg->sc_pgroup_type, type) == 0)
			return (pg);
	}

	return (NULL);
}
/*
 * If the property is 'mountpoint', go through and unmount filesystems as
 * necessary.  We don't do the same for 'sharenfs', because we can just
 * re-share with different options without interrupting service.
 */
int
changelist_prefix(prop_changelist_t *clp)
{
	prop_changenode_t *cn;
	int ret = 0;

	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT)
		return (0);

	for (cn = uu_list_first(clp->cl_list); cn != NULL;
	    cn = uu_list_next(clp->cl_list, cn)) {
		/* if a previous loop failed, set the remaining to false */
		if (ret == -1) {
			cn->cn_needpost = B_FALSE;
			continue;
		}

		/*
		 * If we are in the global zone, but this dataset is exported
		 * to a local zone, do nothing.
		 */
		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
			continue;

		if (ZFS_IS_VOLUME(cn->cn_handle)) {
			switch (clp->cl_realprop) {
			case ZFS_PROP_NAME:
				/*
				 * If this was a rename, unshare the zvol, and
				 * remove the /dev/zvol links.
				 */
				(void) zfs_unshare_iscsi(cn->cn_handle);

				if (zvol_remove_link(cn->cn_handle->zfs_hdl,
				    cn->cn_handle->zfs_name) != 0) {
					ret = -1;
					cn->cn_needpost = B_FALSE;
					(void) zfs_share_iscsi(cn->cn_handle);
				}
				break;

			case ZFS_PROP_VOLSIZE:
				/*
				 * If this was a change to the volume size, we
				 * need to unshare and reshare the volume.
				 */
				(void) zfs_unshare_iscsi(cn->cn_handle);
				break;
			}
		} else if (zfs_unmount(cn->cn_handle, NULL,
		    clp->cl_flags) != 0) {
			ret = -1;
			cn->cn_needpost = B_FALSE;
		}
	}

	if (ret == -1)
		(void) changelist_postfix(clp);

	return (ret);
}
/*
 * Entity (service or instance): If there are -p options,
 * display_{pg,prop}() the named property groups and/or properties.  Otherwise
 * display_pg() all property groups.
 */
static void
process_ent(scf_entityp_t ent)
{
	scf_snapshot_t *snap = NULL;
	scf_propertygroup_t *pg;
	scf_property_t *prop;
	scf_iter_t *iter;
	svcprop_prop_node_t *spn;
	int ret, err;

	if (uu_list_numnodes(prop_list) == 0) {
		if (quiet)
			return;

		if ((pg = scf_pg_create(hndl)) == NULL ||
		    (iter = scf_iter_create(hndl)) == NULL)
			scfdie();

		if (cflag || Cflag || ent.type != ENT_INSTANCE) {
			if (scf_iter_entity_pgs(iter, ent) == -1)
				scfdie();
		} else {
			if (snapshot != NULL)
				snap = get_snapshot(ent.u.inst, snapshot);

			if (scf_iter_instance_pgs_composed(iter, ent.u.inst,
			    snap) == -1)
				scfdie();

			if (snap)
				scf_snapshot_destroy(snap);
		}

		while ((ret = scf_iter_next_pg(iter, pg)) == 1)
			display_pg(pg);
		if (ret == -1)
			scfdie();

		/*
		 * In normal usage, i.e. against the running snapshot,
		 * we must iterate over the current non-persistent
		 * pg's.
		 */
		if (sflag == 0 && snap != NULL) {
			scf_iter_reset(iter);
			if (scf_iter_instance_pgs_composed(iter, ent.u.inst,
			    NULL) == -1)
				scfdie();

			while ((ret = scf_iter_next_pg(iter, pg)) == 1) {
				uint32_t flags;

				if (scf_pg_get_flags(pg, &flags) == -1)
					scfdie();

				if (flags & SCF_PG_FLAG_NONPERSISTENT)
					display_pg(pg);
			}
		}
		if (ret == -1)
			scfdie();

		scf_iter_destroy(iter);
		scf_pg_destroy(pg);

		return;
	}

	if ((pg = scf_pg_create(hndl)) == NULL ||
	    (prop = scf_property_create(hndl)) == NULL)
		scfdie();

	if (ent.type == ENT_INSTANCE && snapshot != NULL)
		snap = get_snapshot(ent.u.inst, snapshot);

	for (spn = uu_list_first(prop_list); spn != NULL;
	    spn = uu_list_next(prop_list, spn)) {
		if (ent.type == ENT_INSTANCE) {
			if (Cflag)
				ret = scf_instance_get_pg(ent.u.inst,
				    spn->spn_comp1, pg);
			else
				ret = scf_instance_get_pg_composed(ent.u.inst,
				    snap, spn->spn_comp1, pg);
			err = scf_error();

			/*
			 * If we didn't find it in the specified snapshot, use
			 * the current values if the pg is nonpersistent.
			 */
			if (ret == -1 && !Cflag && snap != NULL &&
			    err == SCF_ERROR_NOT_FOUND) {
				ret = scf_instance_get_pg_composed(
				    ent.u.inst, NULL, spn->spn_comp1, pg);

				if (ret == 0) {
					uint32_t flags;

					if (scf_pg_get_flags(pg, &flags) == -1)
						scfdie();

					if ((flags &
					    SCF_PG_FLAG_NONPERSISTENT) == 0) {
						ret = -1;
					}
				}
			}
		} else {
			/*
			 * If we are displaying properties for a service,
			 * treat it as though it were a composed, current
			 * lookup.  (implicit cflag)  However, if a snapshot
			 * was specified, fail.
			 */
			if (sflag)
				die(gettext("Only instances have "
				    "snapshots.\n"));
			ret = scf_entity_get_pg(ent, spn->spn_comp1, pg);
			err = scf_error();
		}

		if (ret == -1) {
			if (err != SCF_ERROR_NOT_FOUND)
				scfdie();

			if (PRINT_NOPROP_ERRORS) {
				char *buf;

				buf = safe_malloc(max_scf_fmri_length + 1);
				if (scf_entity_to_fmri(ent, buf,
				    max_scf_fmri_length + 1) == -1)
					scfdie();

				uu_warn(gettext("Couldn't find property group "
				    "`%s' for %s `%s'.\n"), spn->spn_comp1,
				    SCF_ENTITY_TYPE_NAME(ent), buf);

				free(buf);
			}

			noprop_common_action();

			continue;
		}

		if (spn->spn_comp2 == NULL) {
			if (!quiet)
				display_pg(pg);
			continue;
		}

		if (scf_pg_get_property(pg, spn->spn_comp2, prop) == -1) {
			if (scf_error() != SCF_ERROR_NOT_FOUND)
				scfdie();

			if (PRINT_NOPROP_ERRORS) {
				char *buf;

				buf = safe_malloc(max_scf_fmri_length + 1);
				if (scf_entity_to_fmri(ent, buf,
				    max_scf_fmri_length + 1) == -1)
					scfdie();

				/* FMRI syntax knowledge */
				uu_warn(gettext("Couldn't find property "
				    "`%s/%s' for %s `%s'.\n"), spn->spn_comp1,
				    spn->spn_comp2, SCF_ENTITY_TYPE_NAME(ent),
				    buf);

				free(buf);
			}

			noprop_common_action();

			continue;
		}

		if (!quiet)
			display_prop(pg, prop);
	}

	scf_property_destroy(prop);
	scf_pg_destroy(pg);

	if (snap)
		scf_snapshot_destroy(snap);
}
/*
 * Process any terminated methods.  For each method determined to have
 * terminated, the function determines its return value and calls the
 * appropriate handling function, depending on the type of the method.
 */
void
process_terminated_methods(void)
{
	method_el_t *me = uu_list_first(method_list);

	while (me != NULL) {
		struct pollfd *pfd;
		pid_t pid;
		int status;
		int ret;
		method_el_t *tmp;

		pfd = find_pollfd(me->fd);

		/*
		 * We expect to get a POLLHUP back on the fd of the process's
		 * open psinfo file from /proc when the method terminates.
		 * A POLLERR could(?) mask a POLLHUP, so handle this also.
		 */
		if ((pfd->revents & (POLLHUP|POLLERR)) == 0) {
			me = uu_list_next(method_list, me);
			continue;
		}

		/* get the method's exit code (no need to loop for EINTR) */
		pid = waitpid(me->pid, &status, WNOHANG);

		switch (pid) {
		case 0:			/* child still around */
			/*
			 * Either poll() is sending us invalid POLLHUP events
			 * or is flagging a POLLERR on the fd.  Neither should
			 * happen, but in the event they do, ignore this fd
			 * this time around and wait out the termination of
			 * its associated method.  This may result in inetd
			 * swiftly looping in event_loop(), but means we don't
			 * miss the termination of a method.
			 */
			me = uu_list_next(method_list, me);
			continue;

		case -1:		/* non-existent child */
			assert(errno == ECHILD);
			/*
			 * The method must not be owned by inetd due to it
			 * persisting over an inetd restart.  Let's assume the
			 * best, that it was successful.
			 */
			ret = IMRET_SUCCESS;
			break;

		default:		/* child terminated */
			if (WIFEXITED(status)) {
				ret = WEXITSTATUS(status);
				debug_msg("process %ld of instance %s "
				    "returned %d", pid, me->inst->fmri, ret);
			} else if (WIFSIGNALED(status)) {
				/*
				 * Terminated by signal.  This may be due to a
				 * kill that we sent from a disable or offline
				 * event.  We flag it as a failure, but this
				 * flagged failure will only be processed in
				 * the case of non-start methods, or when the
				 * instance is still enabled.
				 */
				debug_msg("process %ld of instance %s exited "
				    "due to signal %d", pid, me->inst->fmri,
				    WTERMSIG(status));
				ret = IMRET_FAILURE;
			} else {
				/*
				 * Can we actually get here?  Don't think so.
				 * Treat it as a failure, anyway.
				 */
				debug_msg("waitpid() for %s method of "
				    "instance %s returned %d",
				    methods[me->method].name, me->inst->fmri,
				    status);
				ret = IMRET_FAILURE;
			}
		}

		remove_method_ids(me->inst, me->pid, me->cid, me->method);

		/* continue state transition processing of the instance */
		if (me->method != IM_START) {
			process_non_start_term(me->inst, ret);
		} else {
			process_start_term(me->inst, me->proto_name);
		}

		if (me->cid != -1)
			(void) abandon_contract(me->cid);

		tmp = me;
		me = uu_list_next(method_list, me);
		unregister_method(tmp);
	}
}
int
engine_import(uu_list_t *args)
{
	int ret, argc, i, o;
	bundle_t *b;
	char *file, *pname;
	uchar_t hash[MHASH_SIZE];
	char **argv;
	string_list_t *slp;
	boolean_t verify = B_FALSE;
	uint_t flags = SCI_GENERALLAST;

	argc = uu_list_numnodes(args);
	if (argc < 1)
		return (-2);

	argv = calloc(argc + 1, sizeof (char *));
	if (argv == NULL)
		uu_die(gettext("Out of memory.\n"));

	for (slp = uu_list_first(args), i = 0;
	    slp != NULL;
	    slp = uu_list_next(args, slp), ++i)
		argv[i] = slp->str;

	argv[i] = NULL;

	opterr = 0;
	optind = 0;		/* Remember, no argv[0]. */
	for (;;) {
		o = getopt(argc, argv, "nV");
		if (o == -1)
			break;

		switch (o) {
		case 'n':
			flags |= SCI_NOREFRESH;
			break;

		case 'V':
			verify = B_TRUE;
			break;

		case '?':
			free(argv);
			return (-2);

		default:
			bad_error("getopt", o);
		}
	}

	argc -= optind;
	if (argc != 1) {
		free(argv);
		return (-2);
	}

	file = argv[optind];
	free(argv);

	lscf_prep_hndl();

	ret = mhash_test_file(g_hndl, file, 0, &pname, hash);
	if (ret != MHASH_NEWFILE)
		return (ret);

	/* Load */
	b = internal_bundle_new();

	if (lxml_get_bundle_file(b, file, 0) != 0) {
		internal_bundle_free(b);
		return (-1);
	}

	/* Import */
	if (lscf_bundle_import(b, file, flags) != 0) {
		internal_bundle_free(b);
		return (-1);
	}

	internal_bundle_free(b);

	if (g_verbose)
		warn(gettext("Successful import.\n"));

	if (pname) {
		char *errstr;

		if (mhash_store_entry(g_hndl, pname, hash, &errstr)) {
			if (errstr)
				semerr(errstr);
			else
				semerr(gettext("Unknown error from "
				    "mhash_store_entry()\n"));
		}

		free(pname);
	}

	/* Verify */
	if (verify)
		warn(gettext("import -V not implemented.\n"));

	return (0);
}
/*
 * Handle a TLOOK notification received during a t_accept() call.
 * Returns -1 on failure, else 0.
 */
static int
process_tlook(const char *fmri, tlx_info_t *tlx_info)
{
	int event;
	int fd = tlx_info->pr_info.listen_fd;

	debug_msg("Entering process_tlook:");

	switch (event = t_look(fd)) {
	case T_LISTEN: {
		struct t_call *call;

		debug_msg("process_tlook: T_LISTEN event");

		if ((call = get_new_conind(fd)) == NULL)
			return (-1);

		if (queue_conind(tlx_info->conn_ind_queue, call) == -1) {
			error_msg(gettext("Failed to queue connection "
			    "indication for instance %s"), fmri);
			(void) t_free((char *)call, T_CALL);
			return (-1);
		}
		break;
	}

	case T_DISCONNECT: {
		/*
		 * Note: In Solaris 2.X (SunOS 5.X) bundled
		 * connection-oriented transport drivers
		 * [e.g. /dev/tcp, /dev/ticots and /dev/ticotsord (tl)] we do
		 * not send disconnect indications to listening endpoints, so
		 * this will not be seen with endpoints on Solaris bundled
		 * transport devices.  However, Streams TPI allows for this
		 * (broken?) behavior, so we account for it here because of
		 * the possibility of unbundled transport drivers causing it.
		 */
		tlx_conn_ind_t *cip;
		struct t_discon *discon;

		debug_msg("process_tlook: T_DISCONNECT event");

		/* LINTED */
		if ((discon = (struct t_discon *)
		    t_alloc(fd, T_DIS, T_ALL)) == NULL) {
			error_msg("t_alloc: %s", t_strerror(t_errno));
			return (-1);
		}

		if (t_rcvdis(fd, discon) < 0) {
			error_msg("t_rcvdis: %s", t_strerror(t_errno));
			(void) t_free((char *)discon, T_DIS);
			return (-1);
		}

		/*
		 * Find any queued connection pending that matches this
		 * disconnect notice and remove it from the pending queue.
		 */
		cip = uu_list_first(tlx_info->conn_ind_queue);
		while ((cip != NULL) &&
		    (cip->call->sequence != discon->sequence)) {
			cip = uu_list_next(tlx_info->conn_ind_queue, cip);
		}
		if (cip != NULL) {	/* match found */
			uu_list_remove(tlx_info->conn_ind_queue, cip);
			(void) t_free((char *)cip->call, T_CALL);
			free(cip);
		}

		(void) t_free((char *)discon, T_DIS);
		break;
	}

	case -1:
		/* t_errno is an int; pass a string to the %s conversion */
		error_msg("t_look: %s", t_strerror(t_errno));
		return (-1);

	default:
		error_msg(gettext("process_tlook: unexpected t_look event: "
		    "%d"), event);
		return (-1);
	}

	return (0);
}
/*
 * Without -p options, just call display_pg().  Otherwise display_prop() the
 * named properties of the property group.
 */
static void
process_pg(scf_propertygroup_t *pg)
{
	scf_property_t *prop;
	svcprop_prop_node_t *spn;

	if (uu_list_first(prop_list) == NULL) {
		if (quiet)
			return;

		display_pg(pg);
		return;
	}

	prop = scf_property_create(hndl);
	if (prop == NULL)
		scfdie();

	for (spn = uu_list_first(prop_list); spn != NULL;
	    spn = uu_list_next(prop_list, spn)) {
		if (spn->spn_comp2 != NULL) {
			char *buf;

			buf = safe_malloc(max_scf_fmri_length + 1);
			if (scf_pg_to_fmri(pg, buf,
			    max_scf_fmri_length + 1) == -1)
				scfdie();

			uu_xdie(UU_EXIT_USAGE, gettext("-p argument `%s/%s' "
			    "has too many components for property "
			    "group `%s'.\n"), spn->spn_comp1, spn->spn_comp2,
			    buf);

			free(buf);
		}

		if (scf_pg_get_property(pg, spn->spn_comp1, prop) == 0) {
			if (!quiet)
				display_prop(pg, prop);
			continue;
		}

		if (scf_error() != SCF_ERROR_NOT_FOUND)
			scfdie();

		if (PRINT_NOPROP_ERRORS) {
			char *buf;

			buf = safe_malloc(max_scf_fmri_length + 1);
			if (scf_pg_to_fmri(pg, buf,
			    max_scf_fmri_length + 1) == -1)
				scfdie();

			uu_warn(gettext("Couldn't find property `%s' in "
			    "property group `%s'.\n"), spn->spn_comp1, buf);

			free(buf);
		}

		noprop_common_action();
	}
}