/*
 * Dispose of an AVL handle.  With debugging enabled, panic if the tree
 * still contains nodes or if any walkers remain registered against it.
 * The handle is unlinked from its pool's list (under the pool lock)
 * before its storage is released.
 */
void
uu_avl_destroy(uu_avl_t *ap)
{
	uu_avl_pool_t *pool = ap->ua_pool;

	if (ap->ua_debug) {
		if (avl_numnodes(&ap->ua_tree) != 0) {
			uu_panic("uu_avl_destroy(%p): tree not empty\n",
			    (void *)ap);
		}
		/* The null walker must be alone on its circular list. */
		if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
		    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
			uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
			    (void *)ap);
		}
	}

	/* Splice this handle out of the pool's list of live AVLs. */
	(void) pthread_mutex_lock(&pool->uap_lock);
	UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
	UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
	(void) pthread_mutex_unlock(&pool->uap_lock);

	/* Poison the link fields before freeing, to catch stale use. */
	ap->ua_next_enc = UU_PTR_ENCODE(NULL);
	ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
	ap->ua_pool = NULL;

	avl_destroy(&ap->ua_tree);
	uu_free(ap);
}
/*
 * Release a dprintf handle and its separately-allocated name.
 *
 * free(NULL) is a defined no-op (C89 and later), so the previous
 * `if (D->uud_name)` guard was redundant and has been dropped.
 */
void
uu_dprintf_destroy(uu_dprintf_t *D)
{
	free(D->uud_name);
	uu_free(D);
}
/*
 * Parse the data of a REP_PROTOCOL_PROPERTYGRP_TX_COMMIT message into a
 * more useful form.  The data in the message will be represented by a
 * tx_commit_data_t structure which is allocated by this function.  The
 * address of the allocated structure is returned to *tx_data and must be
 * freed by calling tx_commit_data_free().
 *
 * Parameters:
 *	cmds_arg	Address of the commands in the
 *			REP_PROTOCOL_PROPERTYGRP_TX_COMMIT message.
 *
 *	cmds_sz		Number of message bytes at cmds_arg.
 *
 *	tx_data		Points to the place to receive the address of the
 *			allocated memory.
 *
 * Fails with
 *	_BAD_REQUEST
 *	_NO_RESOURCES
 */
int
tx_commit_data_new(const void *cmds_arg, size_t cmds_sz,
    tx_commit_data_t **tx_data)
{
	const struct rep_protocol_transaction_cmd *cmds;
	tx_commit_data_t *data;
	uintptr_t loc;
	uint32_t count;
	uint32_t sz;
	int ret;

	/*
	 * First, verify that the reported sizes make sense, and count
	 * the number of commands.
	 *
	 * The buffer is untrusted wire data, so the order of checks
	 * below matters: we require at least a command header's worth
	 * of bytes remaining before dereferencing the header at loc.
	 */
	count = 0;
	loc = (uintptr_t)cmds_arg;

	while (cmds_sz > 0) {
		/* Overlay a command header on the current position. */
		cmds = (struct rep_protocol_transaction_cmd *)loc;

		/* Must have more than a bare header left to read. */
		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
			return (REP_PROTOCOL_FAIL_BAD_REQUEST);

		/* The command's self-reported size must exceed a header. */
		sz = cmds->rptc_size;
		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
			return (REP_PROTOCOL_FAIL_BAD_REQUEST);

		/*
		 * NOTE(review): TX_SIZE presumably rounds sz up to the
		 * protocol's alignment boundary — confirm against its
		 * definition.  The rounded size must fit in what remains.
		 */
		sz = TX_SIZE(sz);
		if (sz > cmds_sz)
			return (REP_PROTOCOL_FAIL_BAD_REQUEST);

		/* Advance past this command; subtraction cannot underflow
		 * because sz <= cmds_sz was just checked. */
		loc += sz;
		cmds_sz -= sz;
		count++;
	}

	/* One allocation sized for `count` command slots. */
	data = uu_zalloc(TX_COMMIT_DATA_SIZE(count));
	if (data == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	/*
	 * verify that everything looks okay, and set up our command
	 * datastructures.
	 */
	data->txc_count = count;
	ret = tx_check_and_setup(data, cmds_arg, count);
	if (ret == REP_PROTOCOL_SUCCESS) {
		*tx_data = data;
	} else {
		/* Setup failed: caller gets NULL, we reclaim the buffer. */
		*tx_data = NULL;
		uu_free(data);
	}
	return (ret);
}
/*
 * Free a property group together with every property attached to it.
 */
void
internal_pgroup_free(pgroup_t *pg)
{
	void *marker = NULL;
	property_t *p;

	/* Detach and free each property, then the group itself. */
	for (p = uu_list_teardown(pg->sc_pgroup_props, &marker);
	    p != NULL;
	    p = uu_list_teardown(pg->sc_pgroup_props, &marker))
		internal_property_free(p);

	uu_free(pg);
}
/*
 * Free a property group and all of its properties.
 */
void
internal_pgroup_free(pgroup_t *pg)
{
	property_t *prop;
	void *marker = NULL;

	/*
	 * Templates validation code should clean up this reference when
	 * the validation is finished.
	 */
	assert(pg->sc_pgroup_composed == NULL);

	for (;;) {
		prop = uu_list_teardown(pg->sc_pgroup_props, &marker);
		if (prop == NULL)
			break;
		internal_property_free(prop);
	}

	uu_free(pg);
}
/*
 * Recursively free an instance entity: its restarter entity (if one was
 * attached), its property groups, its dependents, its FMRI string, and
 * finally the entity itself.
 */
void
internal_instance_free(entity_t *i)
{
	void *marker = NULL;
	pgroup_t *grp;
	entity_t *restarter = i->sc_u.sc_instance.sc_instance_restarter;

	/* The attached restarter entity, if any, is owned by us too. */
	if (restarter != NULL)
		internal_instance_free(restarter);

	while ((grp = uu_list_teardown(i->sc_pgroups, &marker)) != NULL)
		internal_pgroup_free(grp);

	/* Reset the teardown marker before draining the second list. */
	marker = NULL;
	while ((grp = uu_list_teardown(i->sc_dependents, &marker)) != NULL)
		internal_pgroup_free(grp);

	/* sc_fmri is uu_strdup()'d by the loader, hence uu_free. */
	uu_free((void *)i->sc_fmri);
	free(i);
}
/*
 * Destroy a list pool.  With debugging enabled, panic if any list
 * created from the pool is still outstanding (the pool's null-list
 * sentinel must point only at itself).  The pool is then removed from
 * the global pool list and freed.
 *
 * Fixes versus the previous version (matching uu_avl_pool_destroy):
 *  - the "*" precision of %.*s must be an int; passing the size_t
 *    result of sizeof through varargs is undefined behavior, so cast
 *    it to (int);
 *  - %p requires a void * argument, so cast pp explicitly.
 */
void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
	if (pp->ulp_debug) {
		if (pp->ulp_null_list.ul_next_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list) ||
		    pp->ulp_null_list.ul_prev_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list)) {
			uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding lists, or is corrupt.\n",
			    (int)sizeof (pp->ulp_name), pp->ulp_name,
			    (void *)pp);
		}
	}
	/* Take the pool off the global list of list pools. */
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next->ulp_prev = pp->ulp_prev;
	pp->ulp_prev->ulp_next = pp->ulp_next;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
	pp->ulp_prev = NULL;
	pp->ulp_next = NULL;
	uu_free(pp);
}
/*
 * Destroy an AVL pool.  With debugging enabled, panic unless every avl
 * created from this pool has already been destroyed: the pool's null-avl
 * sentinel must link only to itself.  The pool is then unlinked from the
 * global pool list and freed.
 */
void
uu_avl_pool_destroy(uu_avl_pool_t *pp)
{
	if (pp->uap_debug &&
	    (pp->uap_null_avl.ua_prev_enc !=
	    UU_PTR_ENCODE(&pp->uap_null_avl) ||
	    pp->uap_null_avl.ua_next_enc !=
	    UU_PTR_ENCODE(&pp->uap_null_avl))) {
		uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
		    "outstanding avls, or is corrupt.\n",
		    (int)sizeof (pp->uap_name), pp->uap_name,
		    (void *)pp);
	}

	/* Take the pool off the global list of AVL pools. */
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next->uap_prev = pp->uap_prev;
	pp->uap_prev->uap_next = pp->uap_next;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);

	pp->uap_next = NULL;
	pp->uap_prev = NULL;
	uu_free(pp);
}
/*
 * Terminate an AVL walk: detach the walker from its tree, then release
 * the walker's storage.
 */
void
uu_avl_walk_end(uu_avl_walk_t *wp)
{
	_avl_walk_fini(wp);
	uu_free(wp);
}
/*
 * Load the instance for fmri from the repository into memory.  The
 * property groups that define the instances pg_patterns and prop_patterns
 * are also loaded.
 *
 * On success, *inst_ptr receives the newly-allocated entity (the caller
 * owns it and must free it with internal_instance_free()).  On failure,
 * *inst_ptr is untouched and all intermediate allocations are released.
 *
 * Returns 0 on success and non-zero (EAGAIN, ECONNABORTED, ENOENT,
 * EINVAL, ENOTSUP, ECANCELED, ENOMEM, EACCES, EBADF) on failure.
 *
 * Fix: the previous version returned success without freeing the
 * `type` scratch buffer, leaking it on every successful call; it is
 * now released on the success path as well.
 */
int
load_instance(const char *fmri, const char *name, entity_t **inst_ptr)
{
	entity_t *e = NULL;
	scf_instance_t *inst;
	pgroup_t *ipg;
	int rc;
	char *type = NULL;
	ssize_t tsize;

	assert(inst_ptr != NULL);

	if ((inst = scf_instance_create(g_hndl)) == NULL) {
		switch (scf_error()) {
		case SCF_ERROR_NO_MEMORY:
		case SCF_ERROR_NO_RESOURCES:
			rc = EAGAIN;
			goto errout;
		default:
			bad_error("scf_instance_create", scf_error());
		}
	}

	/* Bind inst to the exact instance named by fmri. */
	if (scf_handle_decode_fmri(g_hndl, fmri, NULL, NULL, inst, NULL, NULL,
	    SCF_DECODE_FMRI_EXACT|SCF_DECODE_FMRI_REQUIRE_INSTANCE) != 0) {
		switch (scf_error()) {
		case SCF_ERROR_CONNECTION_BROKEN:
			rc = ECONNABORTED;
			goto errout;
		case SCF_ERROR_DELETED:
		case SCF_ERROR_NOT_FOUND:
			rc = ENOENT;
			goto errout;
		case SCF_ERROR_INVALID_ARGUMENT:
			rc = EINVAL;
			goto errout;
		case SCF_ERROR_CONSTRAINT_VIOLATED:
			rc = ENOTSUP;
			goto errout;
		default:
			bad_error("scf_handle_decode_fmri", scf_error());
		}
	}

	/* Iterate over the composed view of the instance's pgs. */
	if (scf_iter_instance_pgs_composed(load_pgiter, inst, NULL) != 0) {
		switch (scf_error()) {
		case SCF_ERROR_DELETED:
			rc = ECANCELED;
			goto errout;
		case SCF_ERROR_CONNECTION_BROKEN:
			rc = ECONNABORTED;
			goto errout;
		default:
			bad_error("scf_iter_instance_pgs_composed",
			    scf_error());
		}
	}

	/*
	 * Scratch buffer for pg type names.  NOTE(review): scf_limit()
	 * can return -1 on error, which would make this allocation
	 * request enormous; presumably this limit query cannot fail
	 * for a valid handle — confirm against scf_limit(3SCF).
	 */
	tsize = scf_limit(SCF_LIMIT_MAX_PG_TYPE_LENGTH);
	type = uu_zalloc(tsize);
	if (type == NULL) {
		rc = ENOMEM;
		goto errout;
	}

	/*
	 * Initialize our entity structure.
	 */
	e = internal_instance_new(name);
	if (e == NULL) {
		rc = ENOMEM;
		goto errout;
	}
	e->sc_fmri = uu_strdup(fmri);
	if (e->sc_fmri == NULL) {
		rc = ENOMEM;
		goto errout;
	}

	/*
	 * Walk through the property group's of the instance and capture
	 * the property groups that are of type
	 * SCF_GROUP_TEMPLATE_PG_PATTERN and
	 * SCF_GROUP_TEMPLATE_PROP_PATTERN.  In other words grab the
	 * pg_pattern and prop_pattern property groups.
	 */
	while ((rc = scf_iter_next_pg(load_pgiter, load_pgroup)) == 1) {
		if (scf_pg_get_type(load_pgroup, type, tsize) <= 0) {
			switch (scf_error()) {
			case SCF_ERROR_DELETED:
				rc = ENOENT;
				break;
			case SCF_ERROR_CONNECTION_BROKEN:
				rc = ECONNABORTED;
				break;
			default:
				bad_error("scf_pg_get_type", scf_error());
			}
			goto errout;
		}

		/* Skip pgs that are not pg_pattern / prop_pattern. */
		if ((strcmp(type, SCF_GROUP_TEMPLATE_PG_PATTERN) != 0) &&
		    (strcmp(type, SCF_GROUP_TEMPLATE_PROP_PATTERN) != 0)) {
			continue;
		}

		if ((rc = load_pg(load_pgroup, &ipg, fmri, NULL)) != 0) {
			switch (rc) {
			case ECANCELED:
			case ECONNABORTED:
			case EACCES:
			case ENOMEM:
				break;
			default:
				bad_error("load_pg", rc);
			}
			goto errout;
		}

		/* Attached pgs are owned by e from here on. */
		if (internal_attach_pgroup(e, ipg) != 0) {
			rc = EBADF;
			goto errout;
		}
	}

	if (rc == -1) {
		/* Error in iteration. */
		switch (scf_error()) {
		case SCF_ERROR_CONNECTION_BROKEN:
			rc = ECONNABORTED;
			break;
		case SCF_ERROR_DELETED:
			rc = ENOENT;
			break;
		case SCF_ERROR_NO_RESOURCES:
			rc = EAGAIN;
			break;
		default:
			bad_error("scf_iter_next_pg", scf_error());
		}
		goto errout;
	}

	/* Success: hand e to the caller; release our scratch state. */
	*inst_ptr = e;
	uu_free(type);
	scf_instance_destroy(inst);
	return (0);

errout:
	if (type != NULL)
		uu_free(type);
	if (inst != NULL)
		scf_instance_destroy(inst);
	if (e != NULL)
		internal_instance_free(e);
	return (rc);
}
/*
 * Release the memory that tx_commit_data_new() allocated for a
 * tx_commit_data_t.
 */
void
tx_commit_data_free(tx_commit_data_t *tx_data)
{
	uu_free(tx_data);
}