/**
 * Dispose of the leaks accumulated.
 */
static void
leak_close(leak_set_t *ls)
{
	leak_set_check(ls);

	/*
	 * Release the entries held in each table, then the tables themselves.
	 */

	htable_foreach(ls->places, leak_free_kv, NULL);
	htable_free_null(&ls->places);

	htable_foreach(ls->stacks, leak_free_v, NULL);
	htable_free_null(&ls->stacks);

	ls->magic = 0;		/* Invalidate the object before releasing it */
	xfree(ls);
}
/**
 * Refresh every displayed node row against the current time.
 *
 * Notifications on the tree view are frozen during the sweep so the
 * widget repaints only once, after all rows have been updated.
 */
void
nodes_gui_update_display(time_t now)
{
	GObject *view = G_OBJECT(treeview_nodes);

	g_object_freeze_notify(view);
	htable_foreach(nodes_handles, update_row, &now);
	g_object_thaw_notify(view);
}
/**
 * Iterate on each item of the map, applying callback.
 *
 * @param m		the map to traverse
 * @param cb	callback invoked on each key/value pair
 * @param u		opaque user data passed to the callback
 */
void
map_foreach(const map_t *m, keyval_fn_t cb, void *u)
{
	map_check(m);
	g_assert(cb);

	/* Dispatch on the concrete container backing this map */

	switch (m->type) {
	case MAP_HASH:
		htable_foreach(m->u.ht, (ckeyval_fn_t) cb, u);
		break;
	case MAP_ORDERED_HASH:
		ohash_table_foreach(m->u.ot, cb, u);
		break;
	case MAP_PATRICIA:
		{
			/* PATRICIA needs a trampoline carrying the user callback */
			struct pat_foreach args = { .cb = cb, .u = u };

			patricia_foreach(m->u.pt, pat_foreach_wrapper, &args);
		}
		break;
	case MAP_MAXTYPE:
		g_assert_not_reached();
	}
}
/**
 * Unregister callbacks in the backend and clean up.
 *
 * Teardown order matters here: listeners are detached before the model
 * and the lookup tables are destroyed, so no event can fire into freed
 * state, and each table's values are freed before the table itself.
 */
void
nodes_gui_shutdown(void)
{
	tree_view_motion_clear_callback(&tvm_nodes);

	/* Persist the user's column layout before the view goes away */
	tree_view_save_widths(treeview_nodes, PROP_NODES_COL_WIDTHS);
	tree_view_save_visibility(treeview_nodes, PROP_NODES_COL_VISIBLE);

	/* Detach all backend listeners registered at init time */
	guc_node_remove_node_added_listener(nodes_gui_node_added);
	guc_node_remove_node_removed_listener(nodes_gui_node_removed);
	guc_node_remove_node_info_changed_listener(nodes_gui_node_info_changed);
	guc_node_remove_node_flags_changed_listener(nodes_gui_node_flags_changed);

	/* Drop our reference on the list store and detach it from the view */
	gtk_list_store_clear(nodes_model);
	g_object_unref(G_OBJECT(nodes_model));
	nodes_model = NULL;
	gtk_tree_view_set_model(treeview_nodes, NULL);

	/* Free the per-node data still held, then the tables themselves */
	htable_foreach(nodes_handles, free_node_data, NULL);
	htable_free_null(&nodes_handles);

	hset_foreach(ht_node_info_changed, free_node_id, NULL);
	hset_free_null(&ht_node_info_changed);

	hset_foreach(ht_node_flags_changed, free_node_id, NULL);
	hset_free_null(&ht_node_flags_changed);

	hset_foreach(ht_pending_lookups, free_node_id, NULL);
	hset_free_null(&ht_pending_lookups);
}
/**
 * Shutdown the wait queue layer.
 */
void
wq_close(void)
{
	g_assert(waitqueue != NULL);

	/*
	 * By the time we shut down, every registered event should already
	 * have been removed: whatever remains in the queue is a leak, and
	 * the freeing callback will flag it as such.
	 */

	htable_foreach(waitqueue, wq_free_kv, NULL);
	htable_free_null(&waitqueue);
}
/** * Clear all upload statistic entries from the GtkTreeModel. * */ void upload_stats_gui_clear_model(void) { GtkListStore *store; store = GTK_LIST_STORE(gtk_tree_view_get_model(upload_stats_treeview)); if (store) { gtk_list_store_clear(store); } if (ht_uploads) { htable_foreach(ht_uploads, free_upload_data, NULL); htable_free_null(&ht_uploads); ht_uploads = NOT_LEAKING(htable_create(HASH_KEY_SELF, 0)); } }
/** * Dump the links sorted by decreasing leak size. */ G_GNUC_COLD void leak_dump(const leak_set_t *ls) { int count; struct filler filler; int i; leak_set_check(ls); count = htable_count(ls->stacks); if (count == 0) goto leaks_by_place; /* * Linearize hash table into an array before sorting it by * decreasing leak size. */ filler.leaks = xpmalloc(sizeof(struct leak) * count); filler.count = count; filler.idx = 0; filler.kt = LEAK_KEY_STACK; htable_foreach(ls->stacks, fill_array, &filler); xqsort(filler.leaks, count, sizeof(struct leak), leak_size_cmp); /* * Dump the leaks by allocation place. */ g_warning("leak summary by stackframe and total decreasing size:"); g_warning("distinct calling stacks found: %d", count); for (i = 0; i < count; i++) { struct leak *l = &filler.leaks[i]; size_t avg = l->lr->size / (0 == l->lr->count ? 1 : l->lr->count); g_warning("%zu bytes (%zu block%s, average %zu byte%s) from:", l->lr->size, l->lr->count, l->lr->count == 1 ? "" : "s", avg, 1 == avg ? "" : "s"); stacktrace_atom_decorate(stderr, l->u.sa, STACKTRACE_F_ORIGIN | STACKTRACE_F_SOURCE); } xfree(filler.leaks); leaks_by_place: count = htable_count(ls->places); if (count == 0) return; /* * Linearize hash table into an array before sorting it by * decreasing leak size. */ filler.leaks = xpmalloc(sizeof(struct leak) * count); filler.count = count; filler.idx = 0; filler.kt = LEAK_KEY_PLACE; htable_foreach(ls->places, fill_array, &filler); xqsort(filler.leaks, count, sizeof(struct leak), leak_size_cmp); /* * Dump the leaks by allocation place. */ g_warning("leak summary by origin and total decreasing size:"); g_warning("distinct allocation points found: %d", count); for (i = 0; i < count; i++) { struct leak *l = &filler.leaks[i]; size_t avg = l->lr->size / (0 == l->lr->count ? 1 : l->lr->count); g_warning("%zu bytes (%zu block%s, average %zu byte%s) from \"%s\"", l->lr->size, l->lr->count, l->lr->count == 1 ? "" : "s", avg, 1 == avg ? "" : "s", l->u.place); } xfree(filler.leaks); }
/**
 * Extended XML formatting of a tree.
 *
 * Namespaces, if any, are automatically assigned a prefix, whose format
 * is "ns%u", the counter being incremented from 0.
 *
 * Users can supply a vector mapping namespaces to prefixes, so that they
 * can force specific prefixes for a given well-known namespace.
 *
 * If there is a default namespace, all the tags belonging to that namespace
 * are emitted without any prefix.
 *
 * The output stream must be explicitly closed by the user upon return.
 *
 * Options can be supplied to tune the output:
 *
 * - XFMT_O_SKIP_BLANKS will skip pure white space nodes.
 * - XFMT_O_COLLAPSE_BLANKS will replace consecutive blanks with 1 space
 * - XFMT_O_NO_INDENT requests that no indentation of the tree be made.
 * - XFMT_O_PROLOGUE emits a leading <?xml?> prologue.
 * - XFMT_O_FORCE_10 force generation of XML 1.0
 * - XFMT_O_SINGLE_LINE emits XML as one big line (implies XFMT_O_NO_INDENT).
 *
 * @param root			the root of the tree to dump
 * @param os			the output stream where tree is dumped
 * @param options		formatting options, as documented above
 * @param pvec			a vector of prefixes to be used for namespaces
 * @param pvcnt			amount of entries in vector
 * @param default_ns	default namespace to install at root element
 *
 * @return TRUE on success.
 */
bool
xfmt_tree_extended(const xnode_t *root, ostream_t *os, uint32 options,
	const struct xfmt_prefix *pvec, size_t pvcnt, const char *default_ns)
{
	struct xfmt_pass1 xp1;
	struct xfmt_pass2 xp2;
	struct xfmt_invert_ctx ictx;
	const char *dflt_ns;

	g_assert(root != NULL);
	g_assert(os != NULL);

	if (options & XFMT_O_COLLAPSE_BLANKS) {
		/* FIXME */
		g_carp("XFMT_O_COLLAPSE_BLANKS not supported yet");
		stacktrace_where_print(stderr);
	}

	/* Single-line output makes indentation meaningless */
	if (options & XFMT_O_SINGLE_LINE)
		options |= XFMT_O_NO_INDENT;

	/*
	 * First pass: look at namespaces and construct a table recording the
	 * earliest tree depth at which a namespace is used.
	 */

	ZERO(&xp1);
	xp1.uri2node = htable_create(HASH_KEY_STRING, 0);
	xp1.uri2prefix = nv_table_make(FALSE);

	/* Only track attribute URIs when a default namespace is in play */
	if (default_ns != NULL)
		xp1.attr_uris = hset_create(HASH_KEY_STRING, 0);

	htable_insert_const(xp1.uri2node, VXS_XML_URI, root);

	xnode_tree_enter_leave(deconstify_pointer(root),
		xfmt_handle_pass1_enter, xfmt_handle_pass1_leave, &xp1);

	g_assert(0 == xp1.depth);	/* Sound traversal */

	/*
	 * If there was a default namespace, make sure it is used in the tree.
	 * Otherwise, discard it.
	 */

	if (default_ns != NULL) {
		if (NULL == htable_lookup(xp1.uri2node, default_ns)) {
			g_carp("XFMT default namespace '%s' is not needed", default_ns);
			dflt_ns = NULL;
		} else {
			dflt_ns = default_ns;
		}
	} else {
		dflt_ns = NULL;
	}

	/*
	 * Prepare context for second pass.
	 *
	 * Note that xp1.attr_uris and xp1.uri2prefix are handed over to xp2
	 * and released during the final cleanup below.
	 */

	ZERO(&xp2);
	xp2.node2uri = htable_create(HASH_KEY_SELF, 0);
	xp2.os = os;
	xp2.options = options;
	xp2.default_ns = dflt_ns;
	xp2.attr_uris = xp1.attr_uris;
	xp2.uri2prefix = xp1.uri2prefix;
	xp2.uris = symtab_make();
	xp2.prefixes = symtab_make();
	xp2.depth = 0;
	xp2.pcount = 0;
	xp2.last_was_nl = TRUE;

	/*
	 * Iterate over the hash table we've built to create a table indexed
	 * by tree node and listing the namespaces to declare for that node.
	 */

	ictx.uri2node = xp1.uri2node;
	ictx.node2uri = xp2.node2uri;

	htable_foreach(xp1.uri2node, xfmt_invert_uri_kv, &ictx);
	htable_free_null(&xp1.uri2node);

	/*
	 * Emit prologue if requested.
	 */

	if (options & XFMT_O_PROLOGUE) {
		if (options & XFMT_O_FORCE_10) {
			ostream_write(os, XFMT_DECL_10, CONST_STRLEN(XFMT_DECL_10));
		} else {
			ostream_write(os, XFMT_DECL, CONST_STRLEN(XFMT_DECL));
		}
		if (!(options & XFMT_O_SINGLE_LINE)) {
			ostream_putc(os, '\n');
		}
	}

	/* The "xml" prefix for the XML namespace is always pre-declared */
	xfmt_prefix_declare(&xp2, VXS_XML_URI, VXS_XML);

	/*
	 * Prepare user-defined URI -> prefix mappings.
	 */

	if (pvcnt != 0) {
		size_t i;

		for (i = 0; i < pvcnt; i++) {
			const struct xfmt_prefix *p = &pvec[i];
			xfmt_prefix_declare(&xp2, p->uri, p->prefix);
		}
	}

	/*
	 * Second pass: generation.
	 */

	xnode_tree_enter_leave(deconstify_pointer(root),
		xfmt_handle_pass2_enter, xfmt_handle_pass2_leave, &xp2);

	g_assert(0 == xp2.depth);	/* Sound traversal */

	/*
	 * Done, cleanup.
	 */

	nv_table_free_null(&xp2.uri2prefix);
	symtab_free_null(&xp2.prefixes);
	symtab_free_null(&xp2.uris);
	htable_free_null(&xp2.node2uri);
	hset_free_null(&xp2.attr_uris);

	return !ostream_has_ioerr(os);
}
/*
 * Shutdown the UDP RPC layer.
 */
void
urpc_close(void)
{
	/* Reclaim whatever RPCs are still pending, then drop the table */
	htable_foreach(pending, urpc_free_kv, NULL);
	htable_free_null(&pending);
}