/**
 * Periodic heartbeat, to rotate the hash tables every half-life period.
 */
void
dh_timer(time_t now)
{
	htable_t *previous;

	if (delta_time(now, last_rotation) < DH_HALF_LIFE)
		return;

	/*
	 * Rotate the hash tables: the old table is flushed and becomes the
	 * current one, whilst the current table ages into the "old" slot.
	 */

	previous = by_muid;
	dh_table_clear(by_muid_old);
	by_muid = by_muid_old;
	by_muid_old = previous;

	last_rotation = now;

	if (GNET_PROPERTY(dh_debug) > 19) {
		g_debug("DH rotated tables, current has %zu, old has %zu",
			htable_count(by_muid), htable_count(by_muid_old));
	}
}
/**
 * Clear all results from search.
 */
void
search_gui_clear_search(search_t *search)
{
	g_assert(search);
	g_assert(search->dups);

	/* Drop displayed results first, then anything still queued. */
	search_gui_clear_tree(search);
	search_gui_clear_queue(search);

	/* Both containers must be empty once the search is cleared. */
	g_assert(0 == hset_count(search->dups));
	g_assert(0 == htable_count(search->parents));
}
/*
 * Erasing a key must remove its bucket while leaving other entries intact.
 */
void test_htable_erase() {
    htable *ht = htable_create();

    htable_insert(ht, "one", "1st value");
    htable_insert(ht, "two", "2nd value");

    htable_erase(ht, "one");

    /* NOTE(review): htable_count() is passed a key here, i.e. it counts
     * entries for that key rather than the whole table -- confirm against
     * this htable API's header. */
    assert(0 == htable_count(ht, "one") &&
           "Bucket with key one should not exist.");

    htable_destroy(ht);
}
/** @brief API to get the amount of streams stored in a session **/
unsigned int ntoh_tcp_count_streams ( pntoh_tcp_session_t session )
{
	unsigned int count = 0;

	if ( session )
	{
		/* Take the session lock so the count is a consistent snapshot. */
		lock_access( &session->lock );
		count = htable_count ( session->streams );
		unlock_access( &session->lock );
	}

	return count;
}
/**
 * @return amount of items held in map.
 */
size_t
map_count(const map_t *m)
{
	size_t items = 0;

	map_check(m);

	/* Dispatch on the concrete container backing this map. */
	switch (m->type) {
	case MAP_HASH:
		items = htable_count(m->u.ht);
		break;
	case MAP_ORDERED_HASH:
		items = ohash_table_count(m->u.ot);
		break;
	case MAP_PATRICIA:
		items = patricia_count(m->u.pt);
		break;
	case MAP_MAXTYPE:
		g_assert_not_reached();
	}

	return items;
}
/** * Dump the links sorted by decreasing leak size. */ G_GNUC_COLD void leak_dump(const leak_set_t *ls) { int count; struct filler filler; int i; leak_set_check(ls); count = htable_count(ls->stacks); if (count == 0) goto leaks_by_place; /* * Linearize hash table into an array before sorting it by * decreasing leak size. */ filler.leaks = xpmalloc(sizeof(struct leak) * count); filler.count = count; filler.idx = 0; filler.kt = LEAK_KEY_STACK; htable_foreach(ls->stacks, fill_array, &filler); xqsort(filler.leaks, count, sizeof(struct leak), leak_size_cmp); /* * Dump the leaks by allocation place. */ g_warning("leak summary by stackframe and total decreasing size:"); g_warning("distinct calling stacks found: %d", count); for (i = 0; i < count; i++) { struct leak *l = &filler.leaks[i]; size_t avg = l->lr->size / (0 == l->lr->count ? 1 : l->lr->count); g_warning("%zu bytes (%zu block%s, average %zu byte%s) from:", l->lr->size, l->lr->count, l->lr->count == 1 ? "" : "s", avg, 1 == avg ? "" : "s"); stacktrace_atom_decorate(stderr, l->u.sa, STACKTRACE_F_ORIGIN | STACKTRACE_F_SOURCE); } xfree(filler.leaks); leaks_by_place: count = htable_count(ls->places); if (count == 0) return; /* * Linearize hash table into an array before sorting it by * decreasing leak size. */ filler.leaks = xpmalloc(sizeof(struct leak) * count); filler.count = count; filler.idx = 0; filler.kt = LEAK_KEY_PLACE; htable_foreach(ls->places, fill_array, &filler); xqsort(filler.leaks, count, sizeof(struct leak), leak_size_cmp); /* * Dump the leaks by allocation place. */ g_warning("leak summary by origin and total decreasing size:"); g_warning("distinct allocation points found: %d", count); for (i = 0; i < count; i++) { struct leak *l = &filler.leaks[i]; size_t avg = l->lr->size / (0 == l->lr->count ? 1 : l->lr->count); g_warning("%zu bytes (%zu block%s, average %zu byte%s) from \"%s\"", l->lr->size, l->lr->count, l->lr->count == 1 ? "" : "s", avg, 1 == avg ? "" : "s", l->u.place); } xfree(filler.leaks); }
/**
 * Do we have pending UDP RPCs?
 */
bool
urpc_pending(void)
{
	/* Any entry in the pending table means at least one RPC is in flight. */
	return htable_count(pending) != 0;
}