/**
 * Frees all memory allocated by the cache and nullifies the caller's pointer.
 *
 * @param cache_ptr		address of the cache pointer; ``*cache_ptr'' is reset
 *						to NULL on return.  A NULL ``*cache_ptr'' is a no-op.
 */
static void
adns_cache_free(adns_cache_t **cache_ptr)
{
	adns_cache_t *cache = *cache_ptr;
	unsigned i;

	/* If adns is not used it will not be initialized */
	if (NULL == cache)
		return;

	g_assert(cache->ht);

	/* Release every cached entry before tearing down the table */
	for (i = 0; i < G_N_ELEMENTS(cache->entries); i++) {
		adns_cache_free_entry(cache, i);
	}

	hikset_free_null(&cache->ht);
	XFREE_NULL(cache);

	/*
	 * Honor the "_free(&ptr)" contract implied by the double pointer:
	 * leave the caller's variable NULL so it cannot dangle.
	 */
	*cache_ptr = NULL;
}
/**
 * Benchmark the two map back-ends (hash table vs PATRICIA tree) over a set
 * of random SHA1 keys, timing insertion, lookup and removal at decreasing
 * item counts, and report which back-end was faster in each category.
 *
 * Only runs when ``common_stats'' > 0; per-run timings are logged when
 * ``common_stats'' > 1.
 */
void G_COLD
map_test(void)
{
	sha1_t *keys;
	map_t *mh, *mp;
	int i;
	size_t count;
	int tests;
	struct {
		unsigned insertion, contains, removal;
	} faster = { 0, 0, 0 };
	bool verbose = common_stats > 1;

	if (common_stats <= 0)
		return;

	XMALLOC_ARRAY(keys, ITEM_COUNT);

	/* Random keys exercise both back-ends without pathological ordering */
	for (i = 0; i < ITEM_COUNT; i++)
		random_bytes(keys[i].data, SHA1_RAW_SIZE);

	mh = map_create_hash(sha1_hash, sha1_eq);
	mp = map_create_patricia(KEYBITS);

	/* Warm-up / bulk insertion timing at the full item count */
	timeit(test_map_insert, mh, keys, ITEM_COUNT, LOOPS,
		"map hash insertion", verbose);
	timeit(test_map_insert, mp, keys, ITEM_COUNT, LOOPS,
		"map PATRICIA insertion", verbose);

	map_destroy(mh);
	map_destroy(mp);

	/* Re-run at item counts shrinking by a factor of 10 each pass */
	for (tests = 0, count = ITEM_COUNT; count > 1; count /= 10) {
		double htime;
		double ptime;

		tests++;
		mh = map_create_hash(sha1_hash, sha1_eq);
		mp = map_create_patricia(KEYBITS);

		htime = timeit(test_map_insert, mh, keys, count, 1,
			"map hash reloading", verbose);
		ptime = timeit(test_map_insert, mp, keys, count, 1,
			"map PATRICIA reloading", verbose);

		if (verbose)
			g_info("PATRICIA insertion %s than hash with %zu items",
				ptime < htime ? "faster" : "slower", count);

		if (ptime < htime)
			faster.insertion++;

		htime = timeit(test_map_contains, mh, keys, count, LOOPS,
			"map hash contains", verbose);
		ptime = timeit(test_map_contains, mp, keys, count, LOOPS,
			"map PATRICIA contains", verbose);

		if (verbose)
			g_info("PATRICIA contains %s than hash with %zu items",
				ptime < htime ? "faster" : "slower", count);

		if (ptime < htime)
			faster.contains++;

		htime = timeit(test_map_remove, mh, keys, count, 1,
			"map hash remove", verbose);
		ptime = timeit(test_map_remove, mp, keys, count, 1,
			"map PATRICIA remove", verbose);

		if (verbose)
			g_info("PATRICIA remove %s than hash with %zu items",
				ptime < htime ? "faster" : "slower", count);

		if (ptime < htime)
			faster.removal++;

		map_destroy(mh);
		map_destroy(mp);
	}

	/* %u matches the unsigned counters; ``tests'' remains a plain int */
	if (faster.insertion)
		g_info("PATRICIA insert was faster than hash in %u out of %d tests",
			faster.insertion, tests);
	if (faster.contains)
		g_info(
			"PATRICIA contains was faster than hash in %u out of %d tests",
			faster.contains, tests);
	if (faster.removal)
		g_info("PATRICIA remove was faster than hash in %u out of %d tests",
			faster.removal, tests);

	XFREE_NULL(keys);
}
/**
 * Free the callout queue and all contained event objects.
 *
 * @param cq	the callout queue (or sub-queue) to reclaim; the object
 *				itself is freed, so the pointer must not be used afterwards.
 */
static void
cq_free(cqueue_t *cq)
{
	cevent_t *ev;
	cevent_t *ev_next;
	int i;
	struct chash *ch;

	cqueue_check(cq);

	/* Freeing a queue whilst it is dispatching an event is suspicious */
	if (cq->cq_current != NULL) {
		s_carp("%s(): %squeue \"%s\" still within cq_clock()", G_STRFUNC,
			CSUBQUEUE_MAGIC == cq->cq_magic ? "sub" : "", cq->cq_name);
	}

	mutex_lock(&cq->cq_lock);

	/* Discard every pending event from each bucket of the event hash */
	for (ch = cq->cq_hash, i = 0; i < HASH_SIZE; i++, ch++) {
		for (ev = ch->ch_head; ev; ev = ev_next) {
			ev_next = ev->ce_bnext;
			ev->ce_magic = 0;	/* invalidate magic before freeing */
			WFREE(ev);
		}
	}

	if (cq->cq_periodic) {
		hset_foreach_remove(cq->cq_periodic, cq_free_periodic, NULL);
		hset_free_null(&cq->cq_periodic);
	}

	if (cq->cq_idle) {
		hset_foreach_remove(cq->cq_idle, cq_free_idle, cq);
		hset_free_null(&cq->cq_idle);
	}

	XFREE_NULL(cq->cq_hash);
	atom_str_free_null(&cq->cq_name);

	/*
	 * Unlocking the cq->cq_lock mutex (taken above) prevents a loud warning in
	 * mutex_destroy() in case the mutex was already locked by our thread,
	 * meaning we were already in cq_clock().  In that situation however,
	 * we already warned upon entry, and therefore there is no need for a
	 * second warning.
	 *
	 * If the mutex was not taken and someone else attempts to grab it at that
	 * stage, there will be a slight window which fortunately will be loudly
	 * detected by mutex_destroy(), as a case of a mutex being destroyed
	 * whilst owned by another thread.
	 *
	 * No valid application code should attempt to sneak in at this stage to
	 * grab that mutex anyway, so our logic is safe and we will be copiously
	 * warned if something unexpected happens.
	 *		--RAM, 2012-12-04.
	 */

	mutex_unlock(&cq->cq_lock);
	mutex_destroy(&cq->cq_lock);
	mutex_destroy(&cq->cq_idle_lock);

	/*
	 * If freeing a sub-queue, the object is a bit larger than a queue,
	 * and we have more cleanup to do...
	 */

	if (CSUBQUEUE_MAGIC == cq->cq_magic) {
		cq_subqueue_free((struct csubqueue *) cq);
	} else {
		cq->cq_magic = 0;	/* invalidate magic before freeing */
		WFREE(cq);
	}
}