/**
 * Destroys the UDP TX scheduler, which must no longer be attached to anything.
 *
 * @param us	the UDP TX scheduler to reclaim
 */
void udp_sched_free(udp_sched_t *us)
{
	udp_sched_check(us);
	unsigned i;

	/*
	 * TX stacks are asynchronously collected, so we need to force collection
	 * now to make sure nobody references us any longer.
	 */
	tx_collect();

	/* After collection, no TX stack may remain attached */
	g_assert(0 == hash_list_length(us->stacks));

	/* Drop every message still queued in each priority LIFO */
	for (i = 0; i < N_ITEMS(us->lifo); i++) {
		udp_sched_drop_all(us, &us->lifo[i]);
	}
	udp_sched_tx_release(us);
	udp_sched_seen_clear(us);
	pool_free(us->txpool);
	hset_free_null(&us->seen);
	hash_list_free(&us->stacks);
	udp_sched_clear_sockets(us);

	us->magic = 0;	/* Invalidate magic to catch accidental reuse */
	WFREE(us);
}
/**
 * Callout queue callback invoked when a waiting event's timeout expires.
 *
 * The waiter's callback is invoked with the WQ_TIMED_OUT sentinel and its
 * return status decides the event's fate: WQ_SLEEP re-arms the original
 * timeout, anything else discards the event from the wait queue.
 */
static void wq_timed_out(cqueue_t *cq, void *arg)
{
	wq_event_t *we = arg;
	hash_list_t *hl;
	wq_status_t status;

	wq_event_check(we);
	g_assert(we->tm != NULL);

	cq_zero(cq, &we->tm->timeout_ev);

	hl = htable_lookup(waitqueue, we->key);
	g_assert(hl != NULL);

	/* Signal the timeout condition to the waiting party */
	status = (*we->cb)(we->arg, WQ_TIMED_OUT);

	if (WQ_SLEEP == status) {
		/* Party wants to keep waiting: re-instantiate the initial timeout */
		we->tm->timeout_ev = cq_main_insert(we->tm->delay, wq_timed_out, we);
		return;
	}

	if (WQ_EXCLUSIVE == status) {
		s_critical("weird status WQ_EXCLUSIVE on timeout invocation of %s()",
			stacktrace_function_name(we->cb));
		/* Treated as WQ_REMOVE below */
	}

	if (WQ_EXCLUSIVE == status || WQ_REMOVE == status) {
		hash_list_remove(hl, we);

		/*
		 * Cleanup the table if it ends-up being empty.
		 */
		if (0 == hash_list_length(hl)) {
			hash_list_free(&hl);
			htable_remove(waitqueue, we->key);
		}

		wq_event_free(we);	/* Event will never fire again */
		return;
	}

	g_assert_not_reached();
}
/** * Final cleanup when application terminates. */ void udp_close(void) { if (udp_pings) { udp_ping_expire(TRUE); hash_list_free(&udp_pings); } cq_periodic_remove(&udp_ping_ev); aging_destroy(&udp_aging_pings); }
/**
 * Free data structures used by the page cache.
 *
 * Only the cache's internal structures are released; the `cache' structure
 * itself is left allocated, with its page counters reset to zero.
 */
static void free_cache(struct lru_cache *cache)
{
	hash_list_free(&cache->used);
	slist_free(&cache->available);
	htable_free_null(&cache->pagnum);
	VMM_FREE_NULL(cache->arena, cache->pages * DBM_PBLKSIZ);
	WFREE_ARRAY_NULL(cache->numpag, cache->pages);
	WFREE_NULL(cache->dirty, cache->pages);
	cache->pages = cache->next = 0;
}
/**
 * Free data structures used by the page cache.
 *
 * Only the cache's internal structures are released; the `cache' structure
 * itself is left allocated, with its page counters reset to zero.
 */
static void free_cache(struct lru_cache *cache)
{
	hash_list_free(&cache->used);
	slist_free(&cache->available);
	gm_hash_table_destroy_null(&cache->pagnum);
	VMM_FREE_NULL(cache->arena, cache->pages * DBM_PBLKSIZ);
	WFREE_NULL(cache->numpag, cache->pages * sizeof(long));
	WFREE_NULL(cache->dirty, cache->pages);
	cache->pages = cache->next = 0;
}
/**
 * Hash table iterator callback: reclaim all waiting events registered
 * under a given key, then free the per-key list itself.
 */
static void wq_free_kv(const void *unused_key, void *value, void *unused_data)
{
	hash_list_t *events = value;

	(void) unused_key;
	(void) unused_data;

	hash_list_foreach_remove(events, wq_free_waiting, NULL);
	hash_list_free(&events);
}
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 *
 * @param dw		the DBM wrapper to destroy
 * @param close_map	whether the underlying DB map must be closed as well
 */
void dbmw_destroy(dbmw_t *dw, bool close_map)
{
	dbmw_check(dw);

	/* Emit cache efficiency statistics before tearing everything down */
	if (common_stats) {
		s_debug("DBMW destroying \"%s\" with %s back-end "
			"(read cache hits = %.2f%% on %s request%s, "
			"write cache hits = %.2f%% on %s request%s)",
			dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
			dw->r_hits * 100.0 / MAX(1, dw->r_access),
			uint64_to_string(dw->r_access), plural(dw->r_access),
			dw->w_hits * 100.0 / MAX(1, dw->w_access),
			uint64_to_string2(dw->w_access), plural(dw->w_access));
	}

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_DESTROY)) {
		dbg_ds_log(dw->dbg, dw, "%s: with %s back-end "
			"(read cache hits = %.2f%% on %s request%s, "
			"write cache hits = %.2f%% on %s request%s)",
			G_STRFUNC, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
			dw->r_hits * 100.0 / MAX(1, dw->r_access),
			uint64_to_string(dw->r_access), plural(dw->r_access),
			dw->w_hits * 100.0 / MAX(1, dw->w_access),
			uint64_to_string2(dw->w_access), plural(dw->w_access));
	}

	/*
	 * If we close the map and we're volatile, there's no need to flush
	 * the cache as the data is going to be gone soon anyway.
	 */
	if (!close_map || !dw->is_volatile) {
		dbmw_sync(dw, DBMW_SYNC_CACHE);
	}

	dbmw_clear_cache(dw);
	hash_list_free(&dw->keys);
	map_destroy(dw->values);

	if (dw->mb)
		pmsg_free(dw->mb);
	bstr_free(&dw->bs);

	if (close_map)
		dbmap_destroy(dw->dm);

	WFREE_TYPE_NULL(dw->dbmap_dbg);
	dw->magic = 0;	/* Invalidate magic to catch accidental reuse */
	WFREE(dw);
}
/**
 * Cleanup during process termination.
 */
void G_COLD uhc_close(void)
{
	cq_cancel(&uhc_ctx.timeout_ev);
	uhc_connecting = FALSE;

	if (uhc_list != NULL) {
		struct uhc *entry;

		/* Drain and reclaim every pending UDP host-cache entry */
		for (;;) {
			entry = hash_list_shift(uhc_list);
			if (NULL == entry)
				break;
			uhc_free(&entry);
		}
		hash_list_free(&uhc_list);
	}
}
/**
 * Dispose of all the items remaining in the list, applying the supplied
 * free callback on all the items, then freeing the hash_list_t container
 * and nullifying its pointer.
 *
 * @param hl_ptr	pointer to the list variable, nullified on return
 * @param freecb	callback invoked on each remaining item
 */
void hash_list_free_all(hash_list_t **hl_ptr, free_fn_t freecb)
{
	hash_list_t *hl;

	g_assert(hl_ptr != NULL);
	g_assert(freecb != NULL);

	hl = *hl_ptr;
	if (NULL == hl)
		return;				/* Already freed, nothing to do */

	hash_list_check(hl);
	elist_foreach(&hl->list, hash_list_freecb_wrapper,
		cast_func_to_pointer(freecb));
	hash_list_free(hl_ptr);
}
/**
 * Notify wake-up condition to sleepers on the key.
 *
 * @param key	the rendez-vous point
 * @param data	additional data to supply to woken-up parties
 */
void wq_wakeup(const void *key, void *data)
{
	hash_list_t *sleepers;

	sleepers = htable_lookup(waitqueue, key);
	if (NULL == sleepers)
		return;				/* Nobody waiting on this key */

	wq_notify(sleepers, data);

	/*
	 * Cleanup the table if it ends-up being empty.
	 */
	if (0 == hash_list_length(sleepers)) {
		hash_list_free(&sleepers);
		htable_remove(waitqueue, key);
	}
}
/** * Clear all the upload stats data structure. */ static G_GNUC_COLD void upload_stats_free_all(void) { if (upload_stats_list) { struct ul_stats *s; while (NULL != (s = hash_list_head(upload_stats_list))) { hash_list_remove(upload_stats_list, s); atom_str_free_null(&s->pathname); atom_str_free_null(&s->filename); if (s->sha1) g_hash_table_remove(upload_stats_by_sha1, s->sha1); atom_sha1_free_null(&s->sha1); WFREE(s); } hash_list_free(&upload_stats_list); gm_hash_table_destroy_null(&upload_stats_by_sha1); } dirty = TRUE; }
/**
 * Remove an event from the queue.
 *
 * The event is freed in all cases, even when it was not properly registered
 * (which is loudly reported but must not crash us).
 */
static void wq_remove(wq_event_t *we)
{
	hash_list_t *hl;

	wq_event_check(we);

	hl = htable_lookup(waitqueue, we->key);
	if (NULL == hl) {
		s_critical("attempt to remove event %s() on unknown key %p",
			stacktrace_function_name(we->cb), we->key);
		/*
		 * BUGFIX: s_critical() only logs, it does not abort.  Falling
		 * through with a NULL list would dereference NULL in
		 * hash_list_remove().  Still dispose of the event to avoid leaking.
		 */
		wq_event_free(we);
		return;
	}

	if (NULL == hash_list_remove(hl, we)) {
		s_critical("attempt to remove unknown event %s() on %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (0 == hash_list_length(hl)) {
		/*
		 * Cleanup the table if it ends-up being empty.
		 */
		hash_list_free(&hl);
		htable_remove(waitqueue, we->key);
	}

	wq_event_free(we);
}
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 *
 * @param dw		the DBM wrapper to destroy
 * @param close_map	whether the underlying DB map must be closed as well
 */
void dbmw_destroy(dbmw_t *dw, gboolean close_map)
{
	dbmw_check(dw);

	/* Emit cache efficiency statistics before tearing everything down */
	if (common_stats)
		g_debug("DBMW destroying \"%s\" with %s back-end "
			"(read cache hits = %.2f%% on %s request%s, "
			"write cache hits = %.2f%% on %s request%s)",
			dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
			dw->r_hits * 100.0 / MAX(1, dw->r_access),
			uint64_to_string(dw->r_access), 1 == dw->r_access ? "" : "s",
			dw->w_hits * 100.0 / MAX(1, dw->w_access),
			uint64_to_string2(dw->w_access), 1 == dw->w_access ? "" : "s");

	/*
	 * If we close the map and we're volatile, there's no need to flush
	 * the cache as the data is going to be gone soon anyway.
	 */
	if (!close_map || !dw->is_volatile) {
		dbmw_sync(dw, DBMW_SYNC_CACHE);
	}

	dbmw_clear_cache(dw);
	hash_list_free(&dw->keys);
	map_destroy(dw->values);

	if (dw->mb)
		pmsg_free(dw->mb);
	bstr_free(&dw->bs);

	if (close_map)
		dbmap_destroy(dw->dm);

	dw->magic = 0;	/* Invalidate magic to catch accidental reuse */
	WFREE(dw);
}