/**
 * Launch idle events for the queue.
 */
static void
cq_run_idle(cqueue_t *cq)
{
	cqueue_check(cq);

	if (cq->cq_idle != NULL) {
		mutex_lock(&cq->cq_idle_lock);
		hset_foreach_remove(cq->cq_idle, cq_idle_trampoline, NULL);
		mutex_unlock(&cq->cq_idle_lock);
	}
}
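/*
 * Illustrative sketch (not part of the original sources): one plausible
 * shape for an hset_foreach_remove() callback such as cq_idle_trampoline()
 * above.  It assumes the callback receives the stored key plus an opaque
 * user-data pointer and returns TRUE when the entry must be dropped from
 * the set; the `struct cq_idle_sketch' type and its fields are hypothetical.
 */
#if 0
struct cq_idle_sketch {
	bool (*event)(void *data);	/* Idle callback to invoke */
	void *data;					/* Argument passed to the callback */
};

static bool
cq_idle_trampoline_sketch(const void *key, void *unused_data)
{
	struct cq_idle_sketch *ci = (struct cq_idle_sketch *) key;

	(void) unused_data;			/* Extra argument not needed here */

	/*
	 * Run the idle callback; a FALSE return means the event does not wish
	 * to remain registered, hence we return TRUE to remove it from the set.
	 */
	return !(*ci->event)(ci->data);
}
#endif	/* 0 */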
/**
 * Unregister callbacks in the backend and clean up.
 */
G_GNUC_COLD void
nodes_gui_shutdown(void)
{
	GtkCList *clist;

	clist = GTK_CLIST(gui_main_window_lookup("clist_nodes"));
	clist_save_visibility(clist, PROP_NODES_COL_VISIBLE);
	clist_save_widths(clist, PROP_NODES_COL_WIDTHS);

	guc_node_remove_node_added_listener(nodes_gui_node_added);
	guc_node_remove_node_removed_listener(nodes_gui_node_removed);
	guc_node_remove_node_info_changed_listener(nodes_gui_node_info_changed);
	guc_node_remove_node_flags_changed_listener(nodes_gui_node_flags_changed);

	hset_foreach_remove(hs_node_info_changed, free_node_id, NULL);
	hset_free_null(&hs_node_info_changed);

	hset_foreach_remove(hs_node_flags_changed, free_node_id, NULL);
	hset_free_null(&hs_node_flags_changed);

	nodes_gui_remove_all_nodes();
}
/**
 * Free the callout queue and all contained event objects.
 */
static void
cq_free(cqueue_t *cq)
{
	cevent_t *ev;
	cevent_t *ev_next;
	int i;
	struct chash *ch;

	cqueue_check(cq);

	if (cq->cq_current != NULL) {
		s_carp("%s(): %squeue \"%s\" still within cq_clock()", G_STRFUNC,
			CSUBQUEUE_MAGIC == cq->cq_magic ? "sub" : "", cq->cq_name);
	}

	mutex_lock(&cq->cq_lock);

	for (ch = cq->cq_hash, i = 0; i < HASH_SIZE; i++, ch++) {
		for (ev = ch->ch_head; ev; ev = ev_next) {
			ev_next = ev->ce_bnext;
			ev->ce_magic = 0;
			WFREE(ev);
		}
	}

	if (cq->cq_periodic) {
		hset_foreach_remove(cq->cq_periodic, cq_free_periodic, NULL);
		hset_free_null(&cq->cq_periodic);
	}

	if (cq->cq_idle) {
		hset_foreach_remove(cq->cq_idle, cq_free_idle, cq);
		hset_free_null(&cq->cq_idle);
	}

	XFREE_NULL(cq->cq_hash);
	atom_str_free_null(&cq->cq_name);

	/*
	 * Unlocking the cq->cq_lock mutex (taken above) prevents a loud warning
	 * in mutex_destroy() in case the mutex was already locked by our thread,
	 * meaning we were already in cq_clock().  In that situation however,
	 * we already warned upon entry, and therefore there is no need for a
	 * second warning.
	 *
	 * If the mutex was not taken and someone else attempts to grab it at
	 * that stage, there will be a slight window which fortunately will be
	 * loudly detected by mutex_destroy(), as a case of a mutex being
	 * destroyed whilst owned by another thread.
	 *
	 * No valid application code should attempt to sneak in at this stage to
	 * grab that mutex anyway, so our logic is safe and we will be copiously
	 * warned if something unexpected happens.
	 *		--RAM, 2012-12-04.
	 */

	mutex_unlock(&cq->cq_lock);
	mutex_destroy(&cq->cq_lock);
	mutex_destroy(&cq->cq_idle_lock);

	/*
	 * If freeing a sub-queue, the object is a bit larger than a queue,
	 * and we have more cleanup to do...
	 */

	if (CSUBQUEUE_MAGIC == cq->cq_magic) {
		cq_subqueue_free((struct csubqueue *) cq);
	} else {
		cq->cq_magic = 0;
		WFREE(cq);
	}
}
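/*
 * Illustrative sketch (not from the original sources): the "free and
 * NULL-ify" idiom used above by hset_free_null(), XFREE_NULL() and
 * atom_str_free_null(), applied here to a callout queue pointer.  The
 * wrapper name cq_free_null_sketch() is hypothetical; it merely shows how
 * a caller could dispose of a queue and clear its reference in one step.
 */
#if 0
static void
cq_free_null_sketch(cqueue_t **cq_ptr)
{
	cqueue_t *cq = *cq_ptr;

	if (cq != NULL) {
		cq_free(cq);
		*cq_ptr = NULL;		/* Prevent accidental reuse of a stale pointer */
	}
}
#endif	/* 0 */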