/*
 * Release all idnode class registry state: drain both class
 * red-black trees and drop the class lookup skeleton.
 */
void idnode_done(void)
{
  idclass_link_t *link_ent;

  for (link_ent = RB_FIRST(&idclasses); link_ent != NULL;
       link_ent = RB_FIRST(&idclasses)) {
    RB_REMOVE(&idclasses, link_ent, link);
    free(link_ent);
  }
  for (link_ent = RB_FIRST(&idrootclasses); link_ent != NULL;
       link_ent = RB_FIRST(&idrootclasses)) {
    RB_REMOVE(&idrootclasses, link_ent, link);
    free(link_ent);
  }
  SKEL_FREE(idclasses_skel);
}
/*
 * Look up the language element of @ls best matching @lang.
 *
 * The requested/configured language string is split into individual
 * codes which are probed in priority order; if none matches (or @lang
 * yields no codes) the first available element is returned instead.
 * Returns NULL only when @ls itself is NULL (or empty).
 */
lang_str_ele_t *lang_str_get2 ( lang_str_t *ls, const char *lang )
{
  const char **codes;
  lang_str_ele_t probe, *found = NULL;

  if (ls == NULL)
    return NULL;

  /* Probe each requested language code in priority order */
  codes = lang_code_split(lang);
  if (codes != NULL) {
    for (int i = 0; codes[i] != NULL; i++) {
      probe.lang = codes[i];
      found = RB_FIND(ls, &probe, link, _lang_cmp);
      if (found != NULL)
        break;
    }
    free(codes);
  }

  /* Fall back to the first available entry */
  return found != NULL ? found : RB_FIRST(ls);
}
/*
 * Emit a REDO SYNC record.  At least one such record must appear in
 * the nominal recovery span for the recovery code to be able to run
 * REDOs outside of the span.
 *
 * The SYNC record carries the aggregate earliest UNDO/REDO FIFO offset
 * across all inodes with active REDOs; this value moves dynamically as
 * inodes get flushed.
 *
 * While recovery stage2 is running, any new flush cycles must keep
 * reporting the original redo sync offset.  That way a crash re-runs
 * the REDOs, at least up to the point where the UNDO FIFO does not
 * overwrite the area.
 */
void
hammer_generate_redo_sync(hammer_transaction_t trans)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip = NULL;
	hammer_off_t sync_offset = 0;

	if (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) {
		/* Stage2 recovery: pin the sync offset to the original */
		sync_offset = hmp->recover_stage2_offset;
	} else {
		/* Earliest active REDO across all inodes, if any */
		ip = RB_FIRST(hammer_redo_rb_tree, &hmp->rb_redo_root);
		if (ip != NULL)
			sync_offset = ip->redo_fifo_start;
	}

	/* Nothing to synchronize */
	if (sync_offset == 0)
		return;

	if (hammer_debug_io & 0x0004) {
		kprintf("SYNC IP %p %016jx\n",
			ip, (intmax_t)sync_offset);
	}
	hammer_generate_redo(trans, NULL, sync_offset,
			     HAMMER_REDO_SYNC, NULL, 0);
	hmp->flags |= HAMMER_MOUNT_REDO_SYNC;
}
/*
 * Tear down the API subsystem: release every registered hook from the
 * hook tree and drop the lookup skeleton.
 */
void api_done ( void )
{
  api_link_t *hook;

  for (hook = RB_FIRST(&api_hook_tree); hook != NULL;
       hook = RB_FIRST(&api_hook_tree)) {
    RB_REMOVE(&api_hook_tree, hook, link);
    free(hook);
  }
  SKEL_FREE(api_skel);
}
/*
 * Free a language-code lookup table together with all of its
 * elements.  Passing NULL is a no-op.
 */
static void lang_code_free( lang_code_lookup_t *l )
{
  lang_code_lookup_element_t *ele;

  if (!l)
    return;
  for (ele = RB_FIRST(l); ele != NULL; ele = RB_FIRST(l)) {
    RB_REMOVE(l, ele, link);
    free(ele);
  }
  free(l);
}
/*
 * Destroy a language string list: release each element's string
 * buffer, the tree nodes themselves, and finally the list.
 * Passing NULL is a no-op.
 */
void lang_str_destroy ( lang_str_t *ls )
{
  lang_str_ele_t *e;

  if (ls == NULL)
    return;
  while ((e = RB_FIRST(ls)) != NULL) {
    RB_REMOVE(ls, e, link);
    free(e->str);   /* free(NULL) is a no-op, no guard needed */
    free(e);
  }
  free(ls);
}
/*
 * Shut down the idnode subsystem.
 *
 * Stops the background notification thread (wake it via the condition
 * variable, then reap it), discards the pending notification queue
 * under the mutex, drains both class registries, and frees the lookup
 * skeleton.
 *
 * NOTE(review): the condvar is signalled without holding idnode_mutex;
 * this presumably relies on the worker re-checking its quit condition
 * after waking — confirm against the thread function.
 */
void idnode_done(void) {
  idclass_link_t *il;

  /* Stop and reap the notification thread */
  pthread_cond_signal(&idnode_cond);
  pthread_join(idnode_tid, NULL);

  /* Drop any queued notifications */
  pthread_mutex_lock(&idnode_mutex);
  htsmsg_destroy(idnode_queue);
  idnode_queue = NULL;
  pthread_mutex_unlock(&idnode_mutex);

  /* Drain the class registries */
  while ((il = RB_FIRST(&idclasses)) != NULL) {
    RB_REMOVE(&idclasses, il, link);
    free(il);
  }
  while ((il = RB_FIRST(&idrootclasses)) != NULL) {
    RB_REMOVE(&idrootclasses, il, link);
    free(il);
  }
  SKEL_FREE(idclasses_skel);
}
void intlconv_done( void ) { intlconv_cache_t *ic; pthread_mutex_lock(&intlconv_lock); intlconv_last_ic = NULL; while ((ic = RB_FIRST(&intlconv_all)) != NULL) { iconv_close(ic->ic_handle); free(ic->ic_charset_id); RB_REMOVE(&intlconv_all, ic, ic_link); free(ic); } intlconv_last_src_ic = NULL; while ((ic = RB_FIRST(&intlconv_src_all)) != NULL) { iconv_close(ic->ic_handle); free(ic->ic_charset_id); RB_REMOVE(&intlconv_src_all, ic, ic_link); free(ic); } pthread_mutex_unlock(&intlconv_lock); }