int storage_trunk_destroy_ex(const bool bNeedSleep)
{
	int result;

	if (trunk_init_flag != STORAGE_TRUNK_INIT_FLAG_DONE)
	{
		logWarning("file: "__FILE__", line: %d, " \
			"trunk not inited!", __LINE__);
		return 0;
	}

	/* flag the subsystem as shutting down so concurrent trunk
	 * operations can back off before resources are released */
	trunk_init_flag = STORAGE_TRUNK_INIT_FLAG_DESTROYING;
	if (bNeedSleep)
	{
		sleep(1);
	}

	logDebug("file: "__FILE__", line: %d, " \
		"storage trunk destroy", __LINE__);

	/* persist the in-memory free-block index before freeing it */
	result = storage_trunk_save();

	avl_tree_destroy(&tree_info_by_size);
	trunk_free_block_checker_destroy();
	fast_mblock_destroy(&free_blocks_man);
	fast_mblock_destroy(&tree_nodes_man);
	pthread_mutex_destroy(&trunk_file_lock);
	pthread_mutex_destroy(&trunk_mem_lock);

	trunk_init_flag = STORAGE_TRUNK_INIT_FLAG_NONE;
	return result;
}
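/*
 * A minimal usage sketch, not part of the original source: a shutdown path
 * calling the destroy routine above. The wrapper name example_storage_shutdown
 * is hypothetical; only storage_trunk_destroy_ex() comes from the code above.
 * Passing true gives in-flight trunk operations a one-second window to notice
 * the DESTROYING flag before the free-block index is saved and released.
 */
static int example_storage_shutdown(void)
{
	int result;

	result = storage_trunk_destroy_ex(true);
	if (result != 0)
	{
		logError("file: "__FILE__", line: %d, " \
			"storage trunk destroy fail, error code: %d", \
			__LINE__, result);
	}

	return result;
}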
void flat_skiplist_destroy(FlatSkiplist *sl)
{
	int i;
	FlatSkiplistNode *node;
	FlatSkiplistNode *deleted;

	if (sl->mblocks == NULL)
	{
		return;
	}

	/* walk the bottom level and let the caller-supplied free_func
	 * release each element's data before the nodes themselves go away */
	if (sl->free_func != NULL)
	{
		node = sl->top->links[0];
		while (node != sl->tail)
		{
			deleted = node;
			node = node->links[0];
			sl->free_func(deleted->data);
		}
	}

	/* destroy the per-level node allocators, then the allocator array */
	for (i=0; i<sl->level_count; i++)
	{
		fast_mblock_destroy(sl->mblocks + i);
	}

	free(sl->mblocks);
	sl->mblocks = NULL;
}
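/*
 * A minimal sketch, not from the original source, of the kind of free_func
 * callback flat_skiplist_destroy() invokes on each element's data pointer
 * during the bottom-level walk. The record type ExampleRecord and the
 * callback name are assumptions for illustration; the callback is registered
 * when the skiplist is initialized.
 */
typedef struct example_record
{
	int id;
	char *name;   /* separately allocated, so it must be freed here */
} ExampleRecord;

static void example_record_free(void *data)
{
	ExampleRecord *record;

	record = (ExampleRecord *)data;
	free(record->name);
	free(record);
}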
static void region_destroy(struct fast_allocator_context *acontext,
	struct fast_region_info *region)
{
	int element_size;
	struct fast_allocator_info *allocator;

	/* one allocator per element size in (start, end], stepping by region->step */
	allocator = region->allocators;
	for (element_size=region->start+region->step; element_size<=region->end;
		element_size+=region->step, allocator++)
	{
		fast_mblock_destroy(&allocator->mblock);
	}

	free(region->allocators);
	region->allocators = NULL;
}
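/*
 * An illustrative lookup helper, an assumption rather than part of
 * fast_allocator.c, showing the layout region_destroy() walks: allocator[i]
 * serves element_size == start + (i + 1) * step, so a requested size in
 * (start, end] maps to the bucket whose element size is the request rounded
 * up to the next multiple of step.
 */
static struct fast_allocator_info *example_region_lookup(
	struct fast_region_info *region, const int size)
{
	int index;

	if (size <= region->start || size > region->end)
	{
		return NULL;   /* the size belongs to a different region */
	}

	index = (size - region->start + region->step - 1) / region->step - 1;
	return region->allocators + index;
}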