/* Destroy an entire hash table */ int ht_destroy(struct hashtable *t) { unsigned int i; struct hashtable copy = *t; /* Free all the elements */ for (i = 0; i < t->size && t->used > 0; i++) { if (t->table[i] != NULL && t->table[i] != ht_free_element) { if (t->key_destructor) t->key_destructor(t->table[i]->key); if (t->val_destructor) t->val_destructor(t->table[i]->data); #ifndef AHT_USE_SLAB free(t->table[i]); #endif t->used--; } } #ifdef AHT_USE_SLAB slab_destroy(t->cache); #endif /* Free the table and the allocated cache structure */ free(t->table); #ifdef AHT_USE_SLAB free(t->cache); #endif /* Re-initialize the table */ ht_init(t); /* Restore methods */ t->hashf = copy.hashf; t->key_destructor = copy.key_destructor; t->val_destructor = copy.val_destructor; t->key_compare = copy.key_compare; return HT_OK; /* It can't fail ht_destroy never fails */ }
//Destroy all resources connected to a cache object void destroy_cache_obj(cache_real_obj *cache) { printf("Cache was destroyed.\n"); //Free buffer slab_destroy(cache->slab_manager); int i=0; for (i; i < cache->num_buckets; i++) LL_destroy(cache->buckets[i]); free(cache); }
/* Return a buffer to its owning slab, or free a large out-of-slab block.
 * The slab (if any) is located from the pointer's address; SLAB_MAGIC
 * distinguishes real slabs from large blocks that bypassed the allocator. */
void slab_free(void* ptr)
{
	// Null pointer check: freeing NULL is a no-op
	if (knot_unlikely(!ptr)) {
		return;
	}

	// Get slab start address from the buffer pointer
	slab_t* slab = slab_from_ptr(ptr);
	assert(slab);

	// Check if it exists in directory (magic marks a genuine slab)
	if (slab->magic == SLAB_MAGIC) {
		// Return buf to slab: push it onto the slab's free list
		// (the freed buffer's first word stores the next-free link)
		*((void**)ptr) = (void*)slab->head;
		slab->head = (void**)ptr;
		++slab->bufs_free;
#ifdef MEM_DEBUG
		// Increment statistics (atomic; counters may be shared)
		__sync_add_and_fetch(&slab->cache->stat_frees, 1);
#endif
		// Return to partial: first freed buf means the slab was full,
		// so move it back to the cache's free-slab list
		if (knot_unlikely(slab->bufs_free == 1)) {
			slab_list_move(&slab->cache->slabs_free, slab);
		} else {
#ifdef MEM_SLAB_CAP
			// Recycle if empty: keep at most MEM_SLAB_CAP empty
			// slabs cached; destroy any beyond that cap
			if (knot_unlikely(slab_isempty(slab))) {
				if (slab->cache->empty == MEM_SLAB_CAP) {
					slab_destroy(&slab);
				} else {
					++slab->cache->empty;
				}
			}
#endif
		}
	} else {
		// Pointer is not a slab
		// Presuming it's a large block: its header sits just before ptr
		slab_obj_t* bs = (slab_obj_t*)ptr - 1;
#ifdef MEM_POISON
		// Remove memory barrier so the guard page past the block
		// can be reclaimed along with the block itself
		mprotect(ptr + bs->size, sizeof(int), PROT_READ|PROT_WRITE);
#endif
		// Unmap (header and payload were allocated as one chunk)
		dbg_mem("%s: unmapping large block of %zu bytes at %p\n", __func__, bs->size, ptr);
		free(bs);
	}
}
/*!
 * \brief Free all slabs from a slab cache.
 * \return Number of freed slabs.
 */
static inline int slab_cache_free_slabs(slab_t* slab)
{
	int freed = 0;

	/* Snapshot each node's successor before destroying it, since
	 * slab_destroy invalidates the node we are standing on. */
	for (slab_t* cur = slab; cur != NULL; ) {
		slab_t* successor = cur->next;
		slab_destroy(&cur);
		++freed;
		cur = successor;
	}

	return freed;
}
/* Reclaim memory from a slab cache by destroying every empty slab on the
 * free list; occupied slabs are left alone. Returns the number destroyed. */
int slab_cache_reap(slab_cache_t* cache)
{
	int reaped = 0;

	/* For now, just free empty slabs. Save the successor first:
	 * slab_destroy invalidates the current node. */
	for (slab_t* cur = cache->slabs_free; cur != NULL; ) {
		slab_t* successor = cur->next;
		if (slab_isempty(cur)) {
			slab_destroy(&cur);
			++reaped;
		}
		cur = successor;
	}

	/* No empty slabs remain cached after a reap */
	cache->empty = 0;

	return reaped;
}
/* Tear down an ilo context: release every owned sub-object (state vector,
 * uploader, blitter, renderer, shader cache, command parser), then the
 * transfer memory pool and the context itself. Order mirrors the original. */
static void ilo_context_destroy(struct pipe_context *pipe)
{
   struct ilo_context *ctx = ilo_context(pipe);

   ilo_state_vector_cleanup(&ctx->state_vector);

   /* each sub-object may be NULL if context creation failed partway */
   if (ctx->uploader)
      u_upload_destroy(ctx->uploader);

   if (ctx->blitter)
      ilo_blitter_destroy(ctx->blitter);

   if (ctx->render)
      ilo_render_destroy(ctx->render);

   if (ctx->shader_cache)
      ilo_shader_cache_destroy(ctx->shader_cache);

   if (ctx->cp)
      ilo_cp_destroy(ctx->cp);

   slab_destroy(&ctx->transfer_mempool);

   FREE(ctx);
}