/*
 * Do any necessary cleanup for the logs - See record-replay.h for full
 * routine header.
 */
void replay_term(global_state_t *g)
{
    // Free memory for the record/replay log file name, if we've got one.
    // Null the pointer afterwards so no dangling reference is left in the
    // global state (same pattern already used below for replay_list_root
    // and replay_list_entry).
    if (g->record_replay_file_name)
    {
        __cilkrts_free(g->record_replay_file_name);
        g->record_replay_file_name = NULL;
    }

    // Per-worker cleanup
    for(int i = 0; i < g->total_workers; ++i)
    {
        __cilkrts_worker *w = g->workers[i];

        // Close the log files, if we've opened them, and clear the stale
        // FILE pointer so it can't be used after the stream is closed.
        if(w->l->record_replay_fptr)
        {
            fclose(w->l->record_replay_fptr);
            w->l->record_replay_fptr = NULL;
        }

        if (w->l->replay_list_root)
        {
            // We should have consumed the entire list
            CILK_ASSERT(ped_type_last == w->l->replay_list_entry->m_type);

            // Walk the entry array up to (but not including) the
            // terminating ped_type_last entry, releasing each entry's
            // pedigree memory.
            replay_entry_t *entry = w->l->replay_list_root;
            while (ped_type_last != entry->m_type)
            {
                // Free the pedigree memory for each entry
                entry->unload();
                entry++;
            }

            __cilkrts_free(w->l->replay_list_root);
            w->l->replay_list_root = NULL;
            w->l->replay_list_entry = NULL;
        }
    }
}
/**
 * Tear down a fiber pool: hand surviving fibers up to the parent pool
 * (when one exists), free whatever remains locally, then release the
 * pool's lock and its fiber array.
 */
void cilk_fiber_pool_destroy(cilk_fiber_pool* pool)
{
    CILK_ASSERT(cilk_fiber_pool_sanity_check(pool, "pool_destroy"));

    // Transfer any remaining fibers to the parent pool, holding our own
    // lock (if this pool has one) for the duration of the move.
    if (pool->lock)
        spin_mutex_lock(pool->lock);

    if (pool->parent)
        cilk_fiber_pool_move_fibers_to_parent_pool(pool, 0);

    if (pool->lock)
        spin_mutex_unlock(pool->lock);

    // Anything still left in this pool gets freed directly.  This call
    // may acquire the pool lock itself, which is why we released it above.
    cilk_fiber_pool_free_fibers_from_pool(pool, 0, NULL);

    // The lock is no longer needed once the pool is drained.
    if (pool->lock)
        spin_mutex_destroy(pool->lock);

    __cilkrts_free(pool->fibers);
}
int cilk_fiber::remove_reference_from_thread() { int ref_count = dec_ref_count(); if (ref_count == 0) { cilk_fiber_sysdep* self = this->sysdep(); self->~cilk_fiber_sysdep(); __cilkrts_free(self); } return ref_count; }
// This destructor is called when a pthread dies to deallocate the // pedigree node. static void __cilkrts_pedigree_leaf_destructor(void* pedigree_tls_ptr) { __cilkrts_pedigree* pedigree_tls = (__cilkrts_pedigree*)pedigree_tls_ptr; if (pedigree_tls) { // Assert that we have either one or two nodes // left in the pedigree chain. // If we have more, then something is going wrong... CILK_ASSERT(!pedigree_tls->parent || !pedigree_tls->parent->parent); __cilkrts_free(pedigree_tls); } }
/*
 * Tear down the global frame-malloc state: optionally check for memory
 * corruption/leaks, return every OS-allocated pool chunk, and destroy the
 * frame-malloc lock.  Called during global runtime shutdown.
 */
void __cilkrts_frame_malloc_global_cleanup(global_state_t *g)
{
    struct pool_cons *c;

    if (g->frame_malloc.check_for_leaks) {
        size_t memory_in_global_list = count_memory_in_global_list(g);
        // TBD: This check is weak. Short of memory corruption,
        // I don't see how we have more memory in the free list
        // than allocated from the os.
        // Ideally, we should count the memory in the global free list
        // and check that we have it all. But I believe the runtime
        // itself also uses some memory, which is not being tracked.
        if (memory_in_global_list > g->frame_malloc.allocated_from_os) {
            __cilkrts_bug("\nError. The Cilk runtime data structures may have been corrupted.\n");
        }
    }

    // Walk the pool list, freeing each chunk's payload and then the
    // list node itself; the list head is advanced before each free so
    // the traversal never touches freed memory.
    while ((c = g->frame_malloc.pool_list)) {
        g->frame_malloc.pool_list = c->cdr;
        __cilkrts_free(c->p);
        __cilkrts_free(c);
    }

    __cilkrts_mutex_destroy(0, &g->frame_malloc.lock);

    // Check that all the memory moved from the global pool into
    // workers has been returned to the global pool.
    if (g->frame_malloc.check_for_leaks
        && (g->frame_malloc.allocated_from_global_pool != 0)) {
        __cilkrts_bug("\n"
                      "---------------------------" "\n"
                      " MEMORY LEAK DETECTED!!! " "\n"
                      "---------------------------" "\n"
                      "\n"
            );
    }
}
/* * Free saved TBB interop memory. Should only be called when the thread is * not bound. */ void cilk_fiber_tbb_interop_free_stack_op_info(void) { __cilk_tbb_stack_op_thunk *saved_thunk = __cilkrts_get_tls_tbb_interop(); // If we haven't allocated a TBB interop index, we don't have any saved info if (NULL == saved_thunk) return; DBG_STACK_OPS ("tbb_interop_free_stack_op_info - freeing saved info\n"); // Free the memory and wipe out the TLS value __cilkrts_free(saved_thunk); __cilkrts_set_tls_tbb_interop(NULL); }
int cilk_fiber::deallocate_from_thread() { CILK_ASSERT(this->is_allocated_from_thread()); #if SUPPORT_GET_CURRENT_FIBER CILK_ASSERT(this == cilkos_get_tls_cilk_fiber()); // Reverse of "allocate_from_thread". cilkos_set_tls_cilk_fiber(NULL); #endif this->assert_ref_count_at_least(2); // Suspending the fiber should conceptually decrement the ref // count by 1. cilk_fiber_sysdep* self = this->sysdep(); self->convert_fiber_back_to_thread(); // Then, freeing the fiber itself decrements the ref count again. int ref_count = this->sub_from_ref_count(2); if (ref_count == 0) { self->~cilk_fiber_sysdep(); __cilkrts_free(self); } return ref_count; }
/**
 * Release any allocated resources.
 *
 * Clears the reverse-pedigree member before freeing it, so the object
 * never holds a pointer to released memory.
 */
void unload()
{
    void* pedigree_mem = m_reverse_pedigree;
    m_reverse_pedigree = NULL;
    __cilkrts_free(pedigree_mem);
}
/* Release the storage for a spin mutex object.  Counterpart of the
 * allocation path; after this call the pointer is invalid. */
void spin_mutex_destroy(struct spin_mutex *m)
{
    __cilkrts_free(m);
}
void cilk_fiber::deallocate_to_heap() { cilk_fiber_sysdep* self = this->sysdep(); self->~cilk_fiber_sysdep(); __cilkrts_free(self); }