// Free one large-object allocation: run debug/profiler bookkeeping, clear any
// weak reference, unlink the block from m_blocks, and return its pages to the
// GC via FreeBlock.  'item' is the raw block-space pointer; GetUserPointer()
// derives the caller-visible pointer from it (see uses below).
// NOTE(review): if the collector is marking and this object is protected (or a
// collection is underway), the free is deferred through AbortFree instead of
// performed here.
void GCLargeAlloc::Free(const void *item)
{
    LargeBlock *b = GetLargeBlock(item);

#ifdef GCDEBUG
    // RCObject have contract that they must clean themselves, since they
    // have to scan themselves to decrement other RCObjects they might as well
    // clean themselves too, better than suffering a memset later
    if(b->rcobject)
        m_gc->RCObjectZeroCheck((RCObject*)GetUserPointer(item));
#endif

    // We can't allow free'ing something during Sweeping, otherwise alloc counters
    // get decremented twice and destructors will be called twice.
    GCAssert(m_gc->collecting == false || m_gc->marking == true);
    if (m_gc->marking && (m_gc->collecting || IsProtectedAgainstFree(b)))
    {
        // Defer: hand the object to the collector to reclaim later instead of
        // freeing it out from under the mark phase.
        m_gc->AbortFree(GetUserPointer(item));
        return;
    }

    // Credit the freed bytes to the allocation policy's accounting.
    m_gc->policy.signalFreeWork(b->size);

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled())
    {
        const void* p = GetUserPointer(item);
        size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
        // Undo this allocation's contribution to the profiler's ask-size total.
        if(heap->GetProfiler())
            m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
        // Finalize first, then poison/free-notify, matching the hook protocol.
        heap->FinalizeHook(p, userSize);
        heap->FreeHook(p, userSize, uint8_t(GCHeap::GCFreedPoison));
    }
#endif

    // Any weak reference to this object must be cleared before the memory goes away.
    if(b->flags[0] & kHasWeakRef)
        m_gc->ClearWeakRef(GetUserPointer(item));

    // Unlink 'b' from the singly linked m_blocks list, adjust counters, and
    // release its pages.  Walks via a pointer-to-pointer so the head and
    // interior cases are handled uniformly.
    LargeBlock **prev = &m_blocks;
    while(*prev)
    {
        if(b == *prev)
        {
            *prev = Next(b);
            size_t numBlocks = b->GetNumBlocks();
            m_totalAllocatedBytes -= b->size;
            // Tear down the Valgrind mempool tracking for this block: release
            // the block header and the user object, then destroy the pool.
            VALGRIND_MEMPOOL_FREE(b, b);
            VALGRIND_MEMPOOL_FREE(b, item);
            VALGRIND_DESTROY_MEMPOOL(b);
            m_gc->FreeBlock(b, (uint32_t)numBlocks, m_partitionIndex);
            return;
        }
        prev = (LargeBlock**)(&(*prev)->next);
    }
    // Reaching here means 'item' was not in our list — a double free or a
    // pointer that never came from this allocator.
    GCAssertMsg(false, "Bad free!");
}
/*static*/ bool GCLargeAlloc::IsWhite(const void *item) { if(!IsLargeBlock(item)) return false; return (GetLargeBlock(item)->flags[0] & (kMark|kQueued)) == 0; }
/* static */ bool GCLargeAlloc::ConservativeGetMark(const void *item, bool bogusPointerReturnValue) { if(!IsLargeBlock(item)) return bogusPointerReturnValue; return (GetLargeBlock(item)->flags[0] & kMark) != 0; }