void GCAlloc::CheckMarks()
{
    GCBlock *b = m_firstBlock;

    while (b) {
        GCBlock *next = Next(b);
        GCAssertMsg(!b->needsSweeping, "All needsSweeping should have been swept at this point.");

        // TODO: MMX version for IA32
        uint32_t *bits = b->GetBits();
        uint32_t count = b->nextItem ? GetIndex(b, b->nextItem) : m_itemsPerBlock;
        // round up to eight
        uint32_t numInts = ((count+7)&~7) >> 3;
        for(uint32_t i=0; i < numInts; i++) {
            uint32_t marks = bits[i];
            // hmm, is it better to screw around with exact counts or just examine
            // 8 items on each pass, with the latter we open the door to unrolling
            uint32_t subCount = i==(numInts-1) ? ((count-1)&7)+1 : 8;
            for(uint32_t j=0; j<subCount; j++, marks>>=4) {
                uint32_t m = marks&kFreelist;
                GCAssertMsg(m == 0 || m == kFreelist, "All items should be free or clear, nothing should be marked or queued.");
            }
        }

        // Advance to next block
        b = next;
    }
}
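The per-item state here is a 4-bit field packed eight to a uint32_t, which is why the loop rounds the item count up to a multiple of eight and shifts the word right by four bits per item. The following is a minimal standalone sketch of the same nibble-walking pattern; it is not MMgc code, and the names (kNibbleFreelist, CountFreeNibbles) and the exact bit pattern are assumptions for illustration only.

// Illustrative sketch only (not part of MMgc): walk 4-bit state fields packed
// eight to a 32-bit word, the same layout CheckMarks() iterates above.
#include <stdint.h>
#include <stddef.h>

static const uint32_t kNibbleFreelist = 0x3;   // assumed "free" pattern within the nibble

static size_t CountFreeNibbles(const uint32_t *bits, size_t itemCount)
{
    size_t freeItems = 0;
    size_t numWords = ((itemCount + 7) & ~size_t(7)) >> 3;   // 8 nibbles per word
    for (size_t i = 0; i < numWords; i++) {
        uint32_t word = bits[i];
        // The last word may cover fewer than 8 items.
        size_t sub = (i == numWords - 1) ? ((itemCount - 1) & 7) + 1 : 8;
        for (size_t j = 0; j < sub; j++, word >>= 4) {
            if ((word & kNibbleFreelist) == kNibbleFreelist)
                freeItems++;
        }
    }
    return freeItems;
}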
static bool CommitMemory(void* address, size_t size)
{
    // Keep the VirtualAlloc result separate so the requested address is still
    // available for diagnostics if the commit fails.
    void* result = VirtualAlloc(address, size,
                                MEM_COMMIT
#ifdef _WIN64
                                | MEM_TOP_DOWN
#endif //#ifdef _WIN64
                                , PAGE_READWRITE);
#ifdef _DEBUG
    if (result == NULL) {
        MEMORY_BASIC_INFORMATION mbi;
        VirtualQuery(address, &mbi, sizeof(MEMORY_BASIC_INFORMATION));
        LPVOID lpMsgBuf;
        FormatMessage(
            FORMAT_MESSAGE_ALLOCATE_BUFFER |
            FORMAT_MESSAGE_FROM_SYSTEM |
            FORMAT_MESSAGE_IGNORE_INSERTS,
            NULL,
            GetLastError(),
            MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
            (LPTSTR) &lpMsgBuf,
            0,
            NULL);
        GCAssertMsg(false, (const char*)lpMsgBuf);
    }
#endif //_DEBUG
    return result != NULL;
}
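CommitMemory commits pages inside address space that was reserved earlier. A hedged sketch of how a caller might pair reservation and commit with the Windows VirtualAlloc API follows; it is not MMgc code, the function names are hypothetical, and error handling is reduced to the minimum.

// Illustrative sketch only: reserve a region up front, then commit pages on
// demand, the pattern CommitMemory() above is designed for.
#ifdef _WIN32
#include <windows.h>

static void* ReserveRegion(size_t bytes)
{
    // Reserve address space without backing it with committed pages yet.
    return VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_NOACCESS);
}

static bool CommitPages(void* base, size_t offset, size_t bytes)
{
    // Commit a sub-range of the reserved region; pages become readable/writable.
    return VirtualAlloc((char*)base + offset, bytes, MEM_COMMIT, PAGE_READWRITE) != NULL;
}
#endif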
bool InitDbgHelp()
{
    static vmpi_spin_lock_t lock;
    static bool inited = false;

    // We must hold the lock for the entire initialization process:
    // - if we set inited to true and release the lock then other
    //   threads may forge ahead without initialization having occurred
    // - if we leave it false and release then other threads
    //   may try to perform initialization as well.
    MMGC_LOCK(lock);
    if(!inited) {
#ifndef UNDER_CE
        if(!g_DbgHelpDll.m_SymInitialize ||
           !(*g_DbgHelpDll.m_SymInitialize)(GetCurrentProcess(), NULL, true))
        {
            LPVOID lpMsgBuf;
            if(FormatMessage(
                   FORMAT_MESSAGE_ALLOCATE_BUFFER |
                   FORMAT_MESSAGE_FROM_SYSTEM |
                   FORMAT_MESSAGE_IGNORE_INSERTS,
                   NULL,
                   GetLastError(),
                   MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
                   (LPTSTR) &lpMsgBuf,
                   0,
                   NULL))
            {
                GCAssertMsg(false, "See lpMsgBuf");
                LocalFree(lpMsgBuf);
            }
            return false;
        }
#endif // ifndef UNDER_CE
        inited = true;
    }
    return true;
}
void GCLargeAlloc::Free(const void *item)
{
    LargeBlock *b = GetLargeBlock(item);

#ifdef GCDEBUG
    // RCObjects have a contract that they must clean themselves; since they
    // have to scan themselves to decrement other RCObjects they might as well
    // clean themselves too, better than suffering a memset later
    if(b->rcobject)
        m_gc->RCObjectZeroCheck((RCObject*)GetUserPointer(item));
#endif

    // We can't allow freeing something during Sweeping, otherwise alloc counters
    // get decremented twice and destructors will be called twice.
    GCAssert(m_gc->collecting == false || m_gc->marking == true);
    if (m_gc->marking && (m_gc->collecting || IsProtectedAgainstFree(b))) {
        m_gc->AbortFree(GetUserPointer(item));
        return;
    }

    m_gc->policy.signalFreeWork(b->size);

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled()) {
        const void* p = GetUserPointer(item);
        size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
        if(heap->GetProfiler())
            m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
        heap->FinalizeHook(p, userSize);
        heap->FreeHook(p, userSize, uint8_t(GCHeap::GCFreedPoison));
    }
#endif

    if(b->flags[0] & kHasWeakRef)
        m_gc->ClearWeakRef(GetUserPointer(item));

    LargeBlock **prev = &m_blocks;
    while(*prev) {
        if(b == *prev) {
            *prev = Next(b);
            size_t numBlocks = b->GetNumBlocks();
            m_totalAllocatedBytes -= b->size;
            VALGRIND_MEMPOOL_FREE(b, b);
            VALGRIND_MEMPOOL_FREE(b, item);
            VALGRIND_DESTROY_MEMPOOL(b);
            m_gc->FreeBlock(b, (uint32_t)numBlocks, m_partitionIndex);
            return;
        }
        prev = (LargeBlock**)(&(*prev)->next);
    }
    GCAssertMsg(false, "Bad free!");
}
void* FixedAlloc::Alloc(size_t size, FixedMallocOpts flags)
{
    void *item = InlineAllocSansHook(size, flags);
    GCAssertMsg(item != NULL || (flags&kCanFail), "NULL is only valid when kCanFail is set");
#ifdef MMGC_HOOKS
    InlineAllocHook(size, item);
#endif
    return item;
}
void VerifyTaggedScalar(void* p)
{
    if (!IsScalarAllocation(p)) {
        if (IsArrayAllocation(p, true) || IsArrayAllocation(p, false)) {
            GCAssertMsg(0, "Trying to release array pointer with scalar destructor! Check the allocation and free calls for this object!");
        }
        else if (!IsGCHeapAllocation(p)) {
            GCAssertMsg(0, "Trying to release system memory with scalar deletefunc! Check the allocation and free calls for this object!");
        }
        else {
            GCAssertMsg(0, "Trying to release funky memory with scalar deletefunc! Check the allocation and free calls for this object!");
        }
    }
}
/* static */
void GCAlloc::Free(const void *item)
{
    GCBlock *b = GetBlock(item);
    GCAlloc *a = b->alloc;

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled()) {
        const void* p = GetUserPointer(item);
        size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
        if(heap->GetProfiler())
            a->m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
        heap->FinalizeHook(p, userSize);
        heap->FreeHook(p, userSize, 0xca);
    }
#endif

#ifdef _DEBUG
    // check that it hasn't already been freed
    void *free = b->firstFree;
    while(free) {
        GCAssert(free != item);
        free = *((void**) free);
    }
#endif

    int index = GetIndex(b, item);
    if(GetBit(b, index, kHasWeakRef)) {
        b->gc->ClearWeakRef(GetUserPointer(item));
    }

    bool wasFull = b->IsFull();

    if(b->needsSweeping) {
#ifdef _DEBUG
        bool gone =
#endif
            a->Sweep(b);
        GCAssertMsg(!gone, "How can a page I'm about to free an item on be empty?");
        wasFull = false;
    }

    if(wasFull) {
        a->AddToFreeList(b);
    }

    b->FreeItem(item, index);

    if(b->numItems == 0) {
        a->UnlinkChunk(b);
        a->FreeChunk(b);
    }
}
void VerifyTaggedArray(void* p, bool primitive)
{
    if (!IsArrayAllocation(p, primitive)) {
        if (IsArrayAllocation(p, !primitive)) {
            GCAssertMsg(0, "Trying to release array pointer with different type destructor! Check the allocation and free calls for this object!");
        }
        else if (IsScalarAllocation(p)) {
            GCAssertMsg(0, "Trying to release scalar pointer with vector destructor! Check the allocation and free calls for this object!");
        }
        else if (!IsGCHeapAllocation(p)) {
            GCAssertMsg(0, "Trying to release system pointer with vector deletefunc! Check the allocation and free calls for this object!");
        }
        else {
            GCAssertMsg(0, "Trying to release funky memory with vector deletefunc! Check the allocation and free calls for this object!");
        }
    }
}
/*static*/
void FixedAlloc::Free(void *item)
{
    FixedBlock *b = (FixedBlock*) ((uintptr_t)item & ~0xFFF);

    GCAssertMsg(b->alloc->m_heap->IsAddressInHeap(item), "Bogus pointer passed to free");

#ifdef MMGC_HOOKS
    GCHeap *heap = b->alloc->m_heap;
    if(heap->HooksEnabled()) {
#ifdef MMGC_MEMORY_PROFILER
        if(heap->GetProfiler())
            b->alloc->m_totalAskSize -= heap->GetProfiler()->GetAskSize(item);
#endif
        heap->FinalizeHook(item, b->size - DebugSize());
        heap->FreeHook(item, b->size - DebugSize(), 0xed);
    }
#endif

    item = GetRealPointer(item);

    // Add this item to the free list
    *((void**)item) = b->firstFree;
    b->firstFree = item;

    // We were full but now we have a free spot, add us to the free block list.
    if (b->numAlloc == b->alloc->m_itemsPerBlock) {
        GCAssert(!b->nextFree && !b->prevFree);
        b->nextFree = b->alloc->m_firstFree;
        if (b->alloc->m_firstFree)
            b->alloc->m_firstFree->prevFree = b;
        b->alloc->m_firstFree = b;
    }
#ifdef _DEBUG
    else {
        // we should already be on the free list
        GCAssert((b == b->alloc->m_firstFree) || b->prevFree);
    }
#endif

    b->numAlloc--;

    if(b->numAlloc == 0) {
        b->alloc->FreeChunk(b);
    }
}
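Two ideas carry this function: blocks are page-aligned, so masking the low 12 bits of an item pointer recovers its block header, and freed items are threaded into an intrusive free list by reusing the item's first word as the "next" link. A minimal sketch of both tricks follows; it is not MMgc code, and the names (DemoBlock, BlockOf, PushFree) and the 4 KB alignment are assumptions for illustration.

// Illustrative sketch only: page-mask header lookup and intrusive free list push,
// the same tricks FixedAlloc::Free() uses above.
#include <stdint.h>

struct DemoBlock {
    void* firstFree;   // head of the intrusive free list
    // ... items follow in the same 4 KB page ...
};

static DemoBlock* BlockOf(void* item)
{
    // Assumes every block starts on a 4 KB boundary and items never cross pages.
    return (DemoBlock*)((uintptr_t)item & ~uintptr_t(0xFFF));
}

static void PushFree(DemoBlock* b, void* item)
{
    // The freed item's first word becomes the "next" link of the free list.
    *(void**)item = b->firstFree;
    b->firstFree = item;
}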
void* NewTaggedArray(size_t count, size_t elsize, FixedMallocOpts opts, bool isPrimitive)
{
    GCAssertMsg(GCHeap::GetGCHeap()->IsStackEntered() || (opts&kCanFail) != 0,
                "MMGC_ENTER macro must exist on the stack");

    size_t size = GCHeap::CheckForCallocSizeOverflow(count, elsize);
    if(!isPrimitive)
        size = GCHeap::CheckForAllocSizeOverflow(size, MMGC_ARRAYHEADER_SIZE);

    void *p = TaggedAlloc(size, opts, MMGC_NORM_ARRAY_GUARD + uint32_t(isPrimitive));

    if (!isPrimitive && p != NULL) {
        // Store the element count in the array header and hand back a pointer
        // just past it.
        *(size_t*)p = count;
        p = (char*)p + MMGC_ARRAYHEADER_SIZE;
    }

    return p;
}
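Non-primitive arrays carry their element count in a small header in front of the payload, so the matching delete path can recover how many destructors to run. The sketch below shows the same layout with plain malloc/free; it is not MMgc code, and kArrayHeaderSize and the helper names are hypothetical (the real MMGC_ARRAYHEADER_SIZE may differ, e.g. for alignment).

// Illustrative sketch only: a counted-array header in front of the payload.
#include <stddef.h>
#include <stdlib.h>

static const size_t kArrayHeaderSize = sizeof(size_t);   // assumed header layout

static void* AllocCountedArray(size_t count, size_t elsize)
{
    char* raw = (char*)malloc(kArrayHeaderSize + count * elsize);
    if (!raw)
        return NULL;
    *(size_t*)raw = count;             // remember the element count
    return raw + kArrayHeaderSize;     // caller sees only the payload
}

static size_t CountOf(void* payload)
{
    // Step back over the header to read the stored count.
    return *(size_t*)((char*)payload - kArrayHeaderSize);
}

static void FreeCountedArray(void* payload)
{
    free((char*)payload - kArrayHeaderSize);
}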
GCAlloc::~GCAlloc()
{
    CoalesceQuickList();

    // Free all of the blocks
    GCAssertMsg(GetNumAlloc() == 0, "You have leaks");

    while (m_firstBlock) {
#ifdef MMGC_MEMORY_INFO
        // check that no item within this block was written to after being poisoned
        VerifyFreeBlockIntegrity(m_firstBlock->firstFree, m_firstBlock->size);
#endif //MMGC_MEMORY_INFO

        GCBlock *b = m_firstBlock;
        UnlinkChunk(b);
        FreeChunk(b);
    }
}
void GCLargeAlloc::Free(const void *item)
{
    LargeBlock *b = GetBlockHeader(item);

    if(b->flags & kHasWeakRef)
        b->gc->ClearWeakRef(GetUserPointer(item));

    LargeBlock **prev = &m_blocks;
    while(*prev) {
        if(b == *prev) {
            *prev = b->next;
            m_gc->FreeBlock(b, b->GetNumBlocks());
            return;
        }
        prev = &(*prev)->next;
    }
    GCAssertMsg(false, "Bad free!");
}
GCWorkItem *GCMarkStack::GetItemAbove(GCWorkItem *item)
{
    if(item == Peek())
        return NULL;

    GCStackSegment *seg = m_topSegment;
    GCStackSegment *last = NULL;
    while(seg) {
        if(item >= seg->m_items && item < seg->m_items + kMarkStackItems) {
            if(item+1 == seg->m_items + kMarkStackItems) {
                // The two items span a segment boundary; the item above is the
                // first item in the next segment ("last" in the backwards
                // traversal sense).
                return &last->m_items[0];
            }
            else {
                return item+1;
            }
        }
        last = seg;
        seg = seg->m_prev;
    }
    GCAssertMsg(false, "Invalid attempt to get the item above an item not in the stack.");
    return NULL;
}
void FixedMalloc::EnsureFixedMallocMemory(const void* item)
{
    // For a discussion of this flag, see bugzilla 564878.
    if (!m_heap->config.checkFixedMemory())
        return;

    for (int i=0; i<kNumSizeClasses; i++)
        if (m_allocs[i].QueryOwnsObject(item))
            return;

#ifdef AVMPLUS_SAMPLER
    if (m_heap->SafeSize(GetRealPointer(item)) != (size_t)-1)
        return;
#else
    {
        MMGC_LOCK(m_largeObjectLock);
        for (LargeObject* lo=largeObjects; lo != NULL; lo=lo->next)
            if (lo->item == item)
                return;
    }
#endif

    GCAssertMsg(false, "Trying to delete an object with FixedMalloc::Free that was not allocated with FixedMalloc::Alloc");
}
REALLY_INLINE void* NewTaggedScalar(size_t size, FixedMallocOpts opts)
{
    GCAssertMsg(GCHeap::GetGCHeap()->IsStackEntered() || (opts&kCanFail) != 0,
                "MMGC_ENTER macro must exist on the stack");
    return TaggedAlloc(size, opts, MMGC_SCALAR_GUARD);
}
void* FixedAlloc::Alloc(size_t size, FixedMallocOpts opts)
{
    (void)size;
    GCAssertMsg(m_heap->StackEnteredCheck() || (opts&kCanFail) != 0, "MMGC_ENTER must be on the stack");
    GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

    if(!m_firstFree) {
        bool canFail = (opts & kCanFail) != 0;
        CreateChunk(canFail);
        if(!m_firstFree) {
            if (!canFail) {
                GCAssertMsg(0, "Memory allocation failed to abort properly");
                GCHeap::SignalInconsistentHeapState("Failed to abort");
                /*NOTREACHED*/
            }
            return NULL;
        }
    }

    FixedBlock* b = m_firstFree;
    GCAssert(b && !IsFull(b));

    b->numAlloc++;

    // Consume the free list if available
    void *item = NULL;
    if (b->firstFree) {
        item = b->firstFree;
        b->firstFree = *((void**)item);
        // assert that the freelist hasn't been tampered with (by writing to the first 4 bytes)
        GCAssert(b->firstFree == NULL ||
                 (b->firstFree >= b->items &&
                  (((uintptr_t)b->firstFree - (uintptr_t)b->items) % b->size) == 0 &&
                  (uintptr_t)b->firstFree < ((uintptr_t)b & ~0xfff) + GCHeap::kBlockSize));
#ifdef MMGC_MEMORY_INFO
        // check for writes on deleted memory
        VerifyFreeBlockIntegrity(item, b->size);
#endif
    }
    else {
        // Take the next item from the end of the block
        item = b->nextItem;
        GCAssert(item != 0);
        if(!IsFull(b)) {
            // There are more items at the end of the block
            b->nextItem = (void *) ((uintptr_t)item+m_itemSize);
        }
        else {
            b->nextItem = 0;
        }
    }

    // If we're out of free items, be sure to remove ourselves from the
    // list of blocks with free items.
    if (IsFull(b)) {
        m_firstFree = b->nextFree;
        b->nextFree = NULL;
        GCAssert(b->prevFree == NULL);
        if (m_firstFree)
            m_firstFree->prevFree = 0;
    }

    item = GetUserPointer(item);

#ifdef MMGC_HOOKS
    if(m_heap->HooksEnabled()) {
#ifdef MMGC_MEMORY_PROFILER
        m_totalAskSize += size;
#endif
        m_heap->AllocHook(item, size, b->size - DebugSize());
    }
#endif

#ifdef _DEBUG
    // fresh memory poisoning
    if((opts & kZero) == 0)
        memset(item, 0xfa, b->size - DebugSize());
#endif

    if((opts & kZero) != 0)
        memset(item, 0, b->size - DebugSize());

    return item;
}
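The allocation policy above has two paths: reuse a slot from the block's free list when one exists, otherwise bump-allocate the next never-used slot from the end of the block. A minimal standalone sketch of that policy follows; it is not MMgc code, and the MiniBlock layout and names are assumptions (the real FixedBlock detects "full" via the page boundary rather than an explicit end pointer).

// Illustrative sketch only: free-list-first, then bump-pointer allocation.
#include <stdint.h>
#include <stddef.h>

struct MiniBlock {
    void*  firstFree;   // intrusive free list of returned slots
    char*  nextItem;    // first never-used byte in the block
    char*  end;         // one past the last usable byte
};

static void* MiniAlloc(MiniBlock* b, size_t itemSize)
{
    if (b->firstFree) {
        // Fast path: pop a previously freed slot.
        void* item = b->firstFree;
        b->firstFree = *(void**)item;
        return item;
    }
    if (b->nextItem && b->nextItem + itemSize <= b->end) {
        // Carve a fresh slot off the tail of the block.
        void* item = b->nextItem;
        b->nextItem += itemSize;
        return item;
    }
    return NULL;   // block is full; a real allocator would grab a new chunk
}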
void GCLargeAlloc::Finalize()
{
    m_startedFinalize = true;
    LargeBlock **prev = &m_blocks;
    while (*prev) {
        LargeBlock *b = *prev;
        if ((b->flags[0] & kMark) == 0) {
            GCAssert((b->flags[0] & kQueued) == 0);
            GC* gc = b->gc;

            // GC::Finalize calls GC::MarkOrClearWeakRefs before calling GCAlloc::Finalize,
            // ergo there should be no unmarked objects with weak refs.
            GCAssertMsg((b->flags[0] & kHasWeakRef) == 0, "No unmarked object should have a weak ref at this point");

            // Large blocks may be allocated by finalizers for large blocks, creating contention
            // for the block list.  Yet the block list must be live, since eg GetUsageInfo may be
            // called by the finalizers (or their callees).
            //
            // Unlink the block from the list early to avoid contention.
            *prev = Next(b);
            b->next = NULL;

            void *item = b+1;
            if (b->flags[0] & kFinalizable) {
                GCFinalizedObject *obj = (GCFinalizedObject *) item;
                obj = (GCFinalizedObject *) GetUserPointer(obj);
                obj->~GCFinalizedObject();
#if defined(_DEBUG)
                if(b->rcobject) {
                    gc->RCObjectZeroCheck((RCObject*)obj);
                }
#endif
            }

            // GC::GetWeakRef will not allow a weak reference to be created to an object that
            // is ready for destruction.
            GCAssertMsg((b->flags[0] & kHasWeakRef) == 0, "No unmarked object should have a weak ref at this point");

#ifdef MMGC_HOOKS
            if(m_gc->heap->HooksEnabled()) {
#ifdef MMGC_MEMORY_PROFILER
                if(GCHeap::GetGCHeap()->GetProfiler())
                    m_totalAskSize -= GCHeap::GetGCHeap()->GetProfiler()->GetAskSize(GetUserPointer(item));
#endif
                m_gc->heap->FinalizeHook(GetUserPointer(item), b->size - DebugSize());
            }
#endif

            // The block only becomes empty at this point, so only now add it to the empty list.
            gc->AddToLargeEmptyBlockList(b);
            continue;
        }
        // clear marks
        b->flags[0] &= ~(kMark|kQueued);
        prev = (LargeBlock**)(&b->next);
    }
    m_startedFinalize = false;
}
#if defined _DEBUG || defined MMGC_MEMORY_PROFILER
void* GCAlloc::Alloc(size_t size, int flags)
#else
void* GCAlloc::Alloc(int flags)
#endif
{
    GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

    // Allocation must be signalled before we allocate because no GC work must be allowed to
    // come between an allocation and an initialization - if it does, we may crash, as
    // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
    // having it.  In principle we could signal allocation late but only set the object
    // flags after signaling, but we might still cause trouble for the profiler, which also
    // depends on non-interruptibility.
    m_gc->SignalAllocWork(m_itemSize);

    GCBlock* b = m_firstFree;
start:
    if (b == NULL) {
        if (m_needsSweeping && !m_gc->collecting) {
            Sweep(m_needsSweeping);
            b = m_firstFree;
            goto start;
        }

        bool canFail = (flags & GC::kCanFail) != 0;
        CreateChunk(canFail);
        b = m_firstFree;
        if (b == NULL) {
            GCAssert(canFail);
            return NULL;
        }
    }

    GCAssert(!b->needsSweeping);
    GCAssert(b == m_firstFree);
    GCAssert(b && !b->IsFull());

    void *item;
    if(b->firstFree) {
        item = b->firstFree;
        b->firstFree = *((void**)item);
        // clear the free list pointer; the rest was zeroed in free
        *(intptr_t*) item = 0;
#ifdef MMGC_MEMORY_INFO
        // check for writes on deleted memory
        VerifyFreeBlockIntegrity(item, b->size);
#endif
    }
    else {
        item = b->nextItem;
        if(((uintptr_t)((char*)item + b->size) & 0xfff) != 0) {
            b->nextItem = (char*)item + b->size;
        }
        else {
            b->nextItem = NULL;
        }
    }

    // Set up the bits: items start out white, and whether they need finalization
    // is determined by the caller.

    // make sure we ended up in the right place
    GCAssert(((flags&GC::kContainsPointers) != 0) == ContainsPointers());

    // this assumes what we assert
    GCAssert((unsigned long)GC::kFinalize == (unsigned long)GCAlloc::kFinalize);

    int index = GetIndex(b, item);
    GCAssert(index >= 0);
    Clear4BitsAndSet(b, index, flags & kFinalize);

    b->numItems++;
#ifdef MMGC_MEMORY_INFO
    m_numAlloc++;
#endif

    // If we're out of free items, be sure to remove ourselves from the
    // list of blocks with free items.  TODO Minor optimization: when we
    // carve an item off the end of the block, we don't need to check here
    // unless we just set b->nextItem to NULL.
    if (b->IsFull()) {
        m_firstFree = b->nextFree;
        b->nextFree = NULL;
        GCAssert(b->prevFree == NULL);
        if (m_firstFree)
            m_firstFree->prevFree = 0;
    }

    // Prevent mid-collection (i.e. destructor) allocations on un-swept pages from
    // getting swept.  If the page is finalized and doesn't need sweeping we don't want
    // to set the mark, otherwise it will be marked when we start the next marking phase
    // and write barriers won't fire (since it's black).
    if(m_gc->collecting) {
        if((b->finalizeState != m_gc->finalizedValue) || b->needsSweeping)
            SetBit(b, index, kMark);
    }

    GCAssert((uintptr_t(item) & ~0xfff) == (uintptr_t) b);
    GCAssert((uintptr_t(item) & 7) == 0);

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled()) {
        size_t userSize = m_itemSize - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
        m_totalAskSize += size;
        heap->AllocHook(GetUserPointer(item), size, userSize);
#else
        heap->AllocHook(GetUserPointer(item), 0, userSize);
#endif
    }
#endif

    return item;
}
void GCAlloc::Finalize()
{
    m_finalized = true;

    // Go through every item of every block.  Look for items
    // that are in use but not marked as reachable, and delete
    // them.

    GCBlock *next = NULL;
    for (GCBlock* b = m_firstBlock; b != NULL; b = next) {
        // we can unlink the block below
        next = Next(b);

        GCAssert(!b->needsSweeping);

        // remove from the free list to keep mutator destructor allocations
        // from using this block
        bool putOnFreeList = false;
        if(m_firstFree == b || b->prevFree != NULL || b->nextFree != NULL) {
            putOnFreeList = true;
            RemoveFromFreeList(b);
        }

        GCAssert(kMark == 0x1 && kFinalize == 0x4 && kHasWeakRef == 0x8);

        int numMarkedItems = 0;

        // TODO: MMX version for IA32
        uint32_t *bits = (uint32_t*) b->GetBits();
        uint32_t count = b->nextItem ? GetIndex(b, b->nextItem) : m_itemsPerBlock;
        // round up to eight
        uint32_t numInts = ((count+7)&~7) >> 3;
        for(uint32_t i=0; i < numInts; i++) {
            uint32_t marks = bits[i];
            // hmm, is it better to screw around with exact counts or just examine
            // 8 items on each pass, with the latter we open the door to unrolling
            uint32_t subCount = i==(numInts-1) ? ((count-1)&7)+1 : 8;
            for(uint32_t j=0; j<subCount; j++, marks>>=4) {
                int mq = marks & kFreelist;
                if(mq == kFreelist)
                    continue;

                if(mq == kMark) {
                    numMarkedItems++;
                    continue;
                }

                GCAssertMsg(mq != kQueued, "No queued objects should exist when finalizing");

                void* item = (char*)b->items + m_itemSize*((i*8)+j);

#ifdef MMGC_HOOKS
                if(m_gc->heap->HooksEnabled()) {
#ifdef MMGC_MEMORY_PROFILER
                    if(m_gc->heap->GetProfiler())
                        m_totalAskSize -= m_gc->heap->GetProfiler()->GetAskSize(GetUserPointer(item));
#endif
                    m_gc->heap->FinalizeHook(GetUserPointer(item), m_itemSize - DebugSize());
                }
#endif

                if(!(marks & (kFinalize|kHasWeakRef)))
                    continue;

                if (marks & kFinalize) {
                    GCFinalizedObject *obj = (GCFinalizedObject*)GetUserPointer(item);
                    GCAssert(*(intptr_t*)obj != 0);
                    // Clear the bit first so we won't get a second finalization if the finalizer longjmps out
                    bits[i] &= ~(kFinalize<<(j*4));
                    obj->~GCFinalizedObject();

#if defined(_DEBUG)
                    if(b->alloc->ContainsRCObjects()) {
                        m_gc->RCObjectZeroCheck((RCObject*)obj);
                    }
#endif
                }

                if (marks & kHasWeakRef) {
                    b->gc->ClearWeakRef(GetUserPointer(item));
                }
            }
        }

        // 3 outcomes:
        // 1) empty: put on the list of empty pages
        // 2) no freed items, partially empty or full: return to the free list if partially empty
        // 3) some freed items: add to the to-be-swept list
        if(numMarkedItems == 0) {
            // Add to the list of blocks to be returned to the Heap after finalization.
            // We don't do this during finalization because we want finalizers to be able
            // to reference the memory of other objects being finalized.
            UnlinkChunk(b);
            b->gc->AddToSmallEmptyBlockList(b);
            putOnFreeList = false;
        }
        else if(numMarkedItems == b->numItems) {
            // Nothing changed on this page; clear the marks.
            // Note there will be at least one free item on the page (otherwise it
            // would not have been scanned), so the page just stays on the freelist.
            ClearMarks(b);
        }
        else if(!b->needsSweeping) {
            // freeing some items but not all
            if(b->nextFree || b->prevFree || b == m_firstFree) {
                RemoveFromFreeList(b);
                b->nextFree = b->prevFree = NULL;
            }
            AddToSweepList(b);
            putOnFreeList = false;
        }
        b->finalizeState = m_gc->finalizedValue;
        if(putOnFreeList)
            AddToFreeList(b);
    }
}