void FixedAlloc::Destroy()
{
    // Free all of the blocks
    while (m_firstBlock) {
#ifdef MMGC_MEMORY_PROFILER
        if(m_firstBlock->numAlloc > 0 && m_heap->GetStatus() != kMemAbort) {
            union {
                char* mem_c;
                uint32_t* mem;
            };
            mem_c = m_firstBlock->items;
            unsigned int itemNum = 0;
            while(itemNum++ < m_itemsPerBlock) {
                if(IsInUse(m_firstBlock, mem)) {
                    GCLog("Leaked %d byte item. Addr: 0x%p\n", GetItemSize(), GetUserPointer(mem));
                    PrintAllocStackTrace(GetUserPointer(mem));
                }
                mem_c += m_itemSize;
            }
        }

#ifdef MMGC_MEMORY_INFO
        // check for writes on deleted memory
        VerifyFreeBlockIntegrity(m_firstBlock->firstFree, m_firstBlock->size);
#endif
#endif
        FreeChunk(m_firstBlock);
    }
    m_firstBlock = NULL;
}
void GCLargeAlloc::Finalize()
{
    m_startedFinalize = true;
    LargeBlock **prev = &m_blocks;
    while (*prev) {
        LargeBlock *b = *prev;
        if ((b->flags & kMarkFlag) == 0) {
            void *item = b+1;
            if (NeedsFinalize(b)) {
                GCFinalizable *obj = (GCFinalizable *) item;
                obj = (GCFinalizable *) GetUserPointer(obj);
                //obj->~GCFinalizable();
                obj->Finalize();
#if defined(_DEBUG) && defined(MMGC_DRC)
                if((b->flags & kRCObject) != 0) {
                    b->gc->RCObjectZeroCheck((RCObject*)obj);
                }
#endif
            }
            if(b->flags & kHasWeakRef) {
                b->gc->ClearWeakRef(GetUserPointer(item));
            }

            SAMPLE_DEALLOC(item, GC::Size(item));

            // unlink from list
            *prev = b->next;
            b->gc->AddToLargeEmptyBlockList(b);
            continue;
        }
        // clear marks
        b->flags &= ~(kMarkFlag|kQueuedFlag);
        prev = &b->next;
    }
    m_startedFinalize = false;
}
void GCLargeAlloc::Free(const void *item)
{
    LargeBlock *b = GetLargeBlock(item);

#ifdef GCDEBUG
    // RCObjects have a contract that they must clean themselves; since they
    // already have to scan themselves to decrement other RCObjects, they might
    // as well clean themselves too, better than suffering a memset later.
    if(b->rcobject)
        m_gc->RCObjectZeroCheck((RCObject*)GetUserPointer(item));
#endif

    // We can't allow freeing something during sweeping, otherwise alloc counters
    // get decremented twice and destructors will be called twice.
    GCAssert(m_gc->collecting == false || m_gc->marking == true);
    if (m_gc->marking && (m_gc->collecting || IsProtectedAgainstFree(b))) {
        m_gc->AbortFree(GetUserPointer(item));
        return;
    }

    m_gc->policy.signalFreeWork(b->size);

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled()) {
        const void* p = GetUserPointer(item);
        size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
        if(heap->GetProfiler())
            m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
        heap->FinalizeHook(p, userSize);
        heap->FreeHook(p, userSize, uint8_t(GCHeap::GCFreedPoison));
    }
#endif

    if(b->flags[0] & kHasWeakRef)
        m_gc->ClearWeakRef(GetUserPointer(item));

    LargeBlock **prev = &m_blocks;
    while(*prev) {
        if(b == *prev) {
            *prev = Next(b);
            size_t numBlocks = b->GetNumBlocks();
            m_totalAllocatedBytes -= b->size;
            VALGRIND_MEMPOOL_FREE(b, b);
            VALGRIND_MEMPOOL_FREE(b, item);
            VALGRIND_DESTROY_MEMPOOL(b);
            m_gc->FreeBlock(b, (uint32_t)numBlocks, m_partitionIndex);
            return;
        }
        prev = (LargeBlock**)(&(*prev)->next);
    }
    GCAssertMsg(false, "Bad free!");
}
/* static */
void GCAlloc::Free(const void *item)
{
    GCBlock *b = GetBlock(item);
    GCAlloc *a = b->alloc;

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled()) {
        const void* p = GetUserPointer(item);
        size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
        if(heap->GetProfiler())
            a->m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
        heap->FinalizeHook(p, userSize);
        heap->FreeHook(p, userSize, 0xca);
    }
#endif

#ifdef _DEBUG
    // check that it hasn't already been freed
    void *free = b->firstFree;
    while(free) {
        GCAssert(free != item);
        free = *((void**) free);
    }
#endif

    int index = GetIndex(b, item);
    if(GetBit(b, index, kHasWeakRef)) {
        b->gc->ClearWeakRef(GetUserPointer(item));
    }

    bool wasFull = b->IsFull();

    if(b->needsSweeping) {
#ifdef _DEBUG
        bool gone =
#endif
            a->Sweep(b);
        GCAssertMsg(!gone, "How can a page I'm about to free an item on be empty?");
        wasFull = false;
    }

    if(wasFull) {
        a->AddToFreeList(b);
    }

    b->FreeItem(item, index);

    if(b->numItems == 0) {
        a->UnlinkChunk(b);
        a->FreeChunk(b);
    }
}
/*static*/
const void *FixedAlloc::FindBeginning(const void *addr)
{
    FixedBlock *b = GetFixedBlock(addr);
    uint32_t itemNum = 0;
    char *mem = b->items;
    while(itemNum++ < b->alloc->m_itemsPerBlock) {
        char *next = mem + b->alloc->m_itemSize;
        if(addr >= mem && addr < next)
            return GetUserPointer(mem);
        mem = next;
    }
    return NULL;
}
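// A standalone sketch (not MMgc code) of what FindBeginning computes: the owning
// item's start can also be derived with integer division instead of the linear
// scan above.  'items', 'itemSize', and 'itemsPerBlock' stand in for the
// FixedBlock/FixedAlloc fields, and the DebugSize adjustment performed by
// GetUserPointer is omitted.
#include <stddef.h>
#include <stdint.h>

static const char* FindBeginningSketch(const void* addr, const char* items,
                                       size_t itemSize, size_t itemsPerBlock)
{
    uintptr_t a    = (uintptr_t)addr;
    uintptr_t base = (uintptr_t)items;
    if (a < base || a >= base + itemSize * itemsPerBlock)
        return NULL;                              // not inside this block's item area
    size_t itemNum = (a - base) / itemSize;       // index of the item containing addr
    return items + itemNum * itemSize;            // raw start of that item
}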
void GCLargeAlloc::Free(const void *item)
{
    LargeBlock *b = GetBlockHeader(item);

    if(b->flags & kHasWeakRef)
        b->gc->ClearWeakRef(GetUserPointer(item));

    LargeBlock **prev = &m_blocks;
    while(*prev) {
        if(b == *prev) {
            *prev = b->next;
            m_gc->FreeBlock(b, b->GetNumBlocks());
            return;
        }
        prev = &(*prev)->next;
    }
    GCAssertMsg(false, "Bad free!");
}
void FixedAlloc::Destroy()
{
    // Free all of the blocks
    while (m_firstBlock) {
#ifdef MMGC_MEMORY_PROFILER
        if(m_firstBlock->numAlloc > 0 && m_heap->GetStatus() != kMemAbort) {
            union {
                char* mem_c;
                uint32_t* mem;
            };
            mem_c = m_firstBlock->items;
            unsigned int itemNum = 0;
            while(itemNum++ < m_itemsPerBlock) {
                if(IsInUse(m_firstBlock, mem)) {
                    // suppress output in release builds UNLESS the profiler is on
#ifndef GCDEBUG
                    if(m_heap->GetProfiler() != NULL)
#endif
                    {
                        GCLog("Leaked %d byte item. Addr: 0x%p\n", GetItemSize(), GetUserPointer(mem));
                        PrintAllocStackTrace(GetUserPointer(mem));
                    }
                }
                mem_c += m_itemSize;
            }
        }

#ifdef MMGC_MEMORY_INFO
        // check for writes on deleted memory
        VerifyFreeBlockIntegrity(m_firstBlock->firstFree, m_firstBlock->size);
#endif
#endif
        // Note: don't cache any state across this call; FreeChunk may temporarily
        // release locks held if the true type of this allocator is FixedAllocSafe.
        FreeChunk(m_firstBlock);
    }
    m_firstBlock = NULL;
}
void *FixedMalloc::LargeAlloc(size_t size, FixedMallocOpts flags)
{
    GCHeap::CheckForAllocSizeOverflow(size, GCHeap::kBlockSize+DebugSize());

    size += DebugSize();
    int blocksNeeded = (int)GCHeap::SizeToBlocks(size);
    uint32_t gcheap_flags = GCHeap::kExpand;

    if((flags & kCanFail) != 0)
        gcheap_flags |= GCHeap::kCanFail;
    if((flags & kZero) != 0)
        gcheap_flags |= GCHeap::kZero;

    void *item = m_heap->Alloc(blocksNeeded, gcheap_flags);
    if(item) {
        item = GetUserPointer(item);
#ifdef MMGC_HOOKS
        if(m_heap->HooksEnabled())
            m_heap->AllocHook(item, size - DebugSize(), Size(item));
#endif // MMGC_HOOKS
        UpdateLargeAllocStats(item, blocksNeeded);

#ifdef DEBUG
        // Fresh memory poisoning
        if((flags & kZero) == 0)
            memset(item, uint8_t(GCHeap::FXFreshPoison), size - DebugSize());

#ifndef AVMPLUS_SAMPLER
        // Enregister the large object
        AddToLargeObjectTracker(item);
#endif
#endif // DEBUG
    }
    return item;
}
void GCAlloc::SweepGuts(GCBlock *b)
{
    // TODO: MMX version for IA32
    uint32_t *bits = (uint32_t*) b->GetBits();
    uint32_t count = b->nextItem ? GetIndex(b, b->nextItem) : m_itemsPerBlock;
    // round up to eight
    uint32_t numInts = ((count+7)&~7) >> 3;
    for(uint32_t i=0; i < numInts; i++) {
        uint32_t marks = bits[i];
        // Hmm, is it better to screw around with exact counts or just examine
        // 8 items on each pass?  With the latter we open the door to unrolling.
        uint32_t subCount = i==(numInts-1) ? ((count-1)&7)+1 : 8;
        for(uint32_t j=0; j<subCount; j++, marks>>=4) {
            int mq = marks & kFreelist;
            if(mq == kMark || mq == kQueued)   // Sweeping is lazy; don't sweep objects on the mark stack
            {
                // live item, clear bits
                bits[i] &= ~(kFreelist<<(j*4));
                continue;
            }

            if(mq == kFreelist)
                continue;   // freelist item, ignore

            // garbage, freelist it
            void *item = (char*)b->items + m_itemSize*(i*8+j);
#ifdef MMGC_HOOKS
            if(m_gc->heap->HooksEnabled())
                m_gc->heap->FreeHook(GetUserPointer(item), b->size - DebugSize(), 0xba);
#endif
            b->FreeItem(item, (i*8+j));
        }
    }
}
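// Sketch (not MMgc code) of the bitmap layout the SweepGuts loop above walks:
// each item owns a 4-bit field, eight fields per 32-bit word, and the low two
// bits of each field encode the mark/queued/freelist state.  The constant values
// below are illustrative assumptions, not MMgc's headers.
#include <stdint.h>

enum { kMarkSketch = 1, kQueuedSketch = 2, kFreelistSketch = 3 };

static inline uint32_t GetItemState(const uint32_t* bits, uint32_t itemIndex)
{
    uint32_t word  = itemIndex >> 3;          // 8 items per uint32_t
    uint32_t shift = (itemIndex & 7) * 4;     // 4 bits per item
    return (bits[word] >> shift) & kFreelistSketch;
}

static inline void ClearItemState(uint32_t* bits, uint32_t itemIndex)
{
    uint32_t word  = itemIndex >> 3;
    uint32_t shift = (itemIndex & 7) * 4;
    bits[word] &= ~((uint32_t)kFreelistSketch << shift);
}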
GCAlloc::GCBlock* GCAlloc::CreateChunk(int flags)
{
    // Too many definitions of kBlockSize, make sure they're at least in sync.
    GCAssert(uint32_t(kBlockSize) == GCHeap::kBlockSize);

    // Get bitmap space; this may trigger OOM handling.
    gcbits_t* bits = m_bitsInPage ? NULL : (gcbits_t*)m_gc->AllocBits(m_numBitmapBytes, m_sizeClassIndex);

    // Allocate a new block; this may trigger OOM handling (though that
    // won't affect the bitmap space, which is not GC'd individually).
    GCBlock* b = (GCBlock*) m_gc->AllocBlock(1, PageMap::kGCAllocPage, /*zero*/true, (flags&GC::kCanFail) != 0);

    if (b) {
        VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 1/*zeroed*/);
        // treat block header as a separate allocation
        VALGRIND_MEMPOOL_ALLOC(b, b, sizeof(GCBlock));

        b->gc = m_gc;
        b->alloc = this;
        b->size = m_itemSize;
        b->slowFlags = 0;
        if(m_gc->collecting && m_finalized)
            b->finalizeState = m_gc->finalizedValue;
        else
            b->finalizeState = !m_gc->finalizedValue;

        b->bibopTag = m_bibopTag;

#ifdef MMGC_FASTBITS
        b->bitsShift = (uint8_t) m_bitsShift;
#endif
        b->containsPointers = ContainsPointers();
        b->rcobject = ContainsRCObjects();

        if (m_bitsInPage)
            b->bits = (gcbits_t*)b + sizeof(GCBlock);
        else
            b->bits = bits;

        // ditto for in-page bits
        if (m_bitsInPage) {
            VALGRIND_MEMPOOL_ALLOC(b, b->bits, m_numBitmapBytes);
        }

        // Link the block at the end of the list
        b->prev = m_lastBlock;
        b->next = 0;

        if (m_lastBlock) {
            m_lastBlock->next = b;
        }
        if (!m_firstBlock) {
            m_firstBlock = b;
        }
        m_lastBlock = b;

        // Add our new ChunkBlock to the firstFree list (which should be empty)
        if (m_firstFree) {
            GCAssert(m_firstFree->prevFree == 0);
            m_firstFree->prevFree = b;
        }
        b->nextFree = m_firstFree;
        b->prevFree = 0;
        m_firstFree = b;

        // calculate back from end (better alignment, no dead space at end)
        b->items = (char*)b+GCHeap::kBlockSize - m_itemsPerBlock * m_itemSize;
        b->numFree = (short)m_itemsPerBlock;

        // explode the new block onto its free list
        //
        // We must make the object look free, which means poisoning it properly and setting
        // the mark bits correctly.

        b->firstFree = b->items;
        void** p = (void**)(void*)b->items;
        int limit = m_itemsPerBlock-1;
#ifdef MMGC_HOOKS
        GCHeap* heap = GCHeap::GetGCHeap();
#endif
        for ( int i=0 ; i < limit ; i++ ) {
#ifdef MMGC_HOOKS
#ifdef MMGC_MEMORY_INFO // DebugSize is 0 if MEMORY_INFO is off, so we get an "obviously true" warning from GCC.
            GCAssert(m_itemSize >= DebugSize());
#endif
            if(heap->HooksEnabled())
                heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
            p = FLSeed(p, (char*)p + m_itemSize);
        }
#ifdef MMGC_HOOKS
        if(heap->HooksEnabled())
            heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
        p[0] = NULL;

        // Set all the mark bits to 'free'
        GCAssert(sizeof(gcbits_t) == 1);
        GCAssert(kFreelist == 3);
        GCAssert(m_numBitmapBytes % 4 == 0);

        uint32_t *pbits = (uint32_t*)(void *)b->bits;
        for(int i=0, n=m_numBitmapBytes>>2; i < n; i++)
            pbits[i] = 0x03030303;

#ifdef MMGC_MEMORY_INFO
        VerifyFreeBlockIntegrity(b->firstFree, m_itemSize);
#endif
    }
    else {
        // Block allocation failed: release the bitmap space acquired above.
        // (The original source is truncated here; this completion is an assumption.)
        if (bits)
            m_gc->FreeBits(bits, m_sizeClassIndex);
    }

    return b;
}
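// Minimal sketch (not MMgc code) of the freelist "explode" step above, assuming
// FLSeed simply stores the address of the next free item in the first word of
// the current item.  The helper below is illustrative; the poisoning/hook calls
// are omitted.
#include <stddef.h>

static void* SeedFreeListSketch(char* items, size_t itemSize, int itemsPerBlock)
{
    void** p = (void**)items;
    for (int i = 0; i < itemsPerBlock - 1; i++) {
        void** next = (void**)((char*)p + itemSize);
        *p = (void*)next;     // first word of each free item points at the next item
        p = next;
    }
    *p = NULL;                // last item terminates the list
    return items;             // head of the freelist (what b->firstFree points at)
}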
// (Reconstructed: the original preprocessor-selected signature was truncated; in
// debug/profiler builds the caller's original ask size is assumed to be passed
// through so it can be recorded below.)
#if defined DEBUG || defined MMGC_MEMORY_PROFILER
void* GCLargeAlloc::Alloc(size_t originalSize, size_t requestSize, int flags)
#else
void* GCLargeAlloc::Alloc(size_t requestSize, int flags)
#endif
{
#ifdef DEBUG
    m_gc->heap->CheckForOOMAbortAllocation();
#endif
    GCHeap::CheckForAllocSizeOverflow(requestSize, sizeof(LargeBlock)+GCHeap::kBlockSize);

    int blocks = (int)((requestSize+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize);
    uint32_t computedSize = blocks*GCHeap::kBlockSize - sizeof(LargeBlock);

    // Allocation must be signalled before we allocate because no GC work must be allowed to
    // come between an allocation and an initialization - if it does, we may crash, as
    // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
    // having it.  In principle we could signal allocation late but only set the object
    // flags after signaling, but we might still cause trouble for the profiler, which also
    // depends on non-interruptibility.

    m_gc->SignalAllocWork(computedSize);

    // Pointer-containing memory is always zeroed (see bug 594533).
    if((flags&GC::kContainsPointers) != 0)
        flags |= GC::kZero;

    LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, PageMap::kGCLargeAllocPageFirst,
                                                       (flags&GC::kZero) != 0, (flags&GC::kCanFail) != 0);
    void *item = NULL;

    if (block) {
        // Code below uses these optimizations
        GCAssert((unsigned long)GC::kFinalize == (unsigned long)kFinalizable);
        GCAssert((unsigned long)GC::kInternalExact == (unsigned long)kVirtualGCTrace);

        gcbits_t flagbits0 = 0;
        gcbits_t flagbits1 = 0;

#if defined VMCFG_EXACT_TRACING
        flagbits0 = (flags & (GC::kFinalize|GC::kInternalExact));
#elif defined VMCFG_SELECTABLE_EXACT_TRACING
        flagbits0 = (flags & (GC::kFinalize|m_gc->runtimeSelectableExactnessFlag));  // 0 or GC::kInternalExact
#else
        flagbits0 = (flags & GC::kFinalize);
#endif

        VALGRIND_CREATE_MEMPOOL(block, /*rdzone*/0, (flags&GC::kZero) != 0);
        VALGRIND_MEMPOOL_ALLOC(block, block, sizeof(LargeBlock));

        block->gc = this->m_gc;
        block->alloc = this;
        block->next = m_blocks;
        block->size = computedSize;
        block->bibopTag = 0;
#ifdef MMGC_FASTBITS
        block->bitsShift = 12;     // Always use bits[0]
#endif
        block->containsPointers = ((flags&GC::kContainsPointers) != 0) ? 1 : 0;
        block->rcobject = ((flags&GC::kRCObject) != 0) ? 1 : 0;
        block->bits = block->flags;
        m_blocks = block;

        item = block->GetObject();

        if(m_gc->collecting && !m_startedFinalize)
            flagbits0 |= kMark;

        block->flags[0] = flagbits0;
        block->flags[1] = flagbits1;
#ifdef _DEBUG
        (void)originalSize;
        if (flags & GC::kZero) {
            if (!RUNNING_ON_VALGRIND) {
                // AllocBlock should take care of this
                for(int i=0, n=(int)(requestSize/sizeof(int)); i<n; i++) {
                    if(((int*)item)[i] != 0)
                        GCAssert(false);
                }
            }
        }
#endif

        // see comments in GCAlloc about using full size instead of ask size
        VALGRIND_MEMPOOL_ALLOC(block, item, computedSize);

#ifdef MMGC_HOOKS
        GCHeap* heap = GCHeap::GetGCHeap();
        if(heap->HooksEnabled()) {
            size_t userSize = block->size - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
            m_totalAskSize += originalSize;
            heap->AllocHook(GetUserPointer(item), originalSize, userSize, /*managed=*/true);
#else
            heap->AllocHook(GetUserPointer(item), 0, userSize, /*managed=*/true);
#endif
        }
#endif
    }
    return item;
}
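// Worked example (illustrative, not MMgc code) of the block-count arithmetic in
// GCLargeAlloc::Alloc above, assuming 4096-byte GCHeap blocks and a 32-byte
// LargeBlock header; the real sizes come from GCHeap::kBlockSize and
// sizeof(LargeBlock).
#include <stddef.h>
#include <stdio.h>

int main()
{
    const size_t kBlockSize  = 4096;   // stand-in for GCHeap::kBlockSize
    const size_t headerSize  = 32;     // stand-in for sizeof(LargeBlock)
    const size_t requestSize = 5000;

    size_t blocks       = (requestSize + headerSize + kBlockSize - 1) / kBlockSize;
    size_t computedSize = blocks * kBlockSize - headerSize;

    // A 5000-byte request needs 2 blocks and yields 8160 usable bytes.
    printf("blocks=%zu computedSize=%zu\n", blocks, computedSize);
    return 0;
}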
void GCLargeAlloc::Finalize()
{
    m_startedFinalize = true;
    LargeBlock **prev = &m_blocks;
    while (*prev) {
        LargeBlock *b = *prev;
        if ((b->flags[0] & kMark) == 0) {
            GCAssert((b->flags[0] & kQueued) == 0);
            GC* gc = b->gc;

            // GC::Finalize calls GC::MarkOrClearWeakRefs before calling GCAlloc::Finalize,
            // ergo there should be no unmarked objects with weak refs.
            GCAssertMsg((b->flags[0] & kHasWeakRef) == 0, "No unmarked object should have a weak ref at this point");

            // Large blocks may be allocated by finalizers for large blocks, creating contention
            // for the block list.  Yet the block list must be live, since eg GetUsageInfo may be
            // called by the finalizers (or their callees).
            //
            // Unlink the block from the list early to avoid contention.

            *prev = Next(b);
            b->next = NULL;

            void *item = b+1;
            if (b->flags[0] & kFinalizable) {
                GCFinalizedObject *obj = (GCFinalizedObject *) item;
                obj = (GCFinalizedObject *) GetUserPointer(obj);
                obj->~GCFinalizedObject();
#if defined(_DEBUG)
                if(b->rcobject) {
                    gc->RCObjectZeroCheck((RCObject*)obj);
                }
#endif
            }

            // GC::GetWeakRef will not allow a weak reference to be created to an object that
            // is ready for destruction.
            GCAssertMsg((b->flags[0] & kHasWeakRef) == 0, "No unmarked object should have a weak ref at this point");

#ifdef MMGC_HOOKS
            if(m_gc->heap->HooksEnabled()) {
#ifdef MMGC_MEMORY_PROFILER
                if(GCHeap::GetGCHeap()->GetProfiler())
                    m_totalAskSize -= GCHeap::GetGCHeap()->GetProfiler()->GetAskSize(GetUserPointer(item));
#endif
                m_gc->heap->FinalizeHook(GetUserPointer(item), b->size - DebugSize());
            }
#endif

            // The block is not empty until now, so now add it.
            gc->AddToLargeEmptyBlockList(b);
            continue;
        }
        // clear marks
        b->flags[0] &= ~(kMark|kQueued);
        prev = (LargeBlock**)(&b->next);
    }
    m_startedFinalize = false;
}
void GCAlloc::Finalize()
{
    m_finalized = true;
    // Go through every item of every block.  Look for items
    // that are in use but not marked as reachable, and delete
    // them.

    GCBlock *next = NULL;
    for (GCBlock* b = m_firstBlock; b != NULL; b = next)
    {
        // we can unlink block below
        next = Next(b);

        GCAssert(!b->needsSweeping);

        // remove from freelist to avoid mutator destructor allocations
        // from using this block
        bool putOnFreeList = false;
        if(m_firstFree == b || b->prevFree != NULL || b->nextFree != NULL) {
            putOnFreeList = true;
            RemoveFromFreeList(b);
        }

        GCAssert(kMark == 0x1 && kFinalize == 0x4 && kHasWeakRef == 0x8);

        int numMarkedItems = 0;

        // TODO: MMX version for IA32
        uint32_t *bits = (uint32_t*) b->GetBits();
        uint32_t count = b->nextItem ? GetIndex(b, b->nextItem) : m_itemsPerBlock;
        // round up to eight
        uint32_t numInts = ((count+7)&~7) >> 3;
        for(uint32_t i=0; i < numInts; i++) {
            uint32_t marks = bits[i];
            // Hmm, is it better to screw around with exact counts or just examine
            // 8 items on each pass?  With the latter we open the door to unrolling.
            uint32_t subCount = i==(numInts-1) ? ((count-1)&7)+1 : 8;
            for(uint32_t j=0; j<subCount; j++, marks>>=4) {
                int mq = marks & kFreelist;
                if(mq == kFreelist)
                    continue;

                if(mq == kMark) {
                    numMarkedItems++;
                    continue;
                }

                GCAssertMsg(mq != kQueued, "No queued objects should exist when finalizing");

                void* item = (char*)b->items + m_itemSize*((i*8)+j);

#ifdef MMGC_HOOKS
                if(m_gc->heap->HooksEnabled())
                {
#ifdef MMGC_MEMORY_PROFILER
                    if(m_gc->heap->GetProfiler())
                        m_totalAskSize -= m_gc->heap->GetProfiler()->GetAskSize(GetUserPointer(item));
#endif
                    m_gc->heap->FinalizeHook(GetUserPointer(item), m_itemSize - DebugSize());
                }
#endif

                if(!(marks & (kFinalize|kHasWeakRef)))
                    continue;

                if (marks & kFinalize)
                {
                    GCFinalizedObject *obj = (GCFinalizedObject*)GetUserPointer(item);
                    GCAssert(*(intptr_t*)obj != 0);
                    // Clear bits first so we won't get a second finalization if the finalizer longjmps out
                    bits[i] &= ~(kFinalize<<(j*4));
                    obj->~GCFinalizedObject();

#if defined(_DEBUG)
                    if(b->alloc->ContainsRCObjects()) {
                        m_gc->RCObjectZeroCheck((RCObject*)obj);
                    }
#endif
                }

                if (marks & kHasWeakRef) {
                    b->gc->ClearWeakRef(GetUserPointer(item));
                }
            }
        }

        // Three outcomes:
        // 1) empty: put on the list of empty pages
        // 2) no freed items, partially empty or full: return to the free list if partially empty
        // 3) some freed items: add to the to-be-swept list
        if(numMarkedItems == 0) {
            // Add to the list of blocks to be returned to the Heap after finalization.
            // We don't do this during finalization because we want finalizers to be able
            // to reference the memory of other objects being finalized.
            UnlinkChunk(b);
            b->gc->AddToSmallEmptyBlockList(b);
            putOnFreeList = false;
        } else if(numMarkedItems == b->numItems) {
            // Nothing changed on this page; clear marks.
            // Note there will be at least one free item on the page (otherwise it
            // would not have been scanned), so the page just stays on the freelist.
            ClearMarks(b);
        } else if(!b->needsSweeping) {
            // freeing some items but not all
            if(b->nextFree || b->prevFree || b == m_firstFree) {
                RemoveFromFreeList(b);
                b->nextFree = b->prevFree = NULL;
            }
            AddToSweepList(b);
            putOnFreeList = false;
        }

        b->finalizeState = m_gc->finalizedValue;

        if(putOnFreeList)
            AddToFreeList(b);
    }
}
// (Reconstructed: the original preprocessor-selected signature was truncated; in
// debug/profiler builds the caller's ask size is assumed to be passed in so the
// assertion and the m_totalAskSize accounting below can use it.)
#if defined _DEBUG || defined MMGC_MEMORY_PROFILER
void* GCAlloc::Alloc(size_t size, int flags)
#else
void* GCAlloc::Alloc(int flags)
#endif
{
    GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

    // Allocation must be signalled before we allocate because no GC work must be allowed to
    // come between an allocation and an initialization - if it does, we may crash, as
    // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
    // having it.  In principle we could signal allocation late but only set the object
    // flags after signaling, but we might still cause trouble for the profiler, which also
    // depends on non-interruptibility.

    m_gc->SignalAllocWork(m_itemSize);

    GCBlock* b = m_firstFree;
start:
    if (b == NULL) {
        if (m_needsSweeping && !m_gc->collecting) {
            Sweep(m_needsSweeping);
            b = m_firstFree;
            goto start;
        }

        bool canFail = (flags & GC::kCanFail) != 0;
        CreateChunk(canFail);
        b = m_firstFree;
        if (b == NULL) {
            GCAssert(canFail);
            return NULL;
        }
    }

    GCAssert(!b->needsSweeping);
    GCAssert(b == m_firstFree);
    GCAssert(b && !b->IsFull());

    void *item;
    if(b->firstFree) {
        item = b->firstFree;
        b->firstFree = *((void**)item);
        // clear free list pointer, the rest was zero'd in free
        *(intptr_t*) item = 0;
#ifdef MMGC_MEMORY_INFO
        // check for writes on deleted memory
        VerifyFreeBlockIntegrity(item, b->size);
#endif
    } else {
        item = b->nextItem;
        if(((uintptr_t)((char*)item + b->size) & 0xfff) != 0) {
            b->nextItem = (char*)item + b->size;
        } else {
            b->nextItem = NULL;
        }
    }

    // Set up bits: items start out white, and whether they need finalization
    // is determined by the caller.

    // make sure we ended up in the right place
    GCAssert(((flags&GC::kContainsPointers) != 0) == ContainsPointers());

    // this assumes what we assert
    GCAssert((unsigned long)GC::kFinalize == (unsigned long)GCAlloc::kFinalize);

    int index = GetIndex(b, item);
    GCAssert(index >= 0);
    Clear4BitsAndSet(b, index, flags & kFinalize);

    b->numItems++;
#ifdef MMGC_MEMORY_INFO
    m_numAlloc++;
#endif

    // If we're out of free items, be sure to remove ourselves from the
    // list of blocks with free items.  TODO Minor optimization: when we
    // carve an item off the end of the block, we don't need to check here
    // unless we just set b->nextItem to NULL.

    if (b->IsFull()) {
        m_firstFree = b->nextFree;
        b->nextFree = NULL;
        GCAssert(b->prevFree == NULL);

        if (m_firstFree)
            m_firstFree->prevFree = 0;
    }

    // Prevent mid-collection (i.e. destructor) allocations on un-swept pages from
    // getting swept.  If the page is finalized and doesn't need sweeping we don't want
    // to set the mark, otherwise it will be marked when we start the next marking phase
    // and write barriers won't fire (since it's black).
    if(m_gc->collecting) {
        if((b->finalizeState != m_gc->finalizedValue) || b->needsSweeping)
            SetBit(b, index, kMark);
    }

    GCAssert((uintptr_t(item) & ~0xfff) == (uintptr_t) b);
    GCAssert((uintptr_t(item) & 7) == 0);

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled()) {
        size_t userSize = m_itemSize - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
        m_totalAskSize += size;
        heap->AllocHook(GetUserPointer(item), size, userSize);
#else
        heap->AllocHook(GetUserPointer(item), 0, userSize);
#endif
    }
#endif

    return item;
}
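// Sketch (not MMgc code) of the bump-pointer carve in the else-branch above:
// items live inside a single 4 KB, 4 KB-aligned block, so when an item's end
// lands exactly on a block boundary (offset & 0xfff == 0) the block is exhausted.
#include <stddef.h>
#include <stdint.h>

static void* CarveNextSketch(void** nextItem, size_t itemSize)
{
    void* item = *nextItem;
    if (item == NULL)
        return NULL;                                        // block fully carved up
    uintptr_t end = (uintptr_t)item + itemSize;
    *nextItem = ((end & 0xfff) != 0) ? (void*)end : NULL;   // NULL once we reach the boundary
    return item;
}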
void* FixedAlloc::Alloc(size_t size, FixedMallocOpts opts)
{
    (void)size;
    GCAssertMsg(m_heap->StackEnteredCheck() || (opts&kCanFail) != 0, "MMGC_ENTER must be on the stack");
    GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

    if(!m_firstFree) {
        bool canFail = (opts & kCanFail) != 0;
        CreateChunk(canFail);
        if(!m_firstFree) {
            if (!canFail) {
                GCAssertMsg(0, "Memory allocation failed to abort properly");
                GCHeap::SignalInconsistentHeapState("Failed to abort");
                /*NOTREACHED*/
            }
            return NULL;
        }
    }

    FixedBlock* b = m_firstFree;
    GCAssert(b && !IsFull(b));

    b->numAlloc++;

    // Consume the free list if available
    void *item = NULL;
    if (b->firstFree) {
        item = b->firstFree;
        b->firstFree = *((void**)item);
        // assert that the freelist hasn't been tampered with (by writing to the first 4 bytes)
        GCAssert(b->firstFree == NULL ||
                 (b->firstFree >= b->items &&
                  (((uintptr_t)b->firstFree - (uintptr_t)b->items) % b->size) == 0 &&
                  (uintptr_t)b->firstFree < ((uintptr_t)b & ~0xfff) + GCHeap::kBlockSize));
#ifdef MMGC_MEMORY_INFO
        // check for writes on deleted memory
        VerifyFreeBlockIntegrity(item, b->size);
#endif
    } else {
        // Take next item from end of block
        item = b->nextItem;
        GCAssert(item != 0);
        if(!IsFull(b)) {
            // There are more items at the end of the block
            b->nextItem = (void *) ((uintptr_t)item+m_itemSize);
        } else {
            b->nextItem = 0;
        }
    }

    // If we're out of free items, be sure to remove ourselves from the
    // list of blocks with free items.
    if (IsFull(b)) {
        m_firstFree = b->nextFree;
        b->nextFree = NULL;
        GCAssert(b->prevFree == NULL);

        if (m_firstFree)
            m_firstFree->prevFree = 0;
    }

    item = GetUserPointer(item);

#ifdef MMGC_HOOKS
    if(m_heap->HooksEnabled()) {
#ifdef MMGC_MEMORY_PROFILER
        m_totalAskSize += size;
#endif
        m_heap->AllocHook(item, size, b->size - DebugSize());
    }
#endif

#ifdef _DEBUG
    // fresh memory poisoning
    if((opts & kZero) == 0)
        memset(item, 0xfa, b->size - DebugSize());
#endif

    if((opts & kZero) != 0)
        memset(item, 0, b->size - DebugSize());

    return item;
}