// Attempt to satisfy an allocation of sizeCat bytes from this bucket's free
// list. Each entry references a LargeHeapBlock that has freed space; the first
// block that can fit the request wins. Returns the allocated memory, or
// nullptr when no free-listed block can accommodate sizeCat.
// Note: zero-fill verification is deliberately skipped here; it is performed
// by LargeHeapBucket::Alloc on the returned memory.
char * LargeHeapBucket::TryAllocFromFreeList(Recycler * recycler, size_t sizeCat, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);

    for (LargeHeapBlockFreeList* entry = this->freeList; entry != nullptr; entry = entry->next)
    {
        LargeHeapBlock* candidateBlock = entry->heapBlock;
        char * allocation = candidateBlock->TryAllocFromFreeList(sizeCat, attributes);
        if (allocation != nullptr)
        {
            // Don't need to verify zero fill here since we will do it in LargeHeapBucket::Alloc
            return allocation;
        }
#if DBG
        LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Unable to allocate object of size 0x%x from freelist\n"), sizeCat);
#endif
    }

    return nullptr;
}
//===================================================================================================== // Free //===================================================================================================== void LargeHeapBucket::ExplicitFree(void * object, size_t sizeCat) { Assert(HeapInfo::GetMediumObjectAlignedSizeNoCheck(sizeCat) == this->sizeCat); LargeObjectHeader * header = LargeHeapBlock::GetHeaderFromAddress(object); Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) == ObjectInfoBits::NoBit || header->GetAttributes(this->heapInfo->recycler->Cookie) == ObjectInfoBits::LeafBit); Assert(!header->isExplicitFreed); DebugOnly(header->isExplicitFreed = true); Assert(header->objectSize >= sizeCat); #if DBG HeapBlock* heapBlock = this->GetRecycler()->FindHeapBlock(object); Assert(heapBlock != nullptr); Assert(heapBlock->IsLargeHeapBlock()); LargeHeapBlock * largeHeapBlock = (LargeHeapBlock *)heapBlock; LargeObjectHeader * dbgHeader; Assert(largeHeapBlock->GetObjectHeader(object, &dbgHeader)); Assert(dbgHeader == header); #endif FreeObject * freeObject = (FreeObject *)object; freeObject->SetNext(this->explicitFreeList); this->explicitFreeList = freeObject; header->SetAttributes(this->heapInfo->recycler->Cookie, ObjectInfoBits::LeafBit); // We can stop scanning it now. }
//===================================================================================================== // Allocation //===================================================================================================== char * LargeHeapBucket::TryAllocFromNewHeapBlock(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow) { Assert((attributes & InternalObjectInfoBitMask) == attributes); #ifdef RECYCLER_PAGE_HEAP if (IsPageHeapEnabled(attributes)) { return this->PageHeapAlloc(recycler, sizeCat, size, attributes, this->heapInfo->pageHeapMode, true); } #endif LargeHeapBlock * heapBlock = AddLargeHeapBlock(sizeCat, nothrow); if (heapBlock == nullptr) { return nullptr; } char * memBlock = heapBlock->Alloc(sizeCat, attributes); Assert(memBlock != nullptr); return memBlock; }
// Try to reuse an object previously returned through ExplicitFree. Scans the
// bucket's explicit free list for the first entry whose recorded objectSize
// can hold sizeCat bytes; on a hit the entry is unlinked, its header is
// re-stamped with the requested (stored) attributes, and its address is
// returned. Returns nullptr when no entry fits.
char * LargeHeapBucket::TryAllocFromExplicitFreeList(Recycler * recycler, size_t sizeCat, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);

    FreeObject * currFreeObject = this->explicitFreeList;
    FreeObject * prevFreeObject = nullptr;
    while (currFreeObject != nullptr)
    {
        char * memBlock = (char *)currFreeObject;
        LargeObjectHeader * header = LargeHeapBlock::GetHeaderFromAddress(memBlock);
        Assert(header->isExplicitFreed);
        // Every entry on this list belongs to this bucket's size category.
        Assert(HeapInfo::GetMediumObjectAlignedSizeNoCheck(header->objectSize) == this->sizeCat);

        // Entry too small for the request: keep walking.
        if (header->objectSize < sizeCat)
        {
            prevFreeObject = currFreeObject;
            currFreeObject = currFreeObject->GetNext();
            continue;
        }

        DebugOnly(header->isExplicitFreed = false);

        // Unlink the chosen entry from the singly linked list.
        if (prevFreeObject)
        {
            prevFreeObject->SetNext(currFreeObject->GetNext());
        }
        else
        {
            this->explicitFreeList = currFreeObject->GetNext();
        }

#ifdef RECYCLER_MEMORY_VERIFY
        // Verify the owning large heap block agrees on the header, then put
        // the debug fill pattern back into the free-list link word.
        HeapBlock* heapBlock = recycler->FindHeapBlock(memBlock);
        Assert(heapBlock != nullptr);
        Assert(heapBlock->IsLargeHeapBlock());
        LargeHeapBlock * largeHeapBlock = (LargeHeapBlock *)heapBlock;
        LargeObjectHeader * dbgHeader;
        Assert(largeHeapBlock->GetObjectHeader(memBlock, &dbgHeader));
        Assert(dbgHeader == header);
        ((FreeObject *)memBlock)->DebugFillNext();
#endif
#ifdef RECYCLER_ZERO_MEM_CHECK
        // TODO: large heap block doesn't separate leaf object on to different page allocator.
        // so all the memory should still be zeroed.
        // Re-zero the link word consumed by the free list so zero-fill
        // verification of the handed-out memory passes.
        memset(memBlock, 0, sizeof(FreeObject));
#endif
        header->SetAttributes(recycler->Cookie, (attributes & StoredObjectInfoBitMask));

        // Reusing the slot for a finalizable object must be reflected in the
        // owning block's (and, under RECYCLER_FINALIZE_CHECK, the heap's)
        // finalizable-object accounting.
        if ((attributes & ObjectInfoBits::FinalizeBit) != 0)
        {
            LargeHeapBlock* heapBlock = (LargeHeapBlock *)recycler->FindHeapBlock(memBlock);
            heapBlock->finalizeCount++;
#ifdef RECYCLER_FINALIZE_CHECK
            heapInfo->liveFinalizableObjectCount++;
            heapInfo->newFinalizableObjectCount++;
#endif
        }

        return memBlock;
    }

    return nullptr;
}
// Create a new LargeHeapBlock large enough to hold an object of `size` bytes,
// register it in the recycler's heap block map, and link it at the head of
// this bucket's largeBlockList. Returns nullptr on any allocation failure;
// throws OutOfMemory only when the page-count computation overflows and
// nothrow is false. All failure paths release whatever was acquired.
LargeHeapBlock* LargeHeapBucket::AddLargeHeapBlock(size_t size, bool nothrow)
{
    Recycler* recycler = this->heapInfo->recycler;
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, this->supportFreeList);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }
        return nullptr;
    }
    char * address = nullptr;

    // The page allocator may hand back more pages than requested; adopt the
    // real count so the whole run is tracked and eventually released.
    size_t realPageCount = pageCount;
    address = recycler->GetRecyclerLargeBlockPageAllocator()->Alloc(&realPageCount, &segment);
    pageCount = realPageCount;

    if (address == nullptr)
    {
        return nullptr;
    }
#ifdef RECYCLER_ZERO_MEM_CHECK
    recycler->VerifyZeroFill(address, pageCount * AutoSystemInfo::PageSize);
#endif

    uint objectCount = LargeHeapBlock::GetMaxLargeObjectCount(pageCount, size);
    // Only pass the bucket to the block when free-listing is supported, so
    // the block knows where to report freed space.
    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, objectCount, supportFreeList ? this : nullptr);
#if DBG
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated new large heap block 0x%p for sizeCat 0x%x\n"), heapBlock, sizeCat);
#endif
#ifdef ENABLE_JS_ETW
#if ENABLE_DEBUG_CONFIG_OPTIONS
    // Trace allocations that exceeded the allocator's normal max and required
    // an oversized segment.
    if (segment->GetPageCount() > recycler->GetRecyclerLargeBlockPageAllocator()->GetMaxAllocPageCount())
    {
        EventWriteJSCRIPT_INTERNAL_RECYCLER_EXTRALARGE_OBJECT_ALLOC(size);
    }
#endif
#endif
    if (!heapBlock)
    {
        // Block metadata allocation failed: give the pages back. Idle
        // decommit is suspended around Release per the allocator's protocol.
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        recycler->GetRecyclerLargeBlockPageAllocator()->Release(address, pageCount, segment);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        return nullptr;
    }

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);

    heapBlock->heapInfo = this->heapInfo;
    heapBlock->lastCollectAllocCount = 0;

    Assert(recycler->collectionState != CollectionStateMark);

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        // Heap block map registration failed: undo everything — pages,
        // block metadata, and the slow-check block counter bumped above.
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--);
        return nullptr;
    }

    // Success: link at the head of the bucket's large-block list.
    heapBlock->SetNextBlock(this->largeBlockList);
    this->largeBlockList = heapBlock;

    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);
    return heapBlock;
}
// Page-heap allocation: places the object in its own LargeHeapBlock with
// adjacent decommitted guard pages so that an out-of-bounds access faults
// immediately. heapInfo->pageHeapMode decides whether the guard pages precede
// (PageHeapModeBlockStart) or follow (PageHeapModeBlockEnd) the object pages.
// Returns nullptr on failure; throws OutOfMemory only when the page-count
// computation overflows and nothrow is false.
char* LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, false);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }
        return nullptr;
    }

    // Object smaller than a pointer: force LeafBit so it is not scanned.
    // NOTE(review): presumably such an object cannot hold a recycler pointer
    // anyway — confirm against GC scanning rules.
    if(size<sizeof(void*))
    {
        attributes = (ObjectInfoBits)(attributes | LeafBit);
    }

    size_t actualPageCount = pageCount + 1; // 1 for guard page
    auto pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    char * baseAddress = pageAllocator->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    size_t guardPageCount = actualPageCount - pageCount; // pageAllocator can return more than asked pages

    char* address = nullptr;
    char* guardPageAddress = nullptr;

    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        // Guard pages first, object pages after them.
        address = baseAddress + AutoSystemInfo::PageSize * guardPageCount;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        // Object pages first, guard pages trailing.
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount * AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }

    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        // Block metadata allocation failed: return the entire run (object +
        // guard pages) starting at the allocator-returned base address.
        pageAllocator->SuspendIdleDecommit();
        pageAllocator->Release(baseAddress, actualPageCount, segment);
        pageAllocator->ResumeIdleDecommit();
        return nullptr;
    }
    heapBlock->heapInfo = this->heapInfo;
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;

    // fill pattern before set pageHeapMode, so background scan stack may verify the pattern
    size_t usedSpace = sizeof(LargeObjectHeader) + size;
    memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        // Heap block map registration failed: undo pages and metadata.
        pageAllocator->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        pageAllocator->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        return nullptr;
    }

    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    // Single-object block sized for this request: allocation cannot fail.
    char * memBlock = heapBlock->Alloc(size, attributes);
    Assert(memBlock != nullptr);

    // Decommit the guard pages so any touch raises an access violation.
#pragma prefast(suppress:6250, "This method decommits memory")
    if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)
    {
        AssertMsg(false, "Unable to decommit guard page.");
        ReportFatalException(NULL, E_FAIL, Fatal_Internal_Error, 2);
        return nullptr;
    }

    // Append to the tail of the bucket's page-heap block list.
    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif
    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);

    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}
char* LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow) { size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size); Segment * segment; size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, this->supportFreeList); if (pageCount == 0) { if (nothrow == false) { // overflow // Since nothrow is false here, it's okay to throw recycler->OutOfMemory(); } return nullptr; } size_t actualPageCount = pageCount + 1; // for page heap char * baseAddress = recycler->GetRecyclerLargeBlockPageAllocator()->Alloc(&actualPageCount, &segment); if (baseAddress == nullptr) { return nullptr; } char* address = nullptr; char* guardPageAddress = nullptr; DWORD guardPageOldProtectFlags = PAGE_NOACCESS; if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart) { address = baseAddress + AutoSystemInfo::PageSize; guardPageAddress = baseAddress; } else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd) { address = baseAddress; guardPageAddress = baseAddress + pageCount* AutoSystemInfo::PageSize; } else { AnalysisAssert(false); } if (::VirtualProtect(static_cast<LPVOID>(guardPageAddress), AutoSystemInfo::PageSize, PAGE_NOACCESS, &guardPageOldProtectFlags) == FALSE) { AssertMsg(false, "Unable to set permission for guard page."); return nullptr; } #ifdef RECYCLER_ZERO_MEM_CHECK recycler->VerifyZeroFill(address, pageCount * AutoSystemInfo::PageSize); #endif LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr); if (!heapBlock) { recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit(); recycler->GetRecyclerLargeBlockPageAllocator()->Release(address, actualPageCount, segment); recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit(); return nullptr; } heapBlock->actualPageCount = actualPageCount; heapBlock->guardPageAddress = guardPageAddress; heapBlock->guardPageOldProtectFlags = guardPageOldProtectFlags; heapBlock->pageHeapMode = 
heapInfo->pageHeapMode; if (heapBlock->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd) { // TODO: pad the address to close-most to the guard page to increase the chance to hit guard page when overflow // some Mark code need to be updated to support this // heapBlock->SetEndAllocAddress(address // + AutoSystemInfo::PageSize - (((AllocSizeMath::Add(sizeCat, sizeof(LargeObjectHeader)) - 1) % AutoSystemInfo::PageSize) / HeapInfo::ObjectGranularity + 1) * HeapInfo::ObjectGranularity); } #if DBG LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated new large heap block 0x%p for sizeCat 0x%x\n"), heapBlock, sizeCat); #endif #ifdef ENABLE_JS_ETW #if ENABLE_DEBUG_CONFIG_OPTIONS if (segment->GetPageCount() > recycler->GetRecyclerLargeBlockPageAllocator()->GetMaxAllocPageCount()) { EventWriteJSCRIPT_INTERNAL_RECYCLER_EXTRALARGE_OBJECT_ALLOC(size); } #endif #endif #if ENABLE_PARTIAL_GC recycler->autoHeap.uncollectedNewPageCount += pageCount; #endif RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++); heapBlock->heapInfo = this->heapInfo; Assert(recycler->collectionState != CollectionStateMark); if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0)) { recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit(); heapBlock->ReleasePages<true>(recycler); recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit(); LargeHeapBlock::Delete(heapBlock); RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--); return nullptr; } heapBlock->ResetMarks(ResetMarkFlags_None, recycler); if (this->largePageHeapBlockList) { HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock); } else { this->largePageHeapBlockList = heapBlock; } RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize); char * memBlock = heapBlock->Alloc(sizeCat, attributes); 
Assert(memBlock != nullptr); if (recycler->ShouldCapturePageHeapAllocStack()) { heapBlock->CapturePageHeapAllocStack(); } return memBlock; }