// Allocates and registers a brand-new LargeHeapBlock big enough to hold an
// object of `size` bytes, and links it at the head of this bucket's
// largeBlockList.
//
// Parameters:
//   size    - requested object size in bytes (used both to compute the page
//             count and the per-block object capacity).
//   nothrow - when false, a page-count overflow raises recycler->OutOfMemory()
//             instead of returning nullptr.
// Returns the new block, or nullptr on any failure (allocation, block
// construction, or heap-block-map registration). Every failure path fully
// unwinds: pages are released and partially-constructed state is deleted.
LargeHeapBlock* LargeHeapBucket::AddLargeHeapBlock(size_t size, bool nothrow)
{
    Recycler* recycler = this->heapInfo->recycler;
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, this->supportFreeList);
    if (pageCount == 0)
    {
        // GetPagesNeeded returns 0 only when the size computation overflowed.
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }
        return nullptr;
    }
    char * address = nullptr;

    // The page allocator may hand back more pages than requested; adopt the
    // real count so the rest of the function sizes everything consistently.
    size_t realPageCount = pageCount;
    address = recycler->GetRecyclerLargeBlockPageAllocator()->Alloc(&realPageCount, &segment);
    pageCount = realPageCount;

    if (address == nullptr)
    {
        return nullptr;
    }
#ifdef RECYCLER_ZERO_MEM_CHECK
    // Freshly allocated recycler pages are expected to be zero-filled.
    recycler->VerifyZeroFill(address, pageCount * AutoSystemInfo::PageSize);
#endif
    uint objectCount = LargeHeapBlock::GetMaxLargeObjectCount(pageCount, size);
    // Pass `this` only when free-listing is supported so the block can return
    // freed entries to this bucket; otherwise the block has no owner bucket.
    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, objectCount, supportFreeList ? this : nullptr);
#if DBG
    // NOTE(review): `sizeCat` is not a local here — presumably a HeapBucket
    // member; confirm against the class declaration.
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated new large heap block 0x%p for sizeCat 0x%x\n"), heapBlock, sizeCat);
#endif
#ifdef ENABLE_JS_ETW
#if ENABLE_DEBUG_CONFIG_OPTIONS
    // Trace allocations that needed a dedicated oversized segment.
    if (segment->GetPageCount() > recycler->GetRecyclerLargeBlockPageAllocator()->GetMaxAllocPageCount())
    {
        EventWriteJSCRIPT_INTERNAL_RECYCLER_EXTRALARGE_OBJECT_ALLOC(size);
    }
#endif
#endif
    if (!heapBlock)
    {
        // Block construction failed: give the pages back. Release must be
        // bracketed by Suspend/ResumeIdleDecommit on this allocator.
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        recycler->GetRecyclerLargeBlockPageAllocator()->Release(address, pageCount, segment);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        return nullptr;
    }

#if ENABLE_PARTIAL_GC
    // New pages count toward the partial-GC heuristic.
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif
    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);
    heapBlock->heapInfo = this->heapInfo;
    heapBlock->lastCollectAllocCount = 0;
    // Registering new blocks in the heap block map is not allowed while
    // marking is in progress.
    Assert(recycler->collectionState != CollectionStateMark);
    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock,
        HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        // Map registration failed: undo everything done above, in reverse.
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--);
        return nullptr;
    }

    // Success: link at the head of the bucket's block list.
    heapBlock->SetNextBlock(this->largeBlockList);
    this->largeBlockList = heapBlock;

    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);
    return heapBlock;
}
// Page-heap allocation: gives the object its own LargeHeapBlock with an
// adjacent guard region so an out-of-bounds access faults immediately. The
// guard pages are decommitted (via VirtualFree/MEM_DECOMMIT) after setup.
//
// Parameters:
//   recycler   - owning recycler.
//   sizeCat    - size category; NOTE(review): not read in this overload —
//                confirm whether callers rely on it elsewhere.
//   size       - actual requested object size; used for page count, the
//                LeafBit heuristic, the fill pattern, and the final Alloc.
//   attributes - object info bits; LeafBit is forced for sub-pointer sizes.
//   mode       - NOTE(review): unused here; heapInfo->pageHeapMode is what
//                actually selects guard placement — confirm intent.
//   nothrow    - when false, a size overflow raises recycler->OutOfMemory().
// Returns the object's memory, or nullptr on failure (all failure paths
// release the pages / delete the partially-built block).
char* LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, false);
    if (pageCount == 0)
    {
        // GetPagesNeeded returns 0 only on arithmetic overflow of the size.
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }
        return nullptr;
    }

    // Objects too small to hold a pointer cannot contain references; mark
    // them leaf so the GC never scans them.
    if(size<sizeof(void*))
    {
        attributes = (ObjectInfoBits)(attributes | LeafBit);
    }

    size_t actualPageCount = pageCount + 1; // 1 for guard page
    auto pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    char * baseAddress = pageAllocator->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    size_t guardPageCount = actualPageCount - pageCount; // pageAllocator can return more than asked pages

    char* address = nullptr;
    char* guardPageAddress = nullptr;

    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        // Guard region before the object: catches buffer underruns.
        address = baseAddress + AutoSystemInfo::PageSize * guardPageCount;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        // Guard region after the object: catches buffer overruns.
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount * AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }


    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        // Release the whole allocation (object pages + guard pages).
        pageAllocator->SuspendIdleDecommit();
        pageAllocator->Release(baseAddress, actualPageCount, segment);
        pageAllocator->ResumeIdleDecommit();
        return nullptr;
    }
    heapBlock->heapInfo = this->heapInfo;
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;

    // fill pattern before set pageHeapMode, so background scan stack may verify the pattern
    size_t usedSpace = sizeof(LargeObjectHeader) + size;
    memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock,
        HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        // Map registration failed: undo block construction.
        pageAllocator->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        pageAllocator->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        return nullptr;
    }

    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    // Single-object block, so Alloc cannot fail here.
    char * memBlock = heapBlock->Alloc(size, attributes);
    Assert(memBlock != nullptr);

#pragma prefast(suppress:6250, "This method decommits memory")
    // Decommit (not release) the guard pages so any touch faults.
    if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)
    {
        AssertMsg(false, "Unable to decommit guard page.");
        ReportFatalException(NULL, E_FAIL, Fatal_Internal_Error, 2);
        return nullptr;
    }

    // Append to the tail of the page-heap block list (order preserved).
    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif
    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);

    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}
// Detaches this allocator from its current heap block (if any), returning
// ownership of the block's remaining free memory to the block itself.
// Two states are handled:
//   - bump-allocation mode (endAddress != nullptr): the unallocated tail is
//     reported to trackers/perf counters and the block's free list stays null;
//   - free-list mode: the allocator's free list is handed back to the block.
// If no block is attached but an explicit free list exists, that list is
// dropped (with debug-only bit cleanup).
void SmallHeapBlockAllocator<TBlockType>::Clear()
{
    TBlockType * heapBlock = this->heapBlock;
    if (heapBlock != nullptr)
    {
        Assert(heapBlock->isInAllocator);
        heapBlock->isInAllocator = false;
        FreeObject * remainingFreeObjectList = nullptr;
        if (this->endAddress != nullptr)
        {
            // Bump-allocation mode: everything from freeObjectList to
            // endAddress was never handed out.
#ifdef RECYCLER_TRACK_NATIVE_ALLOCATED_OBJECTS
            TrackNativeAllocatedObjects();
            lastNonNativeBumpAllocatedBlock = nullptr;
#endif
#ifdef PROFILE_RECYCLER_ALLOC
            // Need to tell the tracker
            this->bucket->heapInfo->recycler->TrackUnallocated((char *)this->freeObjectList, this->endAddress, this->bucket->sizeCat);
#endif
            RecyclerMemoryTracking::ReportUnallocated(this->heapBlock->heapBucket->heapInfo->recycler, (char *)this->freeObjectList, this->endAddress, heapBlock->heapBucket->sizeCat);
#ifdef RECYCLER_PERF_COUNTERS
            // Count the never-allocated tail as "live" for perf accounting,
            // since the block no longer tracks it as free.
            size_t unallocatedObjects = heapBlock->objectCount - ((char *)this->freeObjectList - heapBlock->address) / heapBlock->objectSize;
            size_t unallocatedObjectBytes = unallocatedObjects * heapBlock->GetObjectSize();
            RECYCLER_PERF_COUNTER_ADD(LiveObject, unallocatedObjects);
            RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, unallocatedObjectBytes);
            RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, unallocatedObjectBytes);
            RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObject, unallocatedObjects);
            RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObjectSize, unallocatedObjectBytes);
            RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockFreeObjectSize, unallocatedObjectBytes);
#endif
            Assert(heapBlock->freeObjectList == nullptr);
            this->endAddress = nullptr;
        }
        else
        {
            // Free-list mode: hand the remaining free entries back to the block.
            remainingFreeObjectList = this->freeObjectList;
            heapBlock->freeObjectList = remainingFreeObjectList;
        }

        this->freeObjectList = nullptr;

        // this->freeObjectList and this->lastFreeCount are accessed in
        // SmallHeapBlock::ResetMarks: there, if lastFreeCount is 0, it asserts
        // that freeObjectList is null. Because of ARM's weak memory model we
        // need a barrier so the two writes are observed in order across
        // threads: after writing this->freeObjectList above, the write barrier
        // below guarantees that once lastFreeCount reads as 0, freeObjectList
        // has already been seen as null. The reader side uses a matching read
        // barrier. MemoryBarrier is used because ARM lacks a separate
        // read-only barrier.
#if defined(_M_ARM32_OR_ARM64)
#if DBG
        MemoryBarrier();
#endif
#endif

        if (remainingFreeObjectList == nullptr)
        {
            // Whole block was bump-consumed: fold the block's last free count
            // into the uncollected-bytes heuristic.
            uint lastFreeCount = heapBlock->GetAndClearLastFreeCount();
            heapBlock->heapBucket->heapInfo->uncollectedAllocBytes += lastFreeCount * heapBlock->GetObjectSize();
            Assert(heapBlock->lastUncollectedAllocBytes == 0);
            DebugOnly(heapBlock->lastUncollectedAllocBytes = lastFreeCount * heapBlock->GetObjectSize());
        }
        else
        {
            DebugOnly(heapBlock->SetIsClearedFromAllocator(true));
        }
        this->heapBlock = nullptr;

        RECYCLER_SLOW_CHECK(heapBlock->CheckDebugFreeBitVector(false));
    }
    else if (this->freeObjectList != nullptr)
    {
        // Explicit Free Object List
#ifdef RECYCLER_MEMORY_VERIFY
        // Clear the explicit-free debug bits for every entry before dropping
        // the list, so the verifier doesn't see stale bits.
        FreeObject* freeObject = this->freeObjectList;
        while (freeObject)
        {
            HeapBlock* heapBlock = this->bucket->GetRecycler()->FindHeapBlock((void*) freeObject);
            Assert(heapBlock != nullptr);
            Assert(!heapBlock->IsLargeHeapBlock());
            TBlockType* smallBlock = (TBlockType*)heapBlock;
            smallBlock->ClearExplicitFreeBitForObject((void*) freeObject);
            freeObject = freeObject->GetNext();
        }
#endif
        this->freeObjectList = nullptr;
    }
}
char* LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow) { size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size); Segment * segment; size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, this->supportFreeList); if (pageCount == 0) { if (nothrow == false) { // overflow // Since nothrow is false here, it's okay to throw recycler->OutOfMemory(); } return nullptr; } size_t actualPageCount = pageCount + 1; // for page heap char * baseAddress = recycler->GetRecyclerLargeBlockPageAllocator()->Alloc(&actualPageCount, &segment); if (baseAddress == nullptr) { return nullptr; } char* address = nullptr; char* guardPageAddress = nullptr; DWORD guardPageOldProtectFlags = PAGE_NOACCESS; if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart) { address = baseAddress + AutoSystemInfo::PageSize; guardPageAddress = baseAddress; } else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd) { address = baseAddress; guardPageAddress = baseAddress + pageCount* AutoSystemInfo::PageSize; } else { AnalysisAssert(false); } if (::VirtualProtect(static_cast<LPVOID>(guardPageAddress), AutoSystemInfo::PageSize, PAGE_NOACCESS, &guardPageOldProtectFlags) == FALSE) { AssertMsg(false, "Unable to set permission for guard page."); return nullptr; } #ifdef RECYCLER_ZERO_MEM_CHECK recycler->VerifyZeroFill(address, pageCount * AutoSystemInfo::PageSize); #endif LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr); if (!heapBlock) { recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit(); recycler->GetRecyclerLargeBlockPageAllocator()->Release(address, actualPageCount, segment); recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit(); return nullptr; } heapBlock->actualPageCount = actualPageCount; heapBlock->guardPageAddress = guardPageAddress; heapBlock->guardPageOldProtectFlags = guardPageOldProtectFlags; heapBlock->pageHeapMode = 
heapInfo->pageHeapMode; if (heapBlock->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd) { // TODO: pad the address to close-most to the guard page to increase the chance to hit guard page when overflow // some Mark code need to be updated to support this // heapBlock->SetEndAllocAddress(address // + AutoSystemInfo::PageSize - (((AllocSizeMath::Add(sizeCat, sizeof(LargeObjectHeader)) - 1) % AutoSystemInfo::PageSize) / HeapInfo::ObjectGranularity + 1) * HeapInfo::ObjectGranularity); } #if DBG LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated new large heap block 0x%p for sizeCat 0x%x\n"), heapBlock, sizeCat); #endif #ifdef ENABLE_JS_ETW #if ENABLE_DEBUG_CONFIG_OPTIONS if (segment->GetPageCount() > recycler->GetRecyclerLargeBlockPageAllocator()->GetMaxAllocPageCount()) { EventWriteJSCRIPT_INTERNAL_RECYCLER_EXTRALARGE_OBJECT_ALLOC(size); } #endif #endif #if ENABLE_PARTIAL_GC recycler->autoHeap.uncollectedNewPageCount += pageCount; #endif RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++); heapBlock->heapInfo = this->heapInfo; Assert(recycler->collectionState != CollectionStateMark); if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0)) { recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit(); heapBlock->ReleasePages<true>(recycler); recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit(); LargeHeapBlock::Delete(heapBlock); RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--); return nullptr; } heapBlock->ResetMarks(ResetMarkFlags_None, recycler); if (this->largePageHeapBlockList) { HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock); } else { this->largePageHeapBlockList = heapBlock; } RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize); char * memBlock = heapBlock->Alloc(sizeCat, attributes); 
Assert(memBlock != nullptr); if (recycler->ShouldCapturePageHeapAllocStack()) { heapBlock->CapturePageHeapAllocStack(); } return memBlock; }
// Accounts for objects that were bump-allocated by JIT'd (native) code since
// the last time this allocator synced: everything between
// lastNonNativeBumpAllocatedBlock and the current freeObjectList cursor.
// Under the tracking builds, each such object is reported via the registered
// callback; under RECYCLER_PERF_COUNTERS-only builds, the perf counters are
// adjusted in bulk instead.
void SmallHeapBlockAllocator<TBlockType>::TrackNativeAllocatedObjects()
{
    // Only meaningful in bump-allocation mode with an attached block.
    Assert(this->freeObjectList != nullptr && endAddress != nullptr);
    Assert(this->heapBlock != nullptr);

#if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY) || defined(MEMSPECT_TRACKING) || defined(ETW_MEMORY_TRACKING)
    if (pfnTrackNativeAllocatedObjectCallBack == nullptr)
    {
        // No tracker registered; nothing to report.
        return;
    }

    if (lastNonNativeBumpAllocatedBlock == nullptr)
    {
        // Nothing was allocated through this allocator yet; the cursor should
        // still be at the block start (page-heap blocks are the exception).
#ifdef RECYCLER_PAGE_HEAP
        Assert((char *)this->freeObjectList == this->heapBlock->GetAddress() || ((SmallHeapBlock*) this->heapBlock)->InPageHeapMode());
#else
        Assert((char *)this->freeObjectList == this->heapBlock->GetAddress());
#endif
        return;
    }

    Recycler * recycler = this->heapBlock->heapBucket->heapInfo->recycler;
    size_t sizeCat = this->heapBlock->heapBucket->sizeCat;
    // First native-allocated object is the one right after the last object
    // this (non-native) allocator handed out.
    char * curr = lastNonNativeBumpAllocatedBlock + sizeCat;
    Assert(curr <= (char *)this->freeObjectList);

#if DBG_DUMP
    AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("TrackNativeAllocatedObjects: recycler = 0x%p, sizeCat = %u, lastRuntimeAllocatedBlock = 0x%p, freeObjectList = 0x%p, nativeAllocatedObjectCount = %u\n"),
        recycler, sizeCat, this->lastNonNativeBumpAllocatedBlock, this->freeObjectList, ((char *)this->freeObjectList - curr) / sizeCat);
#endif
    // Report each native-allocated object individually.
    while (curr < (char *)this->freeObjectList)
    {
        pfnTrackNativeAllocatedObjectCallBack(recycler, curr, sizeCat);
        curr += sizeCat;
    }
#elif defined(RECYCLER_PERF_COUNTERS)
    if (lastNonNativeBumpAllocatedBlock == nullptr)
    {
        return;
    }

    size_t sizeCat = this->heapBlock->heapBucket->sizeCat;
    char * curr = lastNonNativeBumpAllocatedBlock + sizeCat;
    Assert(curr <= (char *)this->freeObjectList);
    size_t byteCount = ((char *)this->freeObjectList - curr);

#if DBG_DUMP
    // NOTE(review): `recycler` is not declared in this branch — this trace
    // likely only compiles when DBG_DUMP is off here; confirm build configs.
    AllocationVerboseTrace(_u("TrackNativeAllocatedObjects: recycler = 0x%p, sizeCat = %u, lastRuntimeAllocatedBlock = 0x%p, freeObjectList = 0x%p, nativeAllocatedObjectCount = %u\n"),
        recycler, sizeCat, this->lastNonNativeBumpAllocatedBlock, this->freeObjectList, ((char *)this->freeObjectList - curr) / sizeCat);
#endif
    // Bulk-adjust counters: native allocations move bytes from "free" to "live".
    RECYCLER_PERF_COUNTER_ADD(LiveObject, byteCount / sizeCat);
    RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, byteCount);
    RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, byteCount);
    RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObject, byteCount / sizeCat);
    RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObjectSize, byteCount);
    RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockFreeObjectSize, byteCount);
#else
#error Not implemented
#endif
}