Example #1
//=====================================================================================================
// Allocation
//=====================================================================================================
char *
LargeHeapBucket::TryAllocFromNewHeapBlock(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);

#ifdef RECYCLER_PAGE_HEAP
    if (IsPageHeapEnabled(attributes))
    {
        return this->PageHeapAlloc(recycler, sizeCat, size, attributes, this->heapInfo->pageHeapMode, true);
    }
#endif

    LargeHeapBlock * heapBlock = AddLargeHeapBlock(sizeCat, nothrow);
    if (heapBlock == nullptr)
    {
        return nullptr;
    }
    char * memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr);
    return memBlock;
}
Example #2
char*
LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, false);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }

        return nullptr;
    }

    if (size < sizeof(void*))
    {
        // An object smaller than a pointer cannot hold a reference, so it can be allocated as leaf.
        attributes = (ObjectInfoBits)(attributes | LeafBit);
    }


    size_t actualPageCount = pageCount + 1; // 1 for guard page
    auto pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    char * baseAddress = pageAllocator->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    size_t guardPageCount = actualPageCount - pageCount; // the page allocator may return more pages than requested

    char* address = nullptr;
    char* guardPageAddress = nullptr;

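    // Place the guard page(s) relative to the object pages: in PageHeapModeBlockStart they
    // precede the object pages (catching underruns), in PageHeapModeBlockEnd they follow them
    // (catching overruns).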
    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        address = baseAddress + AutoSystemInfo::PageSize * guardPageCount;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount * AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }



    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        pageAllocator->SuspendIdleDecommit();
        pageAllocator->Release(baseAddress, actualPageCount, segment);
        pageAllocator->ResumeIdleDecommit();
        return nullptr;
    }

    heapBlock->heapInfo = this->heapInfo;
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;

    // Fill the pattern before setting pageHeapMode, so that a background stack scan can verify the pattern
    size_t usedSpace = sizeof(LargeObjectHeader) + size;
    memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        pageAllocator->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        pageAllocator->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        return nullptr;
    }

    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    char * memBlock = heapBlock->Alloc(size, attributes);
    Assert(memBlock != nullptr);


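    // Decommit the guard page(s): touching decommitted pages raises an access violation,
    // which is what makes the guard region effective.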
#pragma prefast(suppress:6250, "This method decommits memory")
    if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)
    {
        AssertMsg(false, "Unable to decommit guard page.");
        ReportFatalException(NULL, E_FAIL, Fatal_Internal_Error, 2);
        return nullptr;
    }

    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);


    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}
Example #3
char*
LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size);

    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, this->supportFreeList);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }

        return nullptr;
    }

    size_t actualPageCount = pageCount + 1; // one extra page for the page heap guard page

    char * baseAddress = recycler->GetRecyclerLargeBlockPageAllocator()->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    char* address = nullptr;
    char* guardPageAddress = nullptr;
    DWORD guardPageOldProtectFlags = PAGE_NOACCESS;

    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        address = baseAddress + AutoSystemInfo::PageSize;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount * AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }

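    // Unlike the decommit approach in Example #2, the guard page stays committed here and is
    // made inaccessible with PAGE_NOACCESS; the previous protection is recorded in
    // guardPageOldProtectFlags.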
    if (::VirtualProtect(static_cast<LPVOID>(guardPageAddress), AutoSystemInfo::PageSize, PAGE_NOACCESS, &guardPageOldProtectFlags) == FALSE)
    {
        AssertMsg(false, "Unable to set permission for guard page.");
        return nullptr;
    }

#ifdef RECYCLER_ZERO_MEM_CHECK
    recycler->VerifyZeroFill(address, pageCount * AutoSystemInfo::PageSize);
#endif

    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        recycler->GetRecyclerLargeBlockPageAllocator()->Release(address, actualPageCount, segment);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        return nullptr;
    }
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;
    heapBlock->guardPageOldProtectFlags = guardPageOldProtectFlags;
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (heapBlock->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        // TODO: pad the address as close as possible to the guard page to increase the chance of hitting the guard page on overflow.
        // Some Mark code needs to be updated to support this.
        // heapBlock->SetEndAllocAddress(address
        //    + AutoSystemInfo::PageSize - (((AllocSizeMath::Add(sizeCat, sizeof(LargeObjectHeader)) - 1) % AutoSystemInfo::PageSize) / HeapInfo::ObjectGranularity + 1) * HeapInfo::ObjectGranularity);
    }

#if DBG
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated new large heap block 0x%p for sizeCat 0x%x\n"), heapBlock, sizeCat);
#endif

#ifdef ENABLE_JS_ETW
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (segment->GetPageCount() > recycler->GetRecyclerLargeBlockPageAllocator()->GetMaxAllocPageCount())
    {
        EventWriteJSCRIPT_INTERNAL_RECYCLER_EXTRALARGE_OBJECT_ALLOC(size);
    }
#endif
#endif

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);

    heapBlock->heapInfo = this->heapInfo;

    Assert(recycler->collectionState != CollectionStateMark);

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        heapBlock->ReleasePages<true>(recycler);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--);
        return nullptr;
    }

    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);

    char * memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr);
    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}