bool
SmallNormalHeapBucketBase<TBlockType>::RescanObjectsOnPage(TBlockType * block, char* pageAddress, char * blockStartAddress, BVStatic<TBlockAttributes::BitVectorCount> * heapBlockMarkBits, const uint localObjectSize, uint bucketIndex, __out_opt bool* anyObjectRescanned, Recycler * recycler)
{
    RECYCLER_STATS_ADD(recycler, markData.rescanPageCount, TBlockAttributes::PageCount);

    // By the time we get here, we should have ensured that there's a mark somewhere in this block.
    // REVIEW: Worth checking just this page's mark bits?
    Assert(!heapBlockMarkBits->IsAllClear());

    if (anyObjectRescanned != nullptr)
    {
        *anyObjectRescanned = false;
    }

    Assert((char*)pageAddress - blockStartAddress < TBlockAttributes::PageCount * AutoSystemInfo::PageSize);
    const uint pageByteOffset = static_cast<uint>((char*)pageAddress - blockStartAddress);
    uint firstObjectOnPageIndex = pageByteOffset / localObjectSize;

    // This is not necessarily the address of the first object that starts on the page.
    // If the last object on the previous page spans into this page, this is the address of that object.
    // We do it this way so that we can figure out whether we need to rescan the first few bytes
    // of the page when the actual first object on this page does not start at the page boundary.
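    // (For example, assuming 4KB pages and 768-byte objects: for page 1, pageByteOffset is 4096,
    // so firstObjectOnPageIndex = 4096 / 768 = 5, and object 5 starts at byte offset 5 * 768 = 3840,
    // which is on page 0 and spans into page 1.)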
    char* const startObjectAddress = blockStartAddress + (firstObjectOnPageIndex * localObjectSize);
    const uint startBitIndex = TBlockType::GetAddressBitIndex(startObjectAddress);
    const uint pageStartBitIndex = pageByteOffset >> HeapConstants::ObjectAllocationShift;
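    // startBitIndex corresponds to the (possibly page-spanning) start object;
    // pageStartBitIndex corresponds to the exact page boundary.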

    Assert(pageByteOffset / AutoSystemInfo::PageSize < USHRT_MAX);
    const ushort pageNumber = static_cast<ushort>(pageByteOffset / AutoSystemInfo::PageSize);
    const typename TBlockType::BlockInfo& blockInfoForPage = HeapInfo::GetBlockInfo<TBlockAttributes>(localObjectSize)[pageNumber];

    bool lastObjectOnPreviousPageMarked = false;
    // Calculate the mark count here since we no longer keep track during marking
    uint rescanMarkCount = TBlockType::CalculateMarkCountForPage(heapBlockMarkBits, bucketIndex, pageStartBitIndex);
    const uint pageObjectCount = blockInfoForPage.pageObjectCount;
    const uint localObjectCount = (TBlockAttributes::PageCount * AutoSystemInfo::PageSize) / localObjectSize;

    // With the unallocatable trailing pages protected and write watch reset, we should never be rescanning these pages.
    if (firstObjectOnPageIndex >= localObjectCount)
    {
        ReportFatalException(NULL, E_FAIL, Fatal_Recycler_MemoryCorruption, 3);
    }

    // If all objects on the page are marked, rescan the whole page at once
    if (TBlockType::CanRescanFullBlock() && rescanMarkCount == pageObjectCount)
    {
        // REVIEW: Can we optimize this more?
        if (!recycler->AddMark(pageAddress, AutoSystemInfo::PageSize))
        {
            // Failed to add to the mark stack due to OOM.
            return false;
        }

        RECYCLER_STATS_ADD(recycler, markData.rescanObjectCount, pageObjectCount);
        RECYCLER_STATS_ADD(recycler, markData.rescanObjectByteCount, localObjectSize * pageObjectCount);
        if (anyObjectRescanned != nullptr)
        {
            *anyObjectRescanned = true;
        }

        return true;
    }

    if (startObjectAddress != pageAddress)
    {
        // If the last object on the previous page spans into the current page and is marked,
        // we need to include it in the mark count for the rescan
        Assert(startObjectAddress >= blockStartAddress && startObjectAddress < pageAddress);
        lastObjectOnPreviousPageMarked = (heapBlockMarkBits->Test(startBitIndex) == TRUE);
        if (lastObjectOnPreviousPageMarked)
        {
            rescanMarkCount++;
        }
    }

    const uint objectBitDelta = SmallHeapBlockT<TBlockAttributes>::GetObjectBitDeltaForBucketIndex(bucketIndex);
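    // objectBitDelta is the number of mark-bit positions between consecutive objects in this bucket.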

    uint rescanCount = 0;
    uint objectIndex = firstObjectOnPageIndex;

    for (uint bitIndex = startBitIndex; rescanCount < rescanMarkCount; objectIndex++, bitIndex += objectBitDelta)
    {
        Assert(objectIndex < localObjectCount);
        Assert(!HeapInfo::GetInvalidBitVectorForBucket<TBlockAttributes>(bucketIndex)->Test(bitIndex));

        if (heapBlockMarkBits->Test(bitIndex))
        {
            char * objectAddress = blockStartAddress + objectIndex * localObjectSize;
            if (!TBlockType::RescanObject(block, objectAddress, localObjectSize, objectIndex, recycler))
            {
                // Failed to add to the mark stack due to OOM.
                return false;
            }

            rescanCount++;
        }
    }

    // Mark bits should not have changed during the Rescan
    if (startObjectAddress != pageAddress && lastObjectOnPreviousPageMarked)
    {
        Assert(rescanMarkCount == TBlockType::CalculateMarkCountForPage(heapBlockMarkBits, bucketIndex, pageStartBitIndex) + 1);
    }
    else
    {
        Assert(rescanMarkCount == TBlockType::CalculateMarkCountForPage(heapBlockMarkBits, bucketIndex, pageStartBitIndex));
    }

#if DBG
    // We stopped when we hit the rescanMarkCount.
    // Make sure no other objects were marked, otherwise our rescanMarkCount was wrong.
    for (uint i = objectIndex + 1; i < blockInfoForPage.lastObjectIndexOnPage; i++)
    {
        Assert(!heapBlockMarkBits->Test(i * objectBitDelta));
    }
#endif

    // Let the caller know if we rescanned anything on this page
    if (anyObjectRescanned != nullptr)
    {
        (*anyObjectRescanned) = (rescanCount > 0);
    }

    return true;
}
Example #2
char*
LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, false);
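    // GetPagesNeeded returns 0 when the required page count would overflow.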
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }

        return nullptr;
    }

    if (size < sizeof(void*))
    {
        // An object smaller than a pointer cannot hold a reference, so it can safely be allocated as leaf.
        attributes = (ObjectInfoBits)(attributes | LeafBit);
    }

    size_t actualPageCount = pageCount + 1; // 1 for guard page
    auto pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    char * baseAddress = pageAllocator->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    size_t guardPageCount = actualPageCount - pageCount; // the page allocator can return more pages than requested

    char* address = nullptr;
    char* guardPageAddress = nullptr;

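    // Place the guard page(s) either before the block (PageHeapModeBlockStart)
    // or immediately after it (PageHeapModeBlockEnd).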
    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        address = baseAddress + AutoSystemInfo::PageSize * guardPageCount;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount * AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }

    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        pageAllocator->SuspendIdleDecommit();
        pageAllocator->Release(baseAddress, actualPageCount, segment);
        pageAllocator->ResumeIdleDecommit();
        return nullptr;
    }

    heapBlock->heapInfo = this->heapInfo;
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;

    // Fill the pattern before setting pageHeapMode, so that the background stack scan can verify the pattern.
    size_t usedSpace = sizeof(LargeObjectHeader) + size;
    memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        pageAllocator->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        pageAllocator->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        return nullptr;
    }

    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    char * memBlock = heapBlock->Alloc(size, attributes);
    Assert(memBlock != nullptr);


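    // Decommit the guard page(s) so that any stray access to them faults immediately.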
#pragma prefast(suppress:6250, "This method decommits memory")
    if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)
    {
        AssertMsg(false, "Unable to decommit guard page.");
        ReportFatalException(NULL, E_FAIL, Fatal_Internal_Error, 2);
        return nullptr;
    }

    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);

    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}
Example #3
_NOINLINE void DebugHeap_OOM_fatal_error()
{
    int scenario = 3;
    ReportFatalException(NULL, E_OUTOFMEMORY, Fatal_Debug_Heap_OUTOFMEMORY, scenario);
}
Example #4
_NOINLINE void Version_Inconsistency_fatal_error()
{
    int scenario = 4;
    ReportFatalException(NULL, E_UNEXPECTED, Fatal_Version_Inconsistency, scenario);
}
Example #5
_NOINLINE void FailedToBox_OOM_fatal_error(
    __in ULONG_PTR context)
{
    int scenario = 1;
    ReportFatalException(context, E_UNEXPECTED, Fatal_FailedToBox_OUTOFMEMORY, scenario);
}
Example #6
_NOINLINE void X64WriteBarrier_OOM_fatal_error()
{
    int scenario = 3;
    ReportFatalException(NULL, E_OUTOFMEMORY, WriteBarrier_OUTOFMEMORY, scenario);
}
Example #7
_NOINLINE void MarkStack_OOM_fatal_error()
{
    int scenario = 1;
    ReportFatalException(NULL, E_OUTOFMEMORY, MarkStack_OUTOFMEMORY, scenario);
};
Example #8
_NOINLINE void Amd64StackWalkerOutOfContexts_fatal_error(
    __in ULONG_PTR context)
{
    int scenario = 1;
    ReportFatalException(context, E_UNEXPECTED, Fatal_Amd64StackWalkerOutOfContexts, scenario);
}
Example #9
_NOINLINE void JavascriptDispatch_OOM_fatal_error(
    __in ULONG_PTR context)
{
    int scenario = 1;
    ReportFatalException(context, E_OUTOFMEMORY, JavascriptDispatch_OUTOFMEMORY, scenario);
};
Example #10
_NOINLINE void CustomHeap_BadPageState_fatal_error(
    __in ULONG_PTR context)
{
    int scenario = 1;
    ReportFatalException(context, E_UNEXPECTED, CustomHeap_MEMORYCORRUPTION, scenario);
};
Example #11
_NOINLINE void UnexpectedExceptionHandling_fatal_error(EXCEPTION_POINTERS * originalException)
{
    int scenario = 7;
    ReportFatalException(NULL, E_UNEXPECTED, Fatal_UnexpectedExceptionHandling, scenario);
}
Example #12
_NOINLINE void EntryExitRecord_Corrupted_fatal_error()
{
    int scenario = 6;
    ReportFatalException(NULL, E_UNEXPECTED, Fatal_EntryExitRecordCorruption, scenario);
}
Example #13
_NOINLINE void Debugger_AttachDetach_fatal_error(HRESULT hr)
{
    int scenario = 5;
    ReportFatalException(NULL, hr, Fatal_Debugger_AttachDetach_Failure, scenario);
}
Example #14
_NOINLINE void FromDOM_NoScriptScope_fatal_error()
{
    int scenario = 5;
    ReportFatalException(NULL, E_UNEXPECTED, EnterScript_FromDOM_NoScriptScope, scenario);
}
Example #15
_NOINLINE void LargeHeapBlock_Metadata_Corrupted(
    __in ULONG_PTR context, __in unsigned char calculatedChecksum)
{
    int scenario = calculatedChecksum; /* for debugging purposes if a checksum mismatch happens */
    ReportFatalException(context, E_UNEXPECTED, LargeHeapBlock_Metadata_Corrupt, scenario);
};