// Resets the OS write watch state on all allocated pages. Returns false if write watch
// is not enabled, or if a reset fails (in which case write watch is disabled).
bool
RecyclerPageAllocator::ResetWriteWatch()
{
    if (allocFlags != MEM_WRITE_WATCH)
    {
        return false;
    }

    GCETW(GC_RESETWRITEWATCH_START, (this));

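    // Keep idle decommit from running while we walk the segment lists.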
    SuspendIdleDecommit();

    bool success = true;
    // Only reset write watch on allocated pages
    if (!ResetWriteWatch(&segments) ||
        !ResetWriteWatch(&decommitSegments) ||
        !ResetAllWriteWatch(&fullSegments) ||
        !ResetAllWriteWatch(&largeSegments))
    {
        allocFlags = 0;
        success = false;
    }

    ResumeIdleDecommit();

    GCETW(GC_RESETWRITEWATCH_STOP, (this));

    return success;
}
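
// Decommit free pages now. A full decommit (all == true) also cancels any pending
// idle decommit timer and restores the non-idle free page limit.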
void
IdleDecommitPageAllocator::DecommitNow(bool all)
{
    SuspendIdleDecommit();

    // If we are in non-idle-decommit mode, then always decommit all.
    // Otherwise, we will end up with some un-decommitted pages and get confused later.
    if (maxFreePageCount == maxNonIdleDecommitFreePageCount)
    {
        all = true;
    }

    __super::DecommitNow(all);

    if (all)
    {
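        // A full decommit supersedes the pending idle decommit: cancel the timer and
        // drop back to the non-idle free page limit.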
        if (this->hasDecommitTimer)
        {
            Assert(idleDecommitEnterCount == 0);
            Assert(this->maxFreePageCount == maxIdleDecommitFreePageCount);
            this->hasDecommitTimer = false;
            this->maxFreePageCount = maxNonIdleDecommitFreePageCount;
        }
        else
        {
            Assert((idleDecommitEnterCount > 0 ? maxIdleDecommitFreePageCount : maxNonIdleDecommitFreePageCount)
                == this->maxFreePageCount);
        }
        ClearMinFreePageCount();
    }
    else
    {
        ResetMinFreePageCount();
    }

    ResumeIdleDecommit();
}
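
// Returns the number of pages reported as written by the OS write watch, summed over
// all segment lists.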
size_t
RecyclerPageAllocator::GetWriteWatchPageCount()
{
    if (allocFlags != MEM_WRITE_WATCH)
    {
        return 0;
    }

    SuspendIdleDecommit();

    // Only query write watch on allocated pages
    size_t count = GetWriteWatchPageCount(&segments)
        + GetWriteWatchPageCount(&decommitSegments)
        + GetAllWriteWatchPageCount(&fullSegments)
        + GetAllWriteWatchPageCount(&largeSegments);

    ResumeIdleDecommit();

    return count;
}
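
// Page heap allocation for large objects: the object gets its own pages plus a guard
// page, so an out-of-bounds access faults immediately instead of corrupting memory.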
char*
LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, false);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }

        return nullptr;
    }

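    // An object smaller than a pointer cannot hold a reference, so allocate it as leaf
    // (its payload is never scanned for GC pointers).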
    if (size < sizeof(void*))
    {
        attributes = (ObjectInfoBits)(attributes | LeafBit);
    }

    size_t actualPageCount = pageCount + 1; // 1 for guard page
    auto pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    char * baseAddress = pageAllocator->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    size_t guardPageCount = actualPageCount - pageCount; // the page allocator can return more pages than requested

    char* address = nullptr;
    char* guardPageAddress = nullptr;

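    // PageHeapModeBlockStart puts the guard page before the object to catch underruns;
    // PageHeapModeBlockEnd puts it after the object pages to catch overruns.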
    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        address = baseAddress + AutoSystemInfo::PageSize * guardPageCount;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount * AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }

    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        pageAllocator->SuspendIdleDecommit();
        pageAllocator->Release(baseAddress, actualPageCount, segment);
        pageAllocator->ResumeIdleDecommit();
        return nullptr;
    }

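    // Fill in the block metadata before publishing the block in the heap block map.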
    heapBlock->heapInfo = this->heapInfo;
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;

    // Fill the pattern before setting pageHeapMode, so the background stack scan can verify the pattern
    size_t usedSpace = sizeof(LargeObjectHeader) + size;
    memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        pageAllocator->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        pageAllocator->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        return nullptr;
    }

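    // Start the block with clear mark bits so it is consistent for the next collection.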
    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    char * memBlock = heapBlock->Alloc(size, attributes);
    Assert(memBlock != nullptr);


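    // Decommit the guard page(s); any later access to them raises an access violation.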
#pragma prefast(suppress:6250, "This method decommits memory")
    if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)
    {
        AssertMsg(false, "Unable to decommit guard page.");
        ReportFatalException(NULL, E_FAIL, Fatal_Internal_Error, 2);
        return nullptr;
    }

    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

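    // Count the new pages toward the partial GC heuristics.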
#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);

    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}