Code Example #1
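// Moves heap blocks whose finalizable objects have been disposed off the pending-dispose list
// and back into the allocable heap block list so that their freed objects can be reused.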
template <typename TBlockType>
void
SmallFinalizableHeapBucketBaseT<TBlockType>::TransferDisposedObjects()
{
    Assert(!this->IsAllocationStopped());
    TBlockType * currentPendingDisposeList = this->pendingDisposeList;
    if (currentPendingDisposeList != nullptr)
    {
        this->pendingDisposeList = nullptr;

        HeapBlockList::ForEach(currentPendingDisposeList, [=](TBlockType * heapBlock)
        {
            heapBlock->TransferDisposedObjects();

            // In page heap mode, we always have a free object.
            Assert(heapBlock->template HasFreeObject<false>());
        });

        // For partial collect, dispose will modify the object, and we also
        // touch the page by chaining the objects through the free list, so we
        // might as well reuse the block for partial collect.
        this->AppendAllocableHeapBlockList(currentPendingDisposeList);
    }

    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(false));
}
Code Example #2
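// During a partial collect, walks the swept heap blocks and decides which ones to reuse for
// allocation (returning them to heapBlockList) and which to leave on partialHeapBlockList.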
template <typename TBlockType>
void
SmallNormalHeapBucketBase<TBlockType>::SweepPartialReusePages(RecyclerSweep& recyclerSweep)
{
    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(recyclerSweep.IsBackground()));
    Assert(this->GetRecycler()->inPartialCollectMode);

    TBlockType * currentHeapBlockList = this->heapBlockList;
    this->heapBlockList = nullptr;
    SmallNormalHeapBucketBase<TBlockType>::SweepPartialReusePages(recyclerSweep, currentHeapBlockList, this->heapBlockList,
        this->partialHeapBlockList,
        [](TBlockType * heapBlock, bool isReused) {});

#if ENABLE_CONCURRENT_GC
    // Only collect data for the pending sweep list, but don't sweep yet until we have
    // adjusted the heuristics; SweepPartialReusePages will then sweep the pages that we
    // are going to reuse in thread.
    TBlockType *& pendingSweepList = recyclerSweep.GetPendingSweepBlockList(this);
    currentHeapBlockList = pendingSweepList;
    pendingSweepList = nullptr;
    Recycler * recycler = recyclerSweep.GetRecycler();
    SmallNormalHeapBucketBase<TBlockType>::SweepPartialReusePages(recyclerSweep, currentHeapBlockList, this->heapBlockList,
        pendingSweepList,
        [recycler](TBlockType * heapBlock, bool isReused)
        {
            if (isReused)
            {
                // Finalizable blocks are always swept in thread, so shouldn't be here
                Assert(!heapBlock->IsAnyFinalizableBlock());

                // Page heap blocks are never swept concurrently
                heapBlock->template SweepObjects<SweepMode_InThread>(recycler);

                // This block has been counted as concurrently swept, but now we have changed
                // our mind and are sweeping it in thread, so remove it from that count.
                RECYCLER_STATS_DEC(recycler, heapBlockConcurrentSweptCount[heapBlock->GetHeapBlockType()]);
            }
        }
    );
#endif

    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(recyclerSweep.IsBackground()));

    this->StartAllocationAfterSweep();

    // PARTIALGC-TODO: revisit partial heap blocks to see if they can be put back into use
    // since the heuristics limit may have been changed.
}
Code Example #3
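// Counts the bucket's non-empty heap blocks, including the blocks still sitting on the
// pending-dispose lists.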
template <typename TBlockType>
size_t
SmallFinalizableHeapBucketBaseT<TBlockType>::GetNonEmptyHeapBlockCount(bool checkCount) const
{
    size_t currentHeapBlockCount = __super::GetNonEmptyHeapBlockCount(false)
        + HeapBlockList::Count(pendingDisposeList)
        + HeapBlockList::Count(tempPendingDisposeList);
    RECYCLER_SLOW_CHECK(Assert(!checkCount || this->heapBlockCount == currentHeapBlockCount));
    return currentHeapBlockCount;
}
Code Example #4
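// Sweeps each large heap block in the given list and re-files it on the appropriate bucket list
// (full, swept, pending dispose, or pending concurrent sweep) based on the resulting sweep state.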
void
LargeHeapBucket::SweepLargeHeapBlockList(RecyclerSweep& recyclerSweep, LargeHeapBlock * heapBlockList)
{
    Recycler * recycler = recyclerSweep.GetRecycler();
    HeapBlockList::ForEachEditing(heapBlockList, [this, &recyclerSweep, recycler](LargeHeapBlock * heapBlock)
    {
        this->UnregisterFreeList(heapBlock->GetFreeList());

        // CONCURRENT-TODO: Allow large blocks to be swept in the background
        SweepState state = heapBlock->Sweep(recyclerSweep, false);

        // If the block is already in the pending dispose list (re-entrant GC scenario), do nothing and leave it there
        if (heapBlock->IsInPendingDisposeList()) return;

        switch (state)
        {
        case SweepStateEmpty:
            heapBlock->ReleasePagesSweep(recycler);
            LargeHeapBlock::Delete(heapBlock);
            RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--);
            break;
        case SweepStateFull:
            heapBlock->SetNextBlock(this->fullLargeBlockList);
            this->fullLargeBlockList = heapBlock;
            break;
        case SweepStateSwept:
            if (supportFreeList)
            {
                ConstructFreelist(heapBlock);
            }
            else
            {
                ReinsertLargeHeapBlock(heapBlock);
            }

            break;
        case SweepStatePendingDispose:
            Assert(!recyclerSweep.IsBackground());
            Assert(!recycler->hasPendingTransferDisposedObjects);
            heapBlock->SetNextBlock(this->pendingDisposeLargeBlockList);
            this->pendingDisposeLargeBlockList = heapBlock;
            heapBlock->SetIsInPendingDisposeList(true);
#if DBG
            heapBlock->SetHasDisposeBeenCalled(false);
#endif
            recycler->hasDisposableObject = true;
            break;
#if ENABLE_CONCURRENT_GC
        case SweepStatePendingSweep:
            heapBlock->SetNextBlock(this->pendingSweepLargeBlockList);
            this->pendingSweepLargeBlockList = heapBlock;
            break;
#endif
        }
    });
}
Code Example #5
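// During a partial collect, moves large blocks from the pending-sweep list onto the
// partial-swept list rather than making their swept objects available for reuse.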
void
LargeHeapBucket::ConcurrentPartialTransferSweptObjects(RecyclerSweep& recyclerSweep)
{
    Assert(recyclerSweep.InPartialCollectMode());
    Assert(!recyclerSweep.IsBackground());

    RECYCLER_SLOW_CHECK(this->VerifyLargeHeapBlockCount());

    LargeHeapBlock * list = this->pendingSweepLargeBlockList;
    this->pendingSweepLargeBlockList = nullptr;
    HeapBlockList::ForEachEditing(list, [this](LargeHeapBlock * heapBlock)
    {
        // GC-REVIEW: We could maybe reuse the large objects
        heapBlock->PartialTransferSweptObjects();
        heapBlock->SetNextBlock(this->partialSweptLargeBlockList);
        this->partialSweptLargeBlockList = heapBlock;
    });

    RECYCLER_SLOW_CHECK(this->VerifyLargeHeapBlockCount());
}
Code Example #6
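// Counts the bucket's non-empty heap blocks, including the partial (and, with concurrent GC,
// partially swept) block lists.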
template <typename TBlockType>
size_t
SmallNormalHeapBucketBase<TBlockType>::GetNonEmptyHeapBlockCount(bool checkCount) const
{
    size_t currentHeapBlockCount = __super::GetNonEmptyHeapBlockCount(false);
    currentHeapBlockCount += HeapBlockList::Count(partialHeapBlockList);
#if ENABLE_CONCURRENT_GC
    currentHeapBlockCount += HeapBlockList::Count(partialSweptHeapBlockList);
#endif
    RECYCLER_SLOW_CHECK(Assert(!checkCount || heapBlockCount == currentHeapBlockCount));
    return currentHeapBlockCount;
}
Code Example #7
//=====================================================================================================
// Initialization
//=====================================================================================================
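// Releases the pages of every large heap block owned by this bucket and deletes the blocks.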
LargeHeapBucket::~LargeHeapBucket()
{
    Recycler* recycler = this->heapInfo->recycler;
    HeapInfo* autoHeap = this->heapInfo;

    ForEachEditingLargeHeapBlock([recycler, autoHeap](LargeHeapBlock * heapBlock)
    {
        heapBlock->ReleasePagesShutdown(recycler);
        LargeHeapBlock::Delete(heapBlock);
        RECYCLER_SLOW_CHECK(autoHeap->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--);
    });
}
Code Example #8
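// Attaches this allocator to a heap block that has free objects, taking over the block's
// free list for subsequent allocations.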
template <typename TBlockType>
void
SmallHeapBlockAllocator<TBlockType>::Set(BlockType * heapBlock)
{
    Assert(this->endAddress == nullptr);
    Assert(this->heapBlock == nullptr);
    Assert(this->freeObjectList == nullptr);

    Assert(heapBlock != nullptr);
    Assert(heapBlock->freeObjectList != nullptr);
    Assert(heapBlock->lastFreeCount != 0);

    Assert(!heapBlock->isInAllocator);
    heapBlock->isInAllocator = true;

    this->heapBlock = heapBlock;
    RECYCLER_SLOW_CHECK(this->heapBlock->CheckDebugFreeBitVector(true));
    this->freeObjectList = this->heapBlock->freeObjectList;
}
Code Example #9
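// Sweeps the finalizable bucket, including the heap blocks whose objects were still pending dispose.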
template <typename TBlockType>
void
SmallFinalizableHeapBucketBaseT<TBlockType>::Sweep(RecyclerSweep& recyclerSweep)
{
    Assert(!recyclerSweep.IsBackground());

#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
    Assert(this->tempPendingDisposeList == nullptr);
    this->tempPendingDisposeList = pendingDisposeList;
#endif

    TBlockType * currentDisposeList = pendingDisposeList;
    this->pendingDisposeList = nullptr;

    BaseT::SweepBucket(recyclerSweep, [=](RecyclerSweep& recyclerSweep)
    {
#if DBG
        if (TBlockType::HeapBlockAttributes::IsSmallBlock)
        {
            recyclerSweep.SetupVerifyListConsistencyDataForSmallBlock(nullptr, false, true);
        }
        else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
        {
            recyclerSweep.SetupVerifyListConsistencyDataForMediumBlock(nullptr, false, true);
        }
        else
        {
            Assert(false);
        }
#endif

        HeapBucketT<TBlockType>::SweepHeapBlockList(recyclerSweep, currentDisposeList, false);

#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
        Assert(this->tempPendingDisposeList == currentDisposeList);
        this->tempPendingDisposeList = nullptr;
#endif
        RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(recyclerSweep.IsBackground()));
    });

}
Code Example #10
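// Allocates pages for a new large heap block big enough for the requested size, registers the
// block in the heap block map, and links it into this bucket's large block list.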
LargeHeapBlock*
LargeHeapBucket::AddLargeHeapBlock(size_t size, bool nothrow)
{
    Recycler* recycler = this->heapInfo->recycler;
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, this->supportFreeList);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }

        return nullptr;
    }

    char * address = nullptr;

    size_t realPageCount = pageCount;
    address = recycler->GetRecyclerLargeBlockPageAllocator()->Alloc(&realPageCount, &segment);
    pageCount = realPageCount;

    if (address == nullptr)
    {
        return nullptr;
    }
#ifdef RECYCLER_ZERO_MEM_CHECK
    recycler->VerifyZeroFill(address, pageCount * AutoSystemInfo::PageSize);
#endif
    uint objectCount = LargeHeapBlock::GetMaxLargeObjectCount(pageCount, size);
    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, objectCount, supportFreeList ? this : nullptr);
#if DBG
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated new large heap block 0x%p for sizeCat 0x%x\n"), heapBlock, sizeCat);
#endif

#ifdef ENABLE_JS_ETW
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (segment->GetPageCount() > recycler->GetRecyclerLargeBlockPageAllocator()->GetMaxAllocPageCount())
    {
        EventWriteJSCRIPT_INTERNAL_RECYCLER_EXTRALARGE_OBJECT_ALLOC(size);
    }
#endif
#endif
    if (!heapBlock)
    {
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        recycler->GetRecyclerLargeBlockPageAllocator()->Release(address, pageCount, segment);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        return nullptr;
    }
#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);

    heapBlock->heapInfo = this->heapInfo;

    heapBlock->lastCollectAllocCount = 0;

    Assert(recycler->collectionState != CollectionStateMark);

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--);
        return nullptr;
    }

    heapBlock->SetNextBlock(this->largeBlockList);
    this->largeBlockList = heapBlock;

    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);
    return heapBlock;
}
Code Example #11
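// Page heap allocation: places the object in its own large heap block next to a guard page
// (decommitted below) so that out-of-bounds accesses fault on the guard page.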
char*
LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, false);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }

        return nullptr;
    }

    if (size < sizeof(void*))
    {
        attributes = (ObjectInfoBits)(attributes | LeafBit);
    }

    size_t actualPageCount = pageCount + 1; // 1 for guard page
    auto pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    char * baseAddress = pageAllocator->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    size_t guardPageCount = actualPageCount - pageCount; // the page allocator can return more pages than requested

    char* address = nullptr;
    char* guardPageAddress = nullptr;

    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        address = baseAddress + AutoSystemInfo::PageSize * guardPageCount;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount * AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }



    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        pageAllocator->SuspendIdleDecommit();
        pageAllocator->Release(baseAddress, actualPageCount, segment);
        pageAllocator->ResumeIdleDecommit();
        return nullptr;
    }

    heapBlock->heapInfo = this->heapInfo;
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;

    // Fill the pattern before setting pageHeapMode, so that the background stack scan can verify the pattern
    size_t usedSpace = sizeof(LargeObjectHeader) + size;
    memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        pageAllocator->SuspendIdleDecommit();
        heapBlock->ReleasePages(recycler);
        pageAllocator->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        return nullptr;
    }

    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    char * memBlock = heapBlock->Alloc(size, attributes);
    Assert(memBlock != nullptr);


#pragma prefast(suppress:6250, "This method decommits memory")
    if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)
    {
        AssertMsg(false, "Unable to decommit guard page.");
        ReportFatalException(NULL, E_FAIL, Fatal_Internal_Error, 2);
        return nullptr;
    }

    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);
    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);


    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}
Code Example #12
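// Page heap allocation variant that protects the guard page with PAGE_NOACCESS (rather than
// decommitting it) and records the previous protection flags on the heap block.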
char*
LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
{
    size_t sizeCat = HeapInfo::GetAlignedSizeNoCheck(size);

    Segment * segment;
    size_t pageCount = LargeHeapBlock::GetPagesNeeded(size, this->supportFreeList);
    if (pageCount == 0)
    {
        if (nothrow == false)
        {
            // overflow
            // Since nothrow is false here, it's okay to throw
            recycler->OutOfMemory();
        }

        return nullptr;
    }

    size_t actualPageCount = pageCount + 1; // for page heap

    char * baseAddress = recycler->GetRecyclerLargeBlockPageAllocator()->Alloc(&actualPageCount, &segment);
    if (baseAddress == nullptr)
    {
        return nullptr;
    }

    char* address = nullptr;
    char* guardPageAddress = nullptr;
    DWORD guardPageOldProtectFlags = PAGE_NOACCESS;

    if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
    {
        address = baseAddress + AutoSystemInfo::PageSize;
        guardPageAddress = baseAddress;
    }
    else if (heapInfo->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        address = baseAddress;
        guardPageAddress = baseAddress + pageCount* AutoSystemInfo::PageSize;
    }
    else
    {
        AnalysisAssert(false);
    }

    if (::VirtualProtect(static_cast<LPVOID>(guardPageAddress), AutoSystemInfo::PageSize, PAGE_NOACCESS, &guardPageOldProtectFlags) == FALSE)
    {
        AssertMsg(false, "Unable to set permission for guard page.");
        return nullptr;
    }

#ifdef RECYCLER_ZERO_MEM_CHECK
    recycler->VerifyZeroFill(address, pageCount * AutoSystemInfo::PageSize);
#endif

    LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
    if (!heapBlock)
    {
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        recycler->GetRecyclerLargeBlockPageAllocator()->Release(address, actualPageCount, segment);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        return nullptr;
    }
    heapBlock->actualPageCount = actualPageCount;
    heapBlock->guardPageAddress = guardPageAddress;
    heapBlock->guardPageOldProtectFlags = guardPageOldProtectFlags;
    heapBlock->pageHeapMode = heapInfo->pageHeapMode;

    if (heapBlock->pageHeapMode == PageHeapMode::PageHeapModeBlockEnd)
    {
        // TODO: Pad the address as close as possible to the guard page to increase the chance of
        // hitting the guard page on overflow. Some Mark code needs to be updated to support this.
        // heapBlock->SetEndAllocAddress(address
        //    + AutoSystemInfo::PageSize - (((AllocSizeMath::Add(sizeCat, sizeof(LargeObjectHeader)) - 1) % AutoSystemInfo::PageSize) / HeapInfo::ObjectGranularity + 1) * HeapInfo::ObjectGranularity);
    }

#if DBG
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated new large heap block 0x%p for sizeCat 0x%x\n"), heapBlock, sizeCat);
#endif

#ifdef ENABLE_JS_ETW
#if ENABLE_DEBUG_CONFIG_OPTIONS
    if (segment->GetPageCount() > recycler->GetRecyclerLargeBlockPageAllocator()->GetMaxAllocPageCount())
    {
        EventWriteJSCRIPT_INTERNAL_RECYCLER_EXTRALARGE_OBJECT_ALLOC(size);
    }
#endif
#endif

#if ENABLE_PARTIAL_GC
    recycler->autoHeap.uncollectedNewPageCount += pageCount;
#endif

    RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]++);

    heapBlock->heapInfo = this->heapInfo;

    Assert(recycler->collectionState != CollectionStateMark);

    if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
    {
        recycler->GetRecyclerLargeBlockPageAllocator()->SuspendIdleDecommit();
        heapBlock->ReleasePages<true>(recycler);
        recycler->GetRecyclerLargeBlockPageAllocator()->ResumeIdleDecommit();
        LargeHeapBlock::Delete(heapBlock);
        RECYCLER_SLOW_CHECK(this->heapInfo->heapBlockCount[HeapBlock::HeapBlockType::LargeBlockType]--);
        return nullptr;
    }

    heapBlock->ResetMarks(ResetMarkFlags_None, recycler);

    if (this->largePageHeapBlockList)
    {
        HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);
    }
    else
    {
        this->largePageHeapBlockList = heapBlock;
    }

    RECYCLER_PERF_COUNTER_ADD(FreeObjectSize, heapBlock->GetPageCount() * AutoSystemInfo::PageSize);

    char * memBlock = heapBlock->Alloc(sizeCat, attributes);
    Assert(memBlock != nullptr);
    if (recycler->ShouldCapturePageHeapAllocStack())
    {
        heapBlock->CapturePageHeapAllocStack();
    }

    return memBlock;
}
Code Example #13
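// At the end of a partial collect, finishes the partially swept blocks and makes the partial
// heap blocks available for allocation again.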
template <typename TBlockType>
void
SmallNormalHeapBucketBase<TBlockType>::FinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(recyclerSweep != nullptr && recyclerSweep->IsBackground()));

    Assert(this->GetRecycler()->inPartialCollectMode);
    Assert(recyclerSweep == nullptr || this->IsAllocationStopped());

#if ENABLE_CONCURRENT_GC
    // Process the partially swept blocks and move them to the partial heap block list
    TBlockType * partialSweptList = this->partialSweptHeapBlockList;
    if (partialSweptList)
    {
        this->partialSweptHeapBlockList = nullptr;
        TBlockType *  tail = nullptr;
        HeapBlockList::ForEach(partialSweptList, [this, &tail](TBlockType * heapBlock)
        {
            heapBlock->FinishPartialCollect();
            Assert(heapBlock->HasFreeObject());
            tail = heapBlock;
        });
        Assert(tail != nullptr);
        tail->SetNextBlock(this->partialHeapBlockList);
        this->partialHeapBlockList = partialSweptList;
    }
#endif

    TBlockType * currentPartialHeapBlockList = this->partialHeapBlockList;
    if (recyclerSweep == nullptr)
    {
        if (currentPartialHeapBlockList != nullptr)
        {
            this->partialHeapBlockList = nullptr;
            this->AppendAllocableHeapBlockList(currentPartialHeapBlockList);
        }
    }
    else
    {
        if (currentPartialHeapBlockList != nullptr)
        {
            this->partialHeapBlockList = nullptr;
            TBlockType * list = this->heapBlockList;
            if (list == nullptr)
            {
                this->heapBlockList = currentPartialHeapBlockList;
            }
            else
            {
                // CONCURRENT-TODO: Optimize this?
                TBlockType * tail = HeapBlockList::Tail(this->heapBlockList);
                tail->SetNextBlock(currentPartialHeapBlockList);
            }
        }
#if ENABLE_CONCURRENT_GC
        if (recyclerSweep->GetPendingSweepBlockList(this) == nullptr)
#endif
        {
            // Nothing else to sweep, so we can start allocating now.
            this->StartAllocationAfterSweep();
        }
    }

    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(recyclerSweep != nullptr && recyclerSweep->IsBackground()));
}
Code Example #14
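// Finishes the concurrent sweep for blocks on the pending-sweep list: in partial-collect mode
// they are only partially swept and set aside; otherwise they are fully swept and made
// allocable again.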
template <typename TBlockType>
void
SmallNormalHeapBucketBase<TBlockType>::SweepPendingObjects(RecyclerSweep& recyclerSweep)
{
    RECYCLER_SLOW_CHECK(VerifyHeapBlockCount(recyclerSweep.IsBackground()));

    CompileAssert(!BaseT::IsLeafBucket);
    TBlockType *& pendingSweepList = recyclerSweep.GetPendingSweepBlockList(this);
    TBlockType * const list = pendingSweepList;
    Recycler * const recycler = recyclerSweep.GetRecycler();
#if ENABLE_PARTIAL_GC
    bool const partialSweep = recycler->inPartialCollectMode;
#endif
    if (list)
    {
        pendingSweepList = nullptr;
#if ENABLE_PARTIAL_GC
        if (partialSweep)
        {
            // We did a partial sweep.
            // Blocks in the pendingSweepList are the ones we decided not to reuse.

            HeapBlockList::ForEachEditing(list, [this, recycler](TBlockType * heapBlock)
            {
                // We are not going to reuse this block.
                // SweepMode_ConcurrentPartial will not actually collect anything, it will just update some state.
                // The sweepable objects will be collected in a future Sweep.

                // Note, page heap blocks are never swept concurrently
                heapBlock->template SweepObjects<SweepMode_ConcurrentPartial>(recycler);

                // Page heap mode should never reach here, so there is no need to check whether page heap is enabled
                if (heapBlock->template HasFreeObject<false>())
                {
                    // We have pre-existing free objects, so put this in the partialSweptHeapBlockList
                    heapBlock->SetNextBlock(this->partialSweptHeapBlockList);
                    this->partialSweptHeapBlockList = heapBlock;
                }
                else
                {
                    // No free objects, so put in the fullBlockList
                    heapBlock->SetNextBlock(this->fullBlockList);
                    this->fullBlockList = heapBlock;
                }
            });
        }
        else
#endif
        {
            // We decided not to do a partial sweep.
            // Blocks in the pendingSweepList need to have a regular sweep.

            TBlockType * tail = SweepPendingObjects<SweepMode_Concurrent>(recycler, list);
            tail->SetNextBlock(this->heapBlockList);
            this->heapBlockList = list;

            this->StartAllocationAfterSweep();
        }

        RECYCLER_SLOW_CHECK(VerifyHeapBlockCount(recyclerSweep.IsBackground()));
    }

    Assert(!this->IsAllocationStopped());
}
Code Example #15
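// Detaches this allocator from its current heap block, returning any remaining free list to the
// block (or clearing an explicit free list) and updating the allocation accounting.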
template <typename TBlockType>
void
SmallHeapBlockAllocator<TBlockType>::Clear()
{
    TBlockType * heapBlock = this->heapBlock;
    if (heapBlock != nullptr)
    {
        Assert(heapBlock->isInAllocator);
        heapBlock->isInAllocator = false;
        FreeObject * remainingFreeObjectList = nullptr;
        if (this->endAddress != nullptr)
        {
#ifdef RECYCLER_TRACK_NATIVE_ALLOCATED_OBJECTS
            TrackNativeAllocatedObjects();
            lastNonNativeBumpAllocatedBlock = nullptr;
#endif
#ifdef PROFILE_RECYCLER_ALLOC
            // Need to tell the tracker
            this->bucket->heapInfo->recycler->TrackUnallocated((char *)this->freeObjectList, this->endAddress, this->bucket->sizeCat);
#endif
            RecyclerMemoryTracking::ReportUnallocated(this->heapBlock->heapBucket->heapInfo->recycler, (char *)this->freeObjectList, this->endAddress, heapBlock->heapBucket->sizeCat);
#ifdef RECYCLER_PERF_COUNTERS
            size_t unallocatedObjects = heapBlock->objectCount - ((char *)this->freeObjectList - heapBlock->address) / heapBlock->objectSize;
            size_t unallocatedObjectBytes = unallocatedObjects * heapBlock->GetObjectSize();
            RECYCLER_PERF_COUNTER_ADD(LiveObject, unallocatedObjects);
            RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, unallocatedObjectBytes);
            RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, unallocatedObjectBytes);
            RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObject, unallocatedObjects);
            RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObjectSize, unallocatedObjectBytes);
            RECYCLER_PERF_COUNTER_SUB(SmallHeapBlockFreeObjectSize, unallocatedObjectBytes);
#endif
            Assert(heapBlock->freeObjectList == nullptr);
            this->endAddress = nullptr;
        }
        else
        {
            remainingFreeObjectList = this->freeObjectList;
            heapBlock->freeObjectList = remainingFreeObjectList;
        }
        this->freeObjectList = nullptr;

        // this->freeObjectList and this->lastFreeCount are accessed in SmallHeapBlock::ResetMarks.
        // The order of access there is: first we check whether lastFreeCount is 0, and if it is, we
        // assert that freeObjectList is null. Because of ARM's memory model, we need to insert
        // barriers so that the two variables can be accessed correctly across threads. Here, after
        // we write to this->freeObjectList, we insert a write barrier so that if this->lastFreeCount
        // is 0, this->freeObjectList must have been set to null. On the other end, we stick in a
        // read barrier. We use the MemoryBarrier macro because of ARM's lack of a separate read barrier.
#if defined(_M_ARM32_OR_ARM64)
#if DBG
        MemoryBarrier();
#endif
#endif

        if (remainingFreeObjectList == nullptr)
        {
            uint lastFreeCount = heapBlock->GetAndClearLastFreeCount();
            heapBlock->heapBucket->heapInfo->uncollectedAllocBytes += lastFreeCount * heapBlock->GetObjectSize();
            Assert(heapBlock->lastUncollectedAllocBytes == 0);
            DebugOnly(heapBlock->lastUncollectedAllocBytes = lastFreeCount * heapBlock->GetObjectSize());
        }
        else
        {
            DebugOnly(heapBlock->SetIsClearedFromAllocator(true));
        }
        this->heapBlock = nullptr;

        RECYCLER_SLOW_CHECK(heapBlock->CheckDebugFreeBitVector(false));
    }
    else if (this->freeObjectList != nullptr)
    {
        // Explicit Free Object List
#ifdef RECYCLER_MEMORY_VERIFY
        FreeObject* freeObject = this->freeObjectList;

        while (freeObject)
        {
            HeapBlock* heapBlock = this->bucket->GetRecycler()->FindHeapBlock((void*) freeObject);
            Assert(heapBlock != nullptr);
            Assert(!heapBlock->IsLargeHeapBlock());
            TBlockType* smallBlock = (TBlockType*)heapBlock;

            smallBlock->ClearExplicitFreeBitForObject((void*) freeObject);
            freeObject = freeObject->GetNext();
        }
#endif
        this->freeObjectList = nullptr;
    }

}