Example #1
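A traversal helper, apparently from the CopiedSpace copying collector in WebKit's JavaScriptCore. It walks a doubly linked list of CopiedBlocks and, every Heap::s_timeCheckResolution iterations, compares a monotonic clock against the deadline; returning true signals that merely touching the block headers is too slow, i.e. the list is likely paged out.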
static bool isBlockListPagedOut(double deadline, DoublyLinkedList<CopiedBlock>* list)
{
    unsigned itersSinceLastTimeCheck = 0;
    CopiedBlock* current = list->head();
    while (current) {
        current = current->next();
        ++itersSinceLastTimeCheck;
        if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
            double currentTime = WTF::monotonicallyIncreasingTime();
            if (currentTime > deadline)
                return true;
            itersSinceLastTimeCheck = 0;
        }
    }

    return false;
}
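For context, a plausible caller would aggregate this check over all of the space's block lists. A minimal sketch, assuming an isPagedOut entry point and the member names seen in the other examples:

bool CopiedSpace::isPagedOut(double deadline)
{
    // Assumed aggregator: the space counts as paged out if walking any of
    // its block lists (to-space, from-space, oversize) blows the deadline.
    return isBlockListPagedOut(deadline, m_toSpace)
        || isBlockListPagedOut(deadline, m_fromSpace)
        || isBlockListPagedOut(deadline, &m_oversizeBlocks);
}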
Example #2
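SlotVisitor::copyLater schedules a backing store of the given size at ptr for copying during the copy phase. Oversize blocks are never copied, only pinned, and their full block size is charged. Live bytes are reported to the block except in one case: during an eden collection, an old object reached via the remembered set that points at an old backing store would be double-counted, so the visitor's cached pre-visit cell state (OldGrey) is used to detect and skip that case.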
void SlotVisitor::copyLater(JSCell* owner, CopyToken token, void* ptr, size_t bytes)
{
    ASSERT(bytes);
    CopiedBlock* block = CopiedSpace::blockFor(ptr);
    if (block->isOversize()) {
        ASSERT(bytes <= block->size());
        // FIXME: We should be able to shrink the allocation if bytes went below the block size.
        // For now, we just make sure that our accounting of how much memory we are actually using
        // is correct.
        // https://bugs.webkit.org/show_bug.cgi?id=144749
        bytes = block->size();
        m_heap.m_storageSpace.pin(block);
    }

    ASSERT(heap()->m_storageSpace.contains(block));

    LockHolder locker(&block->workListLock());
    // We always report live bytes, except if during an eden collection we see an old object pointing to an
    // old backing store and the old object is being marked because of the remembered set. Note that if we
    // ask the object itself, it will always tell us that it's an old black object - because even during an
    // eden collection we have already indicated that the object is old. That's why we use the
    // SlotVisitor's cache of the object's old state.
    if (heap()->operationInProgress() == FullCollection
        || !block->isOld()
        || m_currentObjectCellStateBeforeVisiting != CellState::OldGrey) {
        m_bytesCopied += bytes;
        block->reportLiveBytes(locker, owner, token, bytes);
    }
}
Example #3
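An early version of CopiedSpace::tryAllocateOversize: it carves a dedicated custom-size block out of the block allocator, registers it with the oversize list, the bloom filter, and the block set, then uses a throwaway CopiedAllocator to bump-allocate the payload and reports the block's size to the heap.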
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));
    
    CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, CopiedBlock::blockSize));
    m_oversizeBlocks.push(block);
    m_blockFilter.add(reinterpret_cast<Bits>(block));
    m_blockSet.add(block);
    
    CopiedAllocator allocator;
    allocator.setCurrentBlock(block);
    *outPtr = allocator.forceAllocate(bytes);
    allocator.resetCurrentBlock();

    m_heap->didAllocate(block->region()->blockSize());

    return true;
}
Example #4
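CopiedSpace::capacity sums the capacity of every block across to-space, from-space, and the oversize list.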
size_t CopiedSpace::capacity()
{
    size_t calculatedCapacity = 0;

    for (CopiedBlock* block = m_toSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next())
        calculatedCapacity += block->capacity();

    return calculatedCapacity;
}
Example #5
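CopiedSpace::size is the same traversal, but sums each block's used size rather than its capacity.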
size_t CopiedSpace::size()
{
    size_t calculatedSize = 0;

    for (CopiedBlock* block = m_toSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next())
        calculatedSize += block->size();

    for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next())
        calculatedSize += block->size();

    return calculatedSize;
}
Example #6
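A later, generational version of tryAllocateOversize: the block is created with a size rounded up to a multiple of sizeof(double), placed in the new generation's oversize list and bloom filter, and asserted to be new; the heap is charged the block's capacity.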
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));
    
    CopiedBlock* block = CopiedBlock::create(*m_heap, WTF::roundUpToMultipleOf<sizeof(double)>(sizeof(CopiedBlock) + bytes));
    m_newGen.oversizeBlocks.push(block);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
    m_blockSet.add(block);
    ASSERT(!block->isOld());
    
    CopiedAllocator allocator;
    allocator.setCurrentBlock(block);
    *outPtr = allocator.forceAllocate(bytes);
    allocator.resetCurrentBlock();

    m_heap->didAllocate(block->capacity());

    return true;
}
Example #7
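The generational version of doneCopying: it selects the old or new generation's semispaces depending on whether a full or eden collection is in progress, moves every surviving from-space block back to to-space (re-adding it to the bloom filter and clearing its GC state), and, after an eden collection, promotes the new generation's blocks and filter into the old generation before allocating a fresh block.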
void CopiedSpace::doneCopying()
{
    RELEASE_ASSERT(!m_numberOfLoanedBlocks);
    RELEASE_ASSERT(m_inCopyingPhase == m_shouldDoCopyPhase);
    m_inCopyingPhase = false;

    DoublyLinkedList<CopiedBlock>* toSpace;
    DoublyLinkedList<CopiedBlock>* fromSpace;
    TinyBloomFilter* blockFilter;
    if (heap()->operationInProgress() == FullCollection) {
        toSpace = m_oldGen.toSpace;
        fromSpace = m_oldGen.fromSpace;
        blockFilter = &m_oldGen.blockFilter;
    } else {
        toSpace = m_newGen.toSpace;
        fromSpace = m_newGen.fromSpace;
        blockFilter = &m_newGen.blockFilter;
    }

    while (!fromSpace->isEmpty()) {
        CopiedBlock* block = fromSpace->removeHead();
        // We don't add the block to the blockSet because it was never removed.
        ASSERT(m_blockSet.contains(block));
        blockFilter->add(reinterpret_cast<Bits>(block));
        block->didSurviveGC();
        toSpace->push(block);
    }

    if (heap()->operationInProgress() == EdenCollection) {
        m_oldGen.toSpace->append(*m_newGen.toSpace);
        m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
        m_oldGen.blockFilter.add(m_newGen.blockFilter);
        m_newGen.blockFilter.reset();
    }

    ASSERT(m_newGen.toSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());
    ASSERT(m_newGen.oversizeBlocks.isEmpty());

    allocateBlock();

    m_shouldDoCopyPhase = false;
}
Example #8
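An earlier, non-generational doneCopying: it first waits under m_loanedBlocksLock until all loaned blocks have been returned, then returns pinned from-space blocks to to-space, deallocates fully evacuated blocks, sweeps unpinned oversize blocks, and finally makes sure the allocator has a current block.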
void CopiedSpace::doneCopying()
{
    {
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            block->m_isPinned = false;
            // We don't add the block to the toSpaceSet because it was never removed.
            ASSERT(m_toSpaceSet.contains(block));
            m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
            m_toSpace->push(block);
            continue;
        }

        m_toSpaceSet.remove(block);
        m_heap->blockAllocator().deallocate(block);
    }

    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            curr->m_allocation.deallocate();
        } else
            curr->m_isPinned = false;
        curr = next;
    }

    if (!m_toSpace->head()) {
        if (!addNewBlock())
            CRASH();
    } else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
Example #9
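didStartFullCollection resets per-block survival state (didSurviveGC clears live-byte accounting) for every block in to-space and the oversize list; from-space must already be empty.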
void CopiedSpace::didStartFullCollection()
{
    ASSERT(heap()->operationInProgress() == FullCollection);

    ASSERT(m_fromSpace->isEmpty());

    for (CopiedBlock* block = m_toSpace->head(); block; block = block->next())
        block->didSurviveGC();

    for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next())
        block->didSurviveGC();
}
Example #10
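The generational variant of didStartFullCollection: it additionally asserts, in debug builds, that no new-generation block has live bytes, and resets only the old generation's blocks, since new-generation accounting is already clean.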
void CopiedSpace::didStartFullCollection()
{
    ASSERT(heap()->operationInProgress() == FullCollection);
    ASSERT(m_oldGen.fromSpace->isEmpty());
    ASSERT(m_newGen.fromSpace->isEmpty());

#ifndef NDEBUG
    for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next())
        ASSERT(!block->liveBytes());

    for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next())
        ASSERT(!block->liveBytes());
#endif

    for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next())
        block->didSurviveGC();

    for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next())
        block->didSurviveGC();
}
Example #11
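startedCopying flips the semispaces and decides whether a copy phase is worthwhile: evacuated, unpinned from-space blocks are recycled immediately, live and usable byte totals are accumulated (a pinned oversize block counts as fully live), and the copy phase is enabled only when overall utilization, computed together with the marked space, is at or below Options::minHeapUtilization().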
void CopiedSpace::startedCopying()
{
    std::swap(m_fromSpace, m_toSpace);

    m_blockFilter.reset();
    m_allocator.resetCurrentBlock();

    CopiedBlock* next = 0;
    size_t totalLiveBytes = 0;
    size_t totalUsableBytes = 0;
    for (CopiedBlock* block = m_fromSpace->head(); block; block = next) {
        next = block->next();
        if (!block->isPinned() && block->canBeRecycled()) {
            recycleEvacuatedBlock(block);
            continue;
        }
        totalLiveBytes += block->liveBytes();
        totalUsableBytes += block->payloadCapacity();
    }

    CopiedBlock* block = m_oversizeBlocks.head();
    while (block) {
        CopiedBlock* next = block->next();
        if (block->isPinned()) {
            m_blockFilter.add(reinterpret_cast<Bits>(block));
            totalLiveBytes += block->payloadCapacity();
            totalUsableBytes += block->payloadCapacity();
            block->didSurviveGC();
        } else {
            m_oversizeBlocks.remove(block);
            m_blockSet.remove(block);
            m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
        } 
        block = next;
    }

    double markedSpaceBytes = m_heap->objectSpace().capacity();
    double totalFragmentation = ((double)totalLiveBytes + markedSpaceBytes) / ((double)totalUsableBytes + markedSpaceBytes);
    m_shouldDoCopyPhase = totalFragmentation <= Options::minHeapUtilization();
    if (!m_shouldDoCopyPhase)
        return;

    ASSERT(m_shouldDoCopyPhase);
    ASSERT(!m_inCopyingPhase);
    ASSERT(!m_numberOfLoanedBlocks);
    m_inCopyingPhase = true;
}
Example #12
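CopiedBlock::create allocates a block without zero-filling and then zero-fills only the block's unused "wilderness" region.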
CopiedBlock* CopiedBlock::create(size_t capacity)
{
    CopiedBlock* newBlock = createNoZeroFill(capacity);
    newBlock->zeroFillWilderness();
    return newBlock;
}