// Grows an oversize allocation by allocating a fresh oversize block, copying the
// payload across, and releasing the previous oversize block (if the old storage
// was itself oversize). On allocation failure *ptr is nulled and false returned.
CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
    ASSERT(isOversize(oldSize) || isOversize(newSize));
    ASSERT(newSize > oldSize);

    void* source = *ptr;
    void* destination = 0;
    if (!tryAllocateOversize(newSize, &destination)) {
        // Match the failure contract of the allocator: the caller's pointer is cleared.
        *ptr = 0;
        return false;
    }

    // Only the previously-live bytes need to be preserved.
    memcpy(destination, source, oldSize);

    CopiedBlock* staleBlock = CopiedSpace::blockFor(source);
    if (staleBlock->isOversize()) {
        // Unlink the stale block from whichever generation currently owns it,
        // then drop it from the block set and hand its storage back.
        (staleBlock->isOld() ? m_oldGen : m_newGen).oversizeBlocks.remove(staleBlock);
        m_blockSet.remove(staleBlock);
        m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(staleBlock));
    }

    *ptr = destination;
    return true;
}
// Grows an oversize allocation: obtains a fresh oversize block, copies the live
// payload, then eagerly retires the previous oversize block, crediting old-space
// accounting when the retired block had already been promoted.
// On allocation failure *ptr is nulled and false is returned.
CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
    ASSERT(isOversize(oldSize) || isOversize(newSize));
    ASSERT(newSize > oldSize);

    void* source = *ptr;
    void* destination = 0;
    if (!tryAllocateOversize(newSize, &destination)) {
        // Failure contract: the caller's pointer is cleared.
        *ptr = 0;
        return false;
    }

    // Only the previously-live bytes need to be preserved.
    memcpy(destination, source, oldSize);

    CopiedBlock* staleBlock = CopiedSpace::blockFor(source);
    if (staleBlock->isOversize()) {
        // FIXME: Eagerly deallocating the old space block probably buys more confusion than
        // value.
        // https://bugs.webkit.org/show_bug.cgi?id=144750
        bool wasPromoted = staleBlock->isOld();
        if (wasPromoted)
            m_bytesRemovedFromOldSpaceDueToReallocation += staleBlock->size();
        (wasPromoted ? m_oldGen : m_newGen).oversizeBlocks.remove(staleBlock);
        m_blockSet.remove(staleBlock);
        CopiedBlock::destroy(*heap(), staleBlock);
    }

    *ptr = destination;
    return true;
}
// Defers copying of a cell's backing store and records the bytes that are live
// in the copied space. Oversize backing stores are pinned (never compacted),
// and their whole block size is accounted rather than the requested byte count.
void SlotVisitor::copyLater(JSCell* owner, CopyToken token, void* ptr, size_t bytes)
{
    ASSERT(bytes);

    CopiedBlock* backingBlock = CopiedSpace::blockFor(ptr);
    if (backingBlock->isOversize()) {
        ASSERT(bytes <= backingBlock->size());
        // FIXME: We should be able to shrink the allocation if bytes went below the block size.
        // For now, we just make sure that our accounting of how much memory we are actually using
        // is correct.
        // https://bugs.webkit.org/show_bug.cgi?id=144749
        bytes = backingBlock->size();
        m_heap.m_storageSpace.pin(backingBlock);
    }

    ASSERT(heap()->m_storageSpace.contains(backingBlock));

    LockHolder workListLocker(&backingBlock->workListLock());

    // We always report live bytes, except if during an eden collection we see an old object
    // pointing to an old backing store and the old object is being marked because of the
    // remembered set. Note that if we ask the object itself, it will always tell us that it's
    // an old black object — because even during an eden collection we have already indicated
    // that the object is old. That's why we use the SlotVisitor's cache of the object's old
    // state.
    bool shouldReport = heap()->operationInProgress() == FullCollection
        || !backingBlock->isOld()
        || m_currentObjectCellStateBeforeVisiting != CellState::OldGrey;
    if (shouldReport) {
        m_bytesCopied += bytes;
        backingBlock->reportLiveBytes(workListLocker, owner, token, bytes);
    }
}
// Allocates a dedicated oversize block (one block per allocation) for requests
// too large for a normal copied block, registers it with the new generation,
// and returns the payload pointer through outPtr. Always succeeds here; the
// CheckedBoolean return mirrors the non-oversize allocation path's contract.
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));

    // Custom-sized region: header plus payload, aligned to the standard block size.
    CopiedBlock* oversizeBlock = CopiedBlock::create(
        m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, CopiedBlock::blockSize));

    // New oversize storage always starts life in the new generation.
    m_newGen.oversizeBlocks.push(oversizeBlock);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(oversizeBlock));
    m_blockSet.add(oversizeBlock);
    ASSERT(!oversizeBlock->isOld());

    // Bump-allocate the single payload out of the fresh block.
    CopiedAllocator bumpAllocator;
    bumpAllocator.setCurrentBlock(oversizeBlock);
    *outPtr = bumpAllocator.forceAllocate(bytes);
    bumpAllocator.resetCurrentBlock();

    // Tell the heap how much real memory was consumed (the whole region).
    m_heap->didAllocate(oversizeBlock->region()->blockSize());

    return true;
}
// Allocates a dedicated oversize block (one block per allocation) for requests
// too large for a normal copied block, registers it with the new generation,
// and returns the payload pointer through outPtr. Always succeeds here; the
// CheckedBoolean return mirrors the non-oversize allocation path's contract.
CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
    ASSERT(isOversize(bytes));

    // Header plus payload, rounded up so the payload stays double-aligned.
    CopiedBlock* oversizeBlock = CopiedBlock::create(
        *m_heap, WTF::roundUpToMultipleOf<sizeof(double)>(sizeof(CopiedBlock) + bytes));

    // New oversize storage always starts life in the new generation.
    m_newGen.oversizeBlocks.push(oversizeBlock);
    m_newGen.blockFilter.add(reinterpret_cast<Bits>(oversizeBlock));
    m_blockSet.add(oversizeBlock);
    ASSERT(!oversizeBlock->isOld());

    // Bump-allocate the single payload out of the fresh block.
    CopiedAllocator bumpAllocator;
    bumpAllocator.setCurrentBlock(oversizeBlock);
    *outPtr = bumpAllocator.forceAllocate(bytes);
    bumpAllocator.resetCurrentBlock();

    // Tell the heap how much real memory was consumed (the block's full capacity).
    m_heap->didAllocate(oversizeBlock->capacity());

    return true;
}