void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    // Refill path taken when this allocator's free list is empty: retry the
    // fast path, then retry after a possible collection, and finally back the
    // allocator with a brand-new block. Caller must hold the API lock.
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
#if COLLECT_ON_EVERY_ALLOCATION
    if (!m_heap->isDeferred())
        m_heap->collectAllGarbage();
    ASSERT(m_heap->m_operationInProgress == NoOperation);
#endif
    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);

    // Bytes previously carved out of this free list now count as allocated
    // in the heap's accounting.
    m_heap->didAllocate(m_freeList.bytes);

    void* cell = tryAllocate(bytes);
    if (LIKELY(cell != 0))
        return cell;

    // If a collection actually ran, memory may have been reclaimed — retry.
    if (m_heap->collectIfNecessaryOrDefer()) {
        cell = tryAllocate(bytes);
        if (cell)
            return cell;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* freshBlock = allocateBlock(bytes);
    ASSERT(freshBlock);
    addBlock(freshBlock);

    // With a fresh block installed, allocation cannot fail.
    cell = tryAllocate(bytes);
    ASSERT(cell);
    return cell;
}
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    // Slow-path allocation: the free list is exhausted, so account for what
    // was handed out, optionally collect, and refill from a fresh block.
    // Caller must hold the API lock.
    ASSERT(m_heap->vm()->apiLock().currentThreadIsHoldingLock());
#if COLLECT_ON_EVERY_ALLOCATION
    // Stress mode: force a full GC on every slow-path allocation.
    m_heap->collectAllGarbage();
    ASSERT(m_heap->m_operationInProgress == NoOperation);
#endif
    ASSERT(!m_freeList.head);
    // Bytes previously carved out of this free list now count as allocated
    // in the heap's accounting.
    m_heap->didAllocate(m_freeList.bytes);

    void* result = tryAllocate(bytes);
    if (LIKELY(result != 0))
        return result;

    // If the heap thinks a collection is due, run one (without sweeping) and
    // retry before committing new memory.
    if (m_heap->shouldCollect()) {
        m_heap->collect(Heap::DoNotSweep);
        result = tryAllocate(bytes);
        if (result)
            return result;
    }

    ASSERT(!m_heap->shouldCollect());

    // Still empty: back the allocator with a brand-new block. The ASSERTs
    // document that block allocation and the final retry are expected to
    // succeed on this path.
    MarkedBlock* block = allocateBlock(bytes);
    ASSERT(block);
    addBlock(block);
    result = tryAllocate(bytes);
    ASSERT(result);
    return result;
}
void* MarkedAllocator::allocateSlowCase(size_t bytes)
{
    // Out-of-line refill: runs when the inline fast path found no free cell.
    // The strategy is retry, collect-and-retry, then allocate a new block.
    ASSERT(m_heap->vm()->currentThreadIsHoldingAPILock());
    doTestCollectionsIfNeeded();

    ASSERT(!m_markedSpace->isIterating());
    ASSERT(!m_freeList.head);

    // Report bytes consumed from the exhausted free list to the heap.
    m_heap->didAllocate(m_freeList.bytes);

    void* allocation = tryAllocate(bytes);
    if (LIKELY(allocation != 0))
        return allocation;

    // A collection may have freed memory — try once more if one ran.
    if (m_heap->collectIfNecessaryOrDefer()) {
        allocation = tryAllocate(bytes);
        if (allocation)
            return allocation;
    }

    ASSERT(!m_heap->shouldCollect());

    MarkedBlock* newBlock = allocateBlock(bytes);
    ASSERT(newBlock);
    addBlock(newBlock);

    // A freshly added block guarantees the next attempt succeeds.
    allocation = tryAllocate(bytes);
    ASSERT(allocation);
    return allocation;
}
int main(int argc, char *argv[]) { checkpointNext("Establishing base values:"); SceSize baseMaxFree = sceKernelMaxFreeMemSize(); if (baseMaxFree < 0) { checkpoint("sceKernelMaxFreeMemSize: %08x", baseMaxFree); } else { checkpoint("sceKernelMaxFreeMemSize: OK"); } SceSize baseTotal = sceKernelTotalFreeMemSize(); if (baseTotal < 0) { checkpoint("sceKernelTotalFreeMemSize: %08x", baseTotal); } else { checkpoint("sceKernelTotalFreeMemSize: OK"); } checkpointNext("After allocating:"); SceUID blocks[8]; int i; for (i = 0; i < 8; ++i) { blocks[i] = sceKernelAllocPartitionMemory(PSP_MEMORY_PARTITION_USER, "test", PSP_SMEM_Low, 0x8000, NULL); } checkpoint("sceKernelMaxFreeMemSize: at base - %d", baseMaxFree - sceKernelMaxFreeMemSize()); checkpoint("sceKernelTotalFreeMemSize: at base - %d", baseTotal - sceKernelTotalFreeMemSize()); checkpointNext("After fragmenting:"); sceKernelFreePartitionMemory(blocks[5]); blocks[5] = -1; checkpoint("sceKernelMaxFreeMemSize: at base - %d", baseMaxFree - sceKernelMaxFreeMemSize()); checkpoint("sceKernelTotalFreeMemSize: at base - %d", baseTotal - sceKernelTotalFreeMemSize()); checkpointNext("After free again:"); for (i = 0; i < 8; ++i) { if (blocks[i] >= 0) { sceKernelFreePartitionMemory(blocks[i]); } } checkpoint("sceKernelMaxFreeMemSize: at base - %d", baseMaxFree - sceKernelMaxFreeMemSize()); checkpoint("sceKernelTotalFreeMemSize: at base - %d", baseTotal - sceKernelTotalFreeMemSize()); checkpointNext("Allocate near limits:"); tryAllocate("Allocate sceKernelMaxFreeMemSize", sceKernelMaxFreeMemSize()); tryAllocate("Allocate sceKernelMaxFreeMemSize + 0x100", sceKernelMaxFreeMemSize() + 0x100); tryAllocate("Allocate sceKernelTotalFreeMemSize", sceKernelTotalFreeMemSize()); return 0; }
void* AllocationSpace::allocateSlowCase(MarkedSpace::SizeClass& sizeClass) { #if COLLECT_ON_EVERY_ALLOCATION m_heap->collectAllGarbage(); ASSERT(m_heap->m_operationInProgress == NoOperation); #endif void* result = tryAllocate(sizeClass); if (LIKELY(result != 0)) return result; AllocationEffort allocationEffort; if (( #if ENABLE(GGC) m_markedSpace.nurseryWaterMark() < m_heap->m_minBytesPerCycle #else m_heap->waterMark() < m_heap->highWaterMark() #endif ) || !m_heap->m_isSafeToCollect) allocationEffort = AllocationMustSucceed; else allocationEffort = AllocationCanFail; MarkedBlock* block = allocateBlock(sizeClass.cellSize, allocationEffort); if (block) { m_markedSpace.addBlock(sizeClass, block); void* result = tryAllocate(sizeClass); ASSERT(result); return result; } m_heap->collect(Heap::DoNotSweep); result = tryAllocate(sizeClass); if (result) return result; ASSERT(m_heap->waterMark() < m_heap->highWaterMark()); m_markedSpace.addBlock(sizeClass, allocateBlock(sizeClass.cellSize, AllocationMustSucceed)); result = tryAllocate(sizeClass); ASSERT(result); return result; }
PassRefPtr<ArrayBuffer> ArrayBuffer::create(unsigned numElements, unsigned elementByteSize)
{
    // Allocate backing storage for numElements cells of elementByteSize
    // bytes each; returns 0 if the storage could not be obtained.
    void* storage = tryAllocate(numElements, elementByteSize);
    if (!storage)
        return 0;
    unsigned byteLength = numElements * elementByteSize;
    return adoptRef(new ArrayBuffer(storage, byteLength));
}
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    // Grow a copied-space allocation in place when possible, otherwise
    // allocate new storage and copy. On failure *ptr is nulled and false
    // is returned; on success *ptr points at storage of at least newSize.

    // Shrinking (or equal size) needs no work.
    if (oldSize >= newSize)
        return true;

    void* oldPtr = *ptr;
    ASSERT(!m_heap->globalData()->isInitializingObject());

    // Oversize allocations live outside normal blocks and have their own
    // reallocation path.
    if (isOversize(oldSize) || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    // Fast path: if this was the bump allocator's most recent allocation and
    // the extra bytes still fit in the current block, just extend the bump
    // pointer — the returned pointer of allocate(delta) is intentionally
    // discarded because the extension is contiguous with oldPtr.
    if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
        size_t delta = newSize - oldSize;
        if (m_allocator.fitsInCurrentBlock(delta)) {
            (void)m_allocator.allocate(delta);
            return true;
        }
    }

    // Slow path: fresh allocation plus copy of the old contents.
    void* result = 0;
    if (!tryAllocate(newSize, &result)) {
        *ptr = 0;
        return false;
    }
    memcpy(result, oldPtr, oldSize);
    *ptr = result;
    return true;
}
void HippoCanvas::onSizeAllocated() { // Go through all possibilities for scrollbars allowed by the current scrollbar // policy and use the first one that is possible. (Note that this is different // from getHeightRequestImpl(int forWidth) where we need to find the possible // variant with the minimum required height, so we examine all possibilities) for (int hscrollbar = 0; hscrollbar <= 1; hscrollbar++) { if (!hscrollbar && hscrollbarPolicy_ == HIPPO_SCROLLBAR_ALWAYS) continue; if (hscrollbar && hscrollbarPolicy_ == HIPPO_SCROLLBAR_NEVER) continue; for (int vscrollbar = 0; vscrollbar <= 1; vscrollbar++) { if (!vscrollbar && vscrollbarPolicy_ == HIPPO_SCROLLBAR_ALWAYS) continue; if (vscrollbar && vscrollbarPolicy_ == HIPPO_SCROLLBAR_NEVER) continue; if (tryAllocate(hscrollbar != 0, vscrollbar != 0)) return; } } // This should not happen if our logic is correct g_warning("HippoCanvas::onSizeAllocated didn't find a possible scrollbar combination!"); }
PassRefPtr<ArrayBuffer> ArrayBuffer::create(void* source, unsigned byteLength)
{
    // Build an ArrayBuffer holding a copy of byteLength bytes from source;
    // returns 0 if the backing storage cannot be allocated.
    void* storage = tryAllocate(byteLength, 1);
    if (!storage)
        return 0;
    RefPtr<ArrayBuffer> result = adoptRef(new ArrayBuffer(storage, byteLength));
    memcpy(result->data(), source, byteLength);
    return result.release();
}
PassRefPtr<ArrayBuffer> ArrayBuffer::create(ArrayBuffer* other)
{
    // Clone another ArrayBuffer's contents into freshly allocated storage;
    // returns 0 if allocation fails.
    unsigned length = other->byteLength();
    void* storage = tryAllocate(length, 1);
    if (!storage)
        return 0;
    RefPtr<ArrayBuffer> result = adoptRef(new ArrayBuffer(storage, length));
    memcpy(result->data(), other->data(), length);
    return result.release();
}
void* MarkedAllocator::allocateSlowCase() { #if COLLECT_ON_EVERY_ALLOCATION m_heap->collectAllGarbage(); ASSERT(m_heap->m_operationInProgress == NoOperation); #endif void* result = tryAllocate(); if (LIKELY(result != 0)) return result; AllocationEffort allocationEffort; if (m_heap->shouldCollect()) allocationEffort = AllocationCanFail; else allocationEffort = AllocationMustSucceed; MarkedBlock* block = allocateBlock(allocationEffort); if (block) { addBlock(block); void* result = tryAllocate(); ASSERT(result); return result; } m_heap->collect(Heap::DoNotSweep); result = tryAllocate(); if (result) return result; ASSERT(m_heap->waterMark() < m_heap->highWaterMark()); addBlock(allocateBlock(AllocationMustSucceed)); result = tryAllocate(); ASSERT(result); return result; }
CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
    // Grow a copied-space allocation, preferring in-place extension over an
    // allocate-and-copy. On failure *ptr is nulled and false is returned.

    // Nothing to do when the allocation is not actually growing.
    if (oldSize >= newSize)
        return true;

    void* original = *ptr;
    ASSERT(!m_heap->vm()->isInitializingObject());

    // Oversize allocations are handled by their dedicated path.
    if (CopiedSpace::blockFor(original)->isOversize() || isOversize(newSize))
        return tryReallocateOversize(ptr, oldSize, newSize);

    // Fast path: the allocator may be able to extend in place.
    if (m_allocator.tryReallocate(original, oldSize, newSize))
        return true;

    // Slow path: fresh allocation followed by a copy of the old contents.
    void* replacement = 0;
    if (!tryAllocate(newSize, &replacement)) {
        *ptr = 0;
        return false;
    }
    memcpy(replacement, original, oldSize);
    *ptr = replacement;
    return true;
}