Example #1
void* FMallocTBB::Malloc( SIZE_T Size, uint32 Alignment )
{
	IncrementTotalMallocCalls();

	MEM_TIME(MemTime -= FPlatformTime::Seconds());

	void* NewPtr = NULL;
	if( Alignment != DEFAULT_ALIGNMENT )
	{
		Alignment = FMath::Max(Size >= 16 ? (uint32)16 : (uint32)8, Alignment);
		NewPtr = scalable_aligned_malloc( Size, Alignment );
	}
	else
	{
		NewPtr = scalable_malloc( Size );
	}

	if( !NewPtr && Size )
	{
		OutOfMemory(Size, Alignment);
	}
#if UE_BUILD_DEBUG || UE_BUILD_DEVELOPMENT
	else if (Size)
	{
		FMemory::Memset(NewPtr, DEBUG_FILL_NEW, Size); 
	}
#endif
	MEM_TIME(MemTime += FPlatformTime::Seconds());
	return NewPtr;
}
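The alignment clamp in this example guarantees a minimum alignment before handing off to TBB: allocations of 16 bytes or more get at least 16-byte alignment, smaller ones at least 8-byte alignment, unless the caller requested more. A minimal standalone sketch of that rule, using plain C++ stand-ins for the UE types (illustrative only, not engine code):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Sketch of the minimum-alignment rule from FMallocTBB::Malloc:
// allocations of at least 16 bytes get 16-byte alignment, smaller
// ones 8-byte alignment, unless the caller asked for more.
uint32_t EffectiveAlignment(std::size_t Size, uint32_t Alignment)
{
    return std::max(Size >= 16 ? uint32_t(16) : uint32_t(8), Alignment);
}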
Example #2
void FMallocTBB::Free( void* Ptr )
{
	if( !Ptr )
	{
		return;
	}
	MEM_TIME(MemTime -= FPlatformTime::Seconds());
#if UE_BUILD_DEBUG || UE_BUILD_DEVELOPMENT
	FMemory::Memset(Ptr, DEBUG_FILL_FREED, scalable_msize(Ptr)); 
#endif
	IncrementTotalFreeCalls();
	scalable_free(Ptr);

	MEM_TIME(MemTime += FPlatformTime::Seconds());
}
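In Debug and Development builds, Free scribbles the whole usable block (as reported by scalable_msize) with DEBUG_FILL_FREED before releasing it, so a stale read through a dangling pointer returns a recognizable pattern instead of old data. A hedged sketch of the same idea over plain malloc/free; the fill value here is an assumption, not UE's actual constant:

#include <cstdlib>
#include <cstring>

// Illustrative only: poison a block before freeing it so that any
// use-after-free read yields the fill pattern rather than stale data.
void DebugFree(void* Ptr, std::size_t UsableSize)
{
    const unsigned char kFillFreed = 0xDD; // assumed pattern, not UE's DEBUG_FILL_FREED
    std::memset(Ptr, kFillFreed, UsableSize);
    std::free(Ptr);
}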
Example #3
void* FMallocTBB::Realloc( void* Ptr, SIZE_T NewSize, uint32 Alignment )
{
	IncrementTotalReallocCalls();

	MEM_TIME(MemTime -= FPlatformTime::Seconds());
#if UE_BUILD_DEBUG || UE_BUILD_DEVELOPMENT
	SIZE_T OldSize = 0;
	if (Ptr)
	{
		OldSize = scalable_msize(Ptr);
		if (NewSize < OldSize)
		{
			FMemory::Memset((uint8*)Ptr + NewSize, DEBUG_FILL_FREED, OldSize - NewSize); 
		}
	}
#endif
	void* NewPtr = NULL;
	if (Alignment != DEFAULT_ALIGNMENT)
	{
		Alignment = FMath::Max(NewSize >= 16 ? (uint32)16 : (uint32)8, Alignment);
		NewPtr = scalable_aligned_realloc(Ptr, NewSize, Alignment);
	}
	else
	{
		NewPtr = scalable_realloc(Ptr, NewSize);
	}
#if UE_BUILD_DEBUG || UE_BUILD_DEVELOPMENT
	if (NewPtr && NewSize > OldSize)
	{
		FMemory::Memset((uint8*)NewPtr + OldSize, DEBUG_FILL_NEW, NewSize - OldSize); 
	}
#endif
	if( !NewPtr && NewSize )
	{
		OutOfMemory(NewSize, Alignment);
	}
	MEM_TIME(MemTime += FPlatformTime::Seconds());
	return NewPtr;
}
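The two debug fills bracket the resize: when shrinking, the tail that is about to be discarded is poisoned with DEBUG_FILL_FREED on the old block before the realloc; when growing, the newly exposed tail is stamped with DEBUG_FILL_NEW on the new block afterwards. A compact sketch of the two fill regions (illustrative helper, with the fill constants passed in as parameters):

#include <cstring>

// Sketch of the debug-fill regions around a resize. In the real code the
// shrink fill runs on the old block before realloc and the grow fill on
// the new block after it; this helper just shows which bytes get stamped.
void FillResizedTail(void* Ptr, std::size_t OldSize, std::size_t NewSize,
                     unsigned char FillFreed, unsigned char FillNew)
{
    unsigned char* Bytes = static_cast<unsigned char*>(Ptr);
    if (NewSize < OldSize)
        std::memset(Bytes + NewSize, FillFreed, OldSize - NewSize); // discarded tail
    else if (NewSize > OldSize)
        std::memset(Bytes + OldSize, FillNew, NewSize - OldSize);   // fresh tail
}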
Example #4
void* FMallocBinned::realloc(void* origin, size_t newSize, uint32_t alignment)
{
    // Handle DefaultAlignment for binned allocator.
    if (alignment == DefaultAlignment)
    {
        alignment = default_binned_allocator_alignment;
    }

    FAssert(alignment <= pageSize);

    alignment = std::max<uint32_t>(alignment, default_binned_allocator_alignment);

    if (newSize)
    {
        newSize = std::max<size_t>(alignment, FAlign(newSize, alignment));
    }

    MEM_TIME(MemTime -= FPlatformTime::Seconds());

    UIntPtr_t basePtr;

    void* newPtr = origin;

    if( origin && newSize )
    {
        FPoolInfo* pool = findPoolInfo((UIntPtr_t)origin, basePtr);

        if( pool->tableIndex < binnedOSTableIndex )
        {
            // Allocated from pool, so grow or shrink if necessary.
            FAssert(pool->tableIndex > 0); // it isn't possible to allocate a size of 0, Malloc will increase the size to default_binned_allocator_alignment

            const uint32_t thisTableBlockSize = memSizeToPoolTable[pool->tableIndex]->blockSize;

            if( newSize > thisTableBlockSize || newSize <= memSizeToPoolTable[pool->tableIndex - 1]->blockSize )
            {
                newPtr = this->malloc( newSize, alignment );
                FMemory::memcpy( newPtr, origin, std::min<size_t>( newSize, thisTableBlockSize ) );
                this->free( origin );
            }
        }
        else
        {
            // Allocated from OS.
            FAssert(!((UIntPtr_t)origin & (pageSize-1)));

            if( newSize > pool->getOsBytes(pageSize, (uint32_t)binnedOSTableIndex)
               || newSize * 3 < pool->getOsBytes(pageSize, (uint32_t)binnedOSTableIndex) * 2 )
            {
                // Grow or shrink.
                newPtr = this->malloc( newSize, alignment );
                FMemory::memcpy( newPtr, origin, std::min<size_t>(newSize, pool->getBytes()) );
                this->free( origin );
            }
            else
            {
                // Keep as-is, reallocation isn't worth the overhead.
                STAT(usedCurrent += newSize - pool->getBytes());
                STAT(usedPeak = std::max(usedPeak, usedCurrent));
                STAT(wasteCurrent += pool->getBytes() - newSize);
                pool->setAllocationSizes((uint32_t)newSize, pool->getOsBytes(pageSize, (uint32_t)binnedOSTableIndex), (uint32_t)binnedOSTableIndex, (uint32_t)binnedOSTableIndex);
            }
        }
    }
    else if( origin == nullptr )
    {
        newPtr = this->malloc( newSize, alignment );
    }
    else
    {
        this->free( origin );
        newPtr = nullptr;
    }
    
    MEM_TIME(MemTime += FPlatformTime::Seconds());
    return newPtr;
}
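Two in-place thresholds drive this realloc: a pool block is moved only when the new size no longer fits its bin (larger than the bin's block size, or small enough for the next-smaller bin), and an OS block is moved only when the request grows past the current OS allocation or shrinks below two thirds of it. The OS-branch policy, isolated as a sketch (illustrative helper, not part of FMallocBinned):

#include <cstddef>

// The grow/shrink policy from the OS branch above, isolated.
bool ShouldMoveOsBlock(std::size_t NewSize, std::size_t OsBytes)
{
    return NewSize > OsBytes           // must grow past the current OS block
        || NewSize * 3 < OsBytes * 2;  // shrank below 2/3: copying pays off
}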
Example #5
void* FMallocBinned::malloc(size_t size, uint32_t alignment)
{
#ifdef USE_COARSE_GRAIN_LOCKS
    FScopeLock ScopedLock(&AccessGuard);
#endif

    flushPendingFrees();

    // Handle DefaultAlignment for binned allocator.
    if (alignment == DefaultAlignment)
    {
        alignment = default_binned_allocator_alignment;
    }

    FAssert(alignment <= pageSize);

    alignment = std::max<uint32_t>(alignment, default_binned_allocator_alignment);

    size = std::max<size_t>(alignment, FAlign(size, alignment));

    MEM_TIME(MemTime -= FPlatformTime::Seconds());

    STAT(currentAllocs++);
    STAT(totalAllocs++);

    FFreeMem* free = nullptr;

    if( size < binnedSizeLimit )
    {
        // Allocate from pool.
        FPoolTable* table = memSizeToPoolTable[size];
#ifdef USE_FINE_GRAIN_LOCKS
        std::lock_guard<std::mutex> tableLock(table->mutex);
#endif
        FAssert(size <= table->blockSize);

        trackStats(table, (uint32_t)size);

        FPoolInfo* pool = table->firstPool;
        if( !pool )
        {
            pool = allocatePoolMemory(table, binned_alloc_pool_size/*PageSize*/, size);
        }

        free = allocateBlockFromPool(table, pool);
    }
    else if ( ((size >= binnedSizeLimit && size <= pagePoolTable[0].blockSize) ||
               (size > pageSize && size <= pagePoolTable[1].blockSize))
             && alignment == default_binned_allocator_alignment )
    {
        // Bucket in a pool of 3*PageSize or 6*PageSize
        uint32_t binType = size < pageSize ? 0 : 1;
        uint32_t pageCount = 3 * binType + 3;

        FPoolTable* table = &pagePoolTable[binType];
#ifdef USE_FINE_GRAIN_LOCKS
        std::lock_guard<std::mutex> tableLock(table->mutex);
#endif
        FAssert(size <= table->blockSize);

        trackStats(table, (uint32_t)size);

        FPoolInfo* pool = table->firstPool;
        if( !pool )
        {
            pool = allocatePoolMemory(table, pageCount * pageSize, binnedSizeLimit + binType);
        }

        free = allocateBlockFromPool(table, pool);
    }
    else
    {
        // Use OS for large allocations.
        UIntPtr_t alignedSize = FAlign(size, pageSize);

        size_t actualPoolSize; //TODO: use this to reduce waste?
        free = (FFreeMem*)osAlloc(alignedSize, actualPoolSize);
        if( !free )
        {
            outOfMemory(alignedSize);
        }

        FAssert(!((size_t)free & (pageSize - 1)));

        // Create indirect.
        FPoolInfo* pool;
        {
#ifdef USE_FINE_GRAIN_LOCKS
            std::lock_guard<std::mutex> poolInfoLock(accessGuard);
#endif
            pool = getPoolInfo((UIntPtr_t)free);
        }

        pool->setAllocationSizes((uint32_t)size, alignedSize, (uint32_t)binnedOSTableIndex, (uint32_t)binnedOSTableIndex);

        STAT(osPeak = std::max(osPeak, osCurrent += alignedSize));
        STAT(usedPeak = std::max(usedPeak, usedCurrent += size));
        STAT(wastePeak = std::max(wastePeak, wasteCurrent += alignedSize - size));
    }
    
    MEM_TIME(MemTime += FPlatformTime::Seconds());
    return free;
}
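This malloc routes by size class: requests below binnedSizeLimit go to a per-block-size pool table, mid-range requests with default alignment go to one of two page pools backed by 3 or 6 OS pages, and everything else becomes a page-aligned OS allocation tracked through an indirect FPoolInfo. A sketch of that three-way decision, with illustrative parameter names standing in for the allocator's configuration:

#include <cstddef>
#include <cstdint>

enum class EBinRoute { SmallPool, PagePool, OsAlloc };

// Sketch of the routing in FMallocBinned::malloc; parameter names are
// stand-ins for the allocator's tables, not real engine symbols.
EBinRoute RouteAllocation(std::size_t Size, std::uint32_t Alignment,
                          std::size_t BinnedSizeLimit, std::size_t PageSize,
                          std::size_t PagePool0BlockSize,
                          std::size_t PagePool1BlockSize,
                          std::uint32_t DefaultBinnedAlignment)
{
    if (Size < BinnedSizeLimit)
        return EBinRoute::SmallPool;                   // per-size pool table
    if (Alignment == DefaultBinnedAlignment &&
        (Size <= PagePool0BlockSize ||
         (Size > PageSize && Size <= PagePool1BlockSize)))
        return EBinRoute::PagePool;                    // 3- or 6-page buckets
    return EBinRoute::OsAlloc;                         // page-aligned OS alloc
}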