/***********************************************************************/
/* RequestChunk: Returns a pointer to requestSize bytes drawn from the */
/*   block memory pool (the usable memory starts just past the hidden  */
/*   chunkInfo header).  Initializes the pool on first use.  Returns   */
/*   NULL if the pool cannot be initialized; if no block can satisfy   */
/*   the request even after growing the pool, reports SystemError      */
/*   "MEMORY" 2 and exits the environment.                             */
/***********************************************************************/
globle void *RequestChunk(
  void *theEnv,
  size_t requestSize)
  {
   struct chunkInfo *chunkPtr;
   struct blockInfo *blockPtr;

   /*==================================================*/
   /* Allocate initial memory pool block if it has not */
   /* already been allocated.                          */
   /*==================================================*/

   if (MemoryData(theEnv)->BlockMemoryInitialized == FALSE)
     {
      if (InitializeBlockMemory(theEnv,requestSize) == 0) return(NULL);
     }

   /*====================================================*/
   /* Make sure that the amount of memory requested will */
   /* fall on a boundary of strictest alignment          */
   /*====================================================*/

   requestSize = (((requestSize - 1) / STRICT_ALIGN_SIZE) + 1) * STRICT_ALIGN_SIZE;

   /*=====================================================*/
   /* Search through the list of free memory for a block  */
   /* of the appropriate size.  If a block is found, then */
   /* allocate and return a pointer to it.                */
   /*=====================================================*/

   blockPtr = MemoryData(theEnv)->TopMemoryBlock;

   while (blockPtr != NULL)
     {
      chunkPtr = blockPtr->nextFree;

      while (chunkPtr != NULL)
        {
         /* A chunk is usable when it matches the request exactly, or   */
         /* when it is large enough to be split into the requested size */
         /* plus at least one additional chunk header for the remainder.*/
         if ((chunkPtr->size == requestSize) ||
             (chunkPtr->size > (requestSize + MemoryData(theEnv)->ChunkInfoSize)))
           {
            AllocateChunk(theEnv,blockPtr,chunkPtr,requestSize);

            /* Skip past the chunk header to the caller-visible memory. */
            return((void *) (((char *) chunkPtr) + MemoryData(theEnv)->ChunkInfoSize));
           }
         chunkPtr = chunkPtr->nextFree;
        }

      /* No free chunk fit in any block so far: grow the pool by      */
      /* appending a new block, which the loop then advances into.    */
      if (blockPtr->nextBlock == NULL)
        {
         if (AllocateBlock(theEnv,blockPtr,requestSize) == 0)  /* get another block */
           { return(NULL); }
        }
      blockPtr = blockPtr->nextBlock;
     }

   SystemError(theEnv,(char*)"MEMORY",2);
   EnvExitRouter(theEnv,EXIT_FAILURE);

   return(NULL); /* Unreachable, but prevents warning. */
  }
//------------------------------------------------------------------------- Manager::Manager() { if( g_manager ) { throw std::runtime_error( "Cannot create multiple arena managers." ); } g_manager = this; AllocateChunk(); }
void Space::UpdateChunks( float dt ) { const Vec2i chunk_index = CurrentChunkIndex(); if( SETTINGS->GetValue<bool>( "show_pos" ) ) { std::stringstream ss; Vec2i ip = satellite.GetPos(); ss << "pos: " << ip << " in chunk " << chunk_index << '\n'; Tree::VisualDebug( ss.str() ); } // Not yet checked chunk if( checked_chunks.find( chunk_index ) == checked_chunks.end() ) { // Allocate this chunk (probably done but it gets checked anyway AllocateChunk( chunk_index ); // Allocate surrounding chunks AllocateChunk( chunk_index.x - 1, chunk_index.y ); AllocateChunk( chunk_index.x + 1, chunk_index.y ); AllocateChunk( chunk_index.x, chunk_index.y - 1 ); AllocateChunk( chunk_index.x, chunk_index.y + 1 ); AllocateChunk( chunk_index.x - 1, chunk_index.y - 1 ); AllocateChunk( chunk_index.x - 1, chunk_index.y + 1 ); AllocateChunk( chunk_index.x + 1, chunk_index.y - 1 ); AllocateChunk( chunk_index.x + 1, chunk_index.y + 1 ); } // Debug current chunks if( SETTINGS->GetValue<bool>( "chunk_count" ) ) { std::stringstream ss; ss << chunks.size() << " chunks\n"; Tree::VisualDebug( ss.str() ); } // Update all visible chunks! UpdateChunk( chunk_index, dt ); UpdateChunk( chunk_index.x - 1, chunk_index.y, dt ); UpdateChunk( chunk_index.x + 1, chunk_index.y, dt ); UpdateChunk( chunk_index.x, chunk_index.y - 1, dt ); UpdateChunk( chunk_index.x, chunk_index.y + 1, dt ); UpdateChunk( chunk_index.x - 1, chunk_index.y - 1, dt ); UpdateChunk( chunk_index.x - 1, chunk_index.y + 1, dt ); UpdateChunk( chunk_index.x + 1, chunk_index.y - 1, dt ); UpdateChunk( chunk_index.x + 1, chunk_index.y + 1, dt ); }
//------------------------------------------------------------------------- void *Manager::Alloc( int size, int aligned ) { if( size > Chunk::MaxSize() ) { throw std::length_error( "Request for arena memory too big." ); } Lock guard(m_mut); void *ptr = m_current_chunk->Get( size, aligned ); if( !ptr ) { AllocateChunk(); ptr = m_current_chunk->Get( size, aligned ); } if( !ptr ) { throw std::runtime_error( "Error allocating from arena." ); } m_allocated += size; return ptr; }
// Allocates _size_vk bytes (rounded up to _alignment) of device memory with
// the requested property flags, sub-allocating from an existing chunk when
// possible and creating a new 256 MiB chunk otherwise.  On success stores a
// new coVulkanDeviceAllocation in _alloc (any previous value is deleted).
coResult coVulkanDeviceAllocator::Allocate(coVulkanDeviceAllocation*& _alloc, const VkMemoryPropertyFlags& _flags_vk, const VkDeviceSize& _size_vk, const VkDeviceSize& _alignment)
{
	delete _alloc;
	_alloc = nullptr;

	// Initial raw implementation from https://cpp-rendering.io/vulkan-and-pipelines/.
	const VkDeviceSize size_vk = coAlignSize(_size_vk, _alignment);
	// NOTE(review): only the *size* is aligned here; a block's offset_vk is
	// whatever previous splits left it at, so it is not guaranteed to be a
	// multiple of _alignment unless all allocations share one alignment -
	// confirm against VkMemoryRequirements::alignment usage.

	// Find a chunk whose memory properties satisfy the request.
	for (coVulkanDeviceMemoryChunk* chunk : chunks)
	{
		coASSERT(chunk);
		if ((chunk->flags_vk & _flags_vk) != _flags_vk)
			continue;

		// Find the first free block large enough.  Bug fix: use >= rather
		// than > - a block whose size exactly matches the request is
		// perfectly usable; the zero-sized remainder is skipped below.
		coList<coVulkanDeviceMemoryBlock>::Entry* selectedBlockEntry = nullptr;
		for (auto& entry : chunk->blocks)
		{
			const auto& block = entry.data;
			if (block.isFree && block.size_vk >= size_vk)
			{
				selectedBlockEntry = &entry;
				break;
			}
		}
		if (!selectedBlockEntry)
			continue;

		// Split the selected block: the front becomes the allocation, the
		// (possibly empty) tail remains free.
		coVulkanDeviceMemoryBlock newBlock;
		newBlock.isFree = true;
		newBlock.offset_vk = selectedBlockEntry->data.offset_vk + size_vk;
		newBlock.size_vk = selectedBlockEntry->data.size_vk - size_vk;
		selectedBlockEntry->data.isFree = false;
		selectedBlockEntry->data.size_vk = size_vk;
		if (newBlock.size_vk != 0)
		{
			coInsertAfter(*selectedBlockEntry, newBlock);
		}

		coVulkanDeviceAllocation* allocation = new coVulkanDeviceAllocation();
		allocation->allocator = this;
		allocation->chunk = chunk;
		allocation->offset_vk = selectedBlockEntry->data.offset_vk;
		allocation->size_vk = size_vk;
		coASSERT(!_alloc);
		_alloc = allocation;
		return true;
	}

	// No existing chunk could satisfy the request: grow and retry.
	// NOTE(review): a request larger than the 256 MiB chunk size recurses
	// and keeps allocating chunks until vkAllocateMemory fails; consider
	// rejecting oversized requests up front.
	coVulkanDeviceMemoryChunk* chunk = nullptr;
	coTRY(AllocateChunk(chunk, _flags_vk, 256 * 1024 * 1024), "Failed to allocate new Vulkan memory chunk.");
	coASSERT(chunk);
	return Allocate(_alloc, _flags_vk, _size_vk, _alignment);
}
void Space::AllocateChunk( int x, int y ) { AllocateChunk( Vec2i( x, y ) ); }