/** For each specified Memory Allocator's Item, the function asks VMA for a memory region that
 *  can be assigned to corresponding wrapper instance. For each successfully handled request,
 *  a MemoryBlock instance is created, using the feedback provided by the library.
 *
 *  This function can be called multiple times.
 *
 *  @param in_items Scheduled allocation requests to bake. Successfully baked items have their
 *                  alloc_memory_block_ptr, alloc_size and is_baked fields updated in place.
 *
 *  @return true if all allocations have been handled successfully, false if there was at least
 *          one failure.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::bake(Anvil::MemoryAllocator::Items& in_items)
{
    bool     result    = true;
    VkResult result_vk = VK_ERROR_DEVICE_LOST;

    /* Go through all scheduled items and call the underlying library API to handle the request.
     *
     * For each successful allocation, wrap it with a MemoryBlock wrapper with a custom delete
     * handler, so that VMA is notified whenever a memory block it has provided memory backing for
     * has gone out of scope.
     */
    for (auto& current_item_ptr : in_items)
    {
        MemoryBlockUniquePtr new_memory_block_ptr(nullptr,
                                                  std::default_delete<Anvil::MemoryBlock>() );

        VmaAllocation                               allocation                  = VK_NULL_HANDLE;
        VmaAllocationCreateInfo                     allocation_create_info      = {};
        VmaAllocationInfo                           allocation_info             = {};
        VkMemoryRequirements                        memory_requirements_vk;
        Anvil::OnMemoryBlockReleaseCallbackFunction release_callback_function;
        VkMemoryHeapFlags                           required_mem_heap_flags     = 0;
        VkMemoryPropertyFlags                       required_mem_property_flags = 0;

        /* Translate Anvil's memory feature flags into Vulkan property / heap flags. */
        Anvil::Utils::get_vk_property_flags_from_memory_feature_flags(current_item_ptr->alloc_memory_required_features,
                                                                     &required_mem_property_flags,
                                                                     &required_mem_heap_flags);

        /* NOTE: VMA does not take required memory heap flags at the moment. Adding this is on their radar. */
        anvil_assert(required_mem_heap_flags == 0);

        /* All three VkMemoryRequirements fields are filled explicitly below, so the struct
         * does not need zero-initialization. */
        memory_requirements_vk.alignment      = current_item_ptr->alloc_memory_required_alignment;
        memory_requirements_vk.memoryTypeBits = current_item_ptr->alloc_memory_supported_memory_types;
        memory_requirements_vk.size           = current_item_ptr->alloc_size;

        allocation_create_info.requiredFlags = required_mem_property_flags;

        result_vk = vmaAllocateMemory(m_vma_allocator_ptr->get_handle(),
                                     &memory_requirements_vk,
                                     &allocation_create_info,
                                     &allocation,
                                     &allocation_info);

        if (!is_vk_call_successful(result_vk) )
        {
            /* Keep processing remaining items; report the partial failure via the result value. */
            result = false;

            continue;
        }

        /* Bake the block and stash it.
         *
         * The release callback tells VMA to free the allocation whenever the MemoryBlock
         * wrapper goes out of scope. */
        release_callback_function = std::bind(
            &VMAAllocator::on_vma_alloced_mem_block_gone_out_of_scope,
            m_vma_allocator_ptr,
            std::placeholders::_1,
            allocation
        );

        {
            auto create_info_ptr = Anvil::MemoryBlockCreateInfo::create_derived_with_custom_delete_proc(m_device_ptr,
                                                                                                        allocation_info.deviceMemory,
                                                                                                        memory_requirements_vk.memoryTypeBits,
                                                                                                        current_item_ptr->alloc_memory_required_features,
                                                                                                        allocation_info.memoryType,
                                                                                                        memory_requirements_vk.size,
                                                                                                        allocation_info.offset,
                                                                                                        release_callback_function);

            new_memory_block_ptr = Anvil::MemoryBlock::create(std::move(create_info_ptr) );
        }

        if (new_memory_block_ptr == nullptr)
        {
            anvil_assert(new_memory_block_ptr != nullptr);

            /* The VMA allocation succeeded but no wrapper owns it, so the release callback
             * will never fire for this block. Free the allocation here to avoid leaking
             * device memory. */
            vmaFreeMemory(m_vma_allocator_ptr->get_handle(),
                          allocation);

            result = false;

            continue;
        }

        /* Let the memory block know which backend + VMA allocation it originates from, so that
         * its destruction can be routed back to us. */
        dynamic_cast<IMemoryBlockBackendSupport*>(new_memory_block_ptr.get() )->set_parent_memory_allocator_backend_ptr(shared_from_this(),
                                                                                                                        allocation);

        current_item_ptr->alloc_memory_block_ptr = std::move(new_memory_block_ptr);
        current_item_ptr->alloc_size             = memory_requirements_vk.size;
        current_item_ptr->is_baked               = true;

        m_vma_allocator_ptr->on_new_vma_mem_block_alloced();
    }

    return result;
}
/** For each specified Memory Allocator's Item, the function asks VMA for a memory region that
 *  can be assigned to corresponding wrapper instance. For each successfully handled request,
 *  a MemoryBlock instance is created, using the feedback provided by the library.
 *
 *  This function can be called multiple times.
 *
 *  @param in_items Scheduled allocation requests to bake. Successfully baked items have their
 *                  alloc_memory_block_ptr, alloc_size and is_baked fields updated in place.
 *
 *  @return true if all allocations have been handled successfully, false if there was at least
 *          one failure.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::bake(Anvil::MemoryAllocator::Items& in_items)
{
    bool     result    = true;
    VkResult result_vk = VK_ERROR_DEVICE_LOST;

    /* Go through all scheduled items and call the underlying library API to handle the request.
     *
     * For each successful allocation, wrap it with a MemoryBlock wrapper with a custom delete
     * handler, so that VMA is notified whenever a memory block it has provided memory backing for
     * has gone out of scope.
     */
    for (auto& current_item_ptr : in_items)
    {
        anvil_assert(current_item_ptr->memory_priority == FLT_MAX); /* VMA doesn't support memory_priority */

        MemoryBlockUniquePtr new_memory_block_ptr(nullptr,
                                                  std::default_delete<Anvil::MemoryBlock>() );

        VmaAllocation                               allocation             = VK_NULL_HANDLE;
        VmaAllocationCreateInfo                     allocation_create_info = {};
        VmaAllocationInfo                           allocation_info        = {};
        bool                                        is_dedicated_alloc     = false;
        VkMemoryRequirements                        memory_requirements_vk;
        Anvil::OnMemoryBlockReleaseCallbackFunction release_callback_function;
        Anvil::MemoryHeapFlags                      required_mem_heap_flags;
        Anvil::MemoryPropertyFlags                  required_mem_property_flags;

        /* Translate Anvil's memory feature flags into Vulkan property / heap flags. */
        Anvil::Utils::get_vk_property_flags_from_memory_feature_flags(current_item_ptr->alloc_memory_required_features,
                                                                     &required_mem_property_flags,
                                                                     &required_mem_heap_flags);

        /* NOTE: VMA does not take required memory heap flags at the moment. Adding this is on their radar. */
        anvil_assert(required_mem_heap_flags == Anvil::MemoryHeapFlagBits::NONE);

        /* All three VkMemoryRequirements fields are filled explicitly below, so the struct
         * does not need zero-initialization. */
        memory_requirements_vk.alignment      = current_item_ptr->alloc_memory_required_alignment;
        memory_requirements_vk.memoryTypeBits = current_item_ptr->alloc_memory_supported_memory_types;
        memory_requirements_vk.size           = current_item_ptr->alloc_size;

        /* Forward a dedicated-allocation request to VMA if the item asked for one. */
        allocation_create_info.flags         = (current_item_ptr->alloc_is_dedicated_memory) ? VmaAllocationCreateFlagBits::VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
                                                                                             : 0;
        allocation_create_info.requiredFlags = required_mem_property_flags.get_vk();

        result_vk = vmaAllocateMemory(m_vma_allocator_ptr->get_handle(),
                                     &memory_requirements_vk,
                                     &allocation_create_info,
                                     &allocation,
                                     &allocation_info);

        if (!is_vk_call_successful(result_vk) )
        {
            /* Keep processing remaining items; report the partial failure via the result value. */
            result = false;

            continue;
        }
        else
        {
            /* VMA may promote an allocation to a dedicated one on its own; query the actual
             * allocation type rather than relying on the flag we passed in. */
            is_dedicated_alloc = (allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        }

        /* Bake the block and stash it.
         *
         * The release callback tells VMA to free the allocation whenever the MemoryBlock
         * wrapper goes out of scope. */
        release_callback_function = std::bind(
            &VMAAllocator::on_vma_alloced_mem_block_gone_out_of_scope,
            m_vma_allocator_ptr,
            std::placeholders::_1,
            allocation
        );

        {
            auto create_info_ptr = Anvil::MemoryBlockCreateInfo::create_derived_with_custom_delete_proc(m_device_ptr,
                                                                                                        allocation_info.deviceMemory,
                                                                                                        memory_requirements_vk.memoryTypeBits,
                                                                                                        current_item_ptr->alloc_memory_required_features,
                                                                                                        allocation_info.memoryType,
                                                                                                        memory_requirements_vk.size,
                                                                                                        allocation_info.offset,
                                                                                                        release_callback_function);

            if (is_dedicated_alloc)
            {
                /* Record which buffer / image the dedicated allocation is tied to. */
                if (current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_BUFFER               ||
                    current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_SPARSE_BUFFER_REGION)
                {
                    anvil_assert(current_item_ptr->buffer_ptr != nullptr);

                    create_info_ptr->use_dedicated_allocation(current_item_ptr->buffer_ptr,
                                                              nullptr); /* in_opt_image_ptr */
                }
                else
                if (current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_IMAGE_WHOLE              ||
                    current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_SPARSE_IMAGE_MIPTAIL     ||
                    current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_SPARSE_IMAGE_SUBRESOURCE)
                {
                    anvil_assert(current_item_ptr->image_ptr != nullptr);

                    create_info_ptr->use_dedicated_allocation(nullptr, /* in_opt_buffer_ptr */
                                                              current_item_ptr->image_ptr);
                }
            }

            new_memory_block_ptr = Anvil::MemoryBlock::create(std::move(create_info_ptr) );
        }

        if (new_memory_block_ptr == nullptr)
        {
            anvil_assert(new_memory_block_ptr != nullptr);

            /* The VMA allocation succeeded but no wrapper owns it, so the release callback
             * will never fire for this block. Free the allocation here to avoid leaking
             * device memory. */
            vmaFreeMemory(m_vma_allocator_ptr->get_handle(),
                          allocation);

            result = false;

            continue;
        }

        /* Let the memory block know which backend + VMA allocation it originates from, so that
         * its destruction can be routed back to us. */
        dynamic_cast<IMemoryBlockBackendSupport*>(new_memory_block_ptr.get() )->set_parent_memory_allocator_backend_ptr(shared_from_this(),
                                                                                                                        allocation);

        current_item_ptr->alloc_memory_block_ptr = std::move(new_memory_block_ptr);
        current_item_ptr->alloc_size             = memory_requirements_vk.size;
        current_item_ptr->is_baked               = true;

        m_vma_allocator_ptr->on_new_vma_mem_block_alloced();
    }

    return result;
}