Example no. 1
/* Please see header for specification */
bool Anvil::DescriptorPool::reset()
{
    VkResult result_vk;

    if (m_pool != VK_NULL_HANDLE)
    {
        result_vk = vkResetDescriptorPool(m_device_ptr->get_device_vk(),
                                          m_pool,
                                          0 /* flags */);
        anvil_assert_vk_call_succeeded(result_vk);

        if (is_vk_call_successful(result_vk) )
        {
            /* Allocated descriptor sets have gone out of scope. Send out a call-back, so that
             * descriptor set wrapper instances can mark themselves as unusable. */
            callback(DESCRIPTOR_POOL_CALLBACK_ID_POOL_RESET,
                     this);
        }
    }
    else
    {
        result_vk = VK_SUCCESS;
    }

    return is_vk_call_successful(result_vk); 
}
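/* A minimal standalone sketch (not Anvil API; all names below are hypothetical)
 * of the reset-notification pattern used above: the pool broadcasts a reset
 * event so that descriptor set wrappers can mark themselves as unusable instead
 * of holding dangling VkDescriptorSet handles.
 */
#include <functional>
#include <vector>

struct DemoDescriptorPool
{
    std::vector<std::function<void()> > on_reset_subscribers;

    void reset()
    {
        /* ... vkResetDescriptorPool() would be issued here ... */

        for (auto& subscriber : on_reset_subscribers)
        {
            subscriber(); /* each wrapper flips its "usable" flag to false */
        }
    }
};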
/** Please see header for specification */
bool Anvil::DescriptorSetLayout::init()
{
    bool     result    = false;
    VkResult result_vk;

    anvil_assert(m_layout == VK_NULL_HANDLE);

    /* Bake the Vulkan object */
    auto create_info_ptr = m_create_info_ptr->create_descriptor_set_layout_create_info(m_device_ptr);

    if (create_info_ptr == nullptr)
    {
        anvil_assert(create_info_ptr != nullptr);

        goto end;
    }

    result_vk = Anvil::Vulkan::vkCreateDescriptorSetLayout(m_device_ptr->get_device_vk(),
                                                           create_info_ptr->struct_chain_ptr->get_root_struct(),
                                                           nullptr, /* pAllocator */
                                                          &m_layout);

    anvil_assert_vk_call_succeeded(result_vk);
    if (is_vk_call_successful(result_vk) )
    {
        set_vk_handle(m_layout);
    }

    result = is_vk_call_successful(result_vk);

end:
    return result;
}
/** Please see header for specification */
bool Anvil::PipelineCache::merge(uint32_t                           in_n_pipeline_caches,
                                 const Anvil::PipelineCache* const* in_src_cache_ptrs)
{
    VkResult                     result_vk;
    std::vector<VkPipelineCache> src_pipeline_caches(in_n_pipeline_caches);

    anvil_assert(in_n_pipeline_caches > 0);

    for (uint32_t n_pipeline_cache = 0;
                  n_pipeline_cache < in_n_pipeline_caches;
                ++n_pipeline_cache)
    {
        src_pipeline_caches[n_pipeline_cache] = in_src_cache_ptrs[n_pipeline_cache]->get_pipeline_cache();
    }

    lock();
    {
        result_vk = Anvil::Vulkan::vkMergePipelineCaches(m_device_ptr->get_device_vk(),
                                                         m_pipeline_cache,
                                                         in_n_pipeline_caches,
                                                        &src_pipeline_caches.at(0) );
    }
    unlock();

    anvil_assert_vk_call_succeeded(result_vk);

    return is_vk_call_successful(result_vk);
}
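/* Side note: sizeof() applied to a std::vector measures the vector object itself
 * (its internal pointers), never the number of stored elements, so the
 * sizeof(x) / sizeof(x[0]) idiom is only valid for raw arrays; vectors must use
 * size(). Standalone sketch:
 */
#include <iostream>
#include <vector>

int main()
{
    int              raw_array[16];
    std::vector<int> vec      (16);

    std::cout << sizeof(raw_array) / sizeof(raw_array[0]) << "\n"; /* 16: valid for raw arrays        */
    std::cout << sizeof(vec)       / sizeof(vec[0])       << "\n"; /* implementation constant, not 16 */
    std::cout << vec.size()                               << "\n"; /* 16: the correct element count   */

    return 0;
}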
Example no. 4
/* Please see header for specification */
bool Anvil::DescriptorPool::alloc_descriptor_sets(uint32_t                     n_sets,
                                                  Anvil::DescriptorSetLayout** descriptor_set_layouts_ptr,
                                                  VkDescriptorSet*             out_descriptor_sets_vk_ptr)
{
    VkDescriptorSetAllocateInfo ds_alloc_info;
    VkResult                    result_vk;

    m_ds_layout_cache.resize(n_sets);

    for (uint32_t n_set = 0;
                  n_set < n_sets;
                ++n_set)
    {
        m_ds_layout_cache[n_set] = descriptor_set_layouts_ptr[n_set]->get_layout();
    }

    ds_alloc_info.descriptorPool     = m_pool;
    ds_alloc_info.descriptorSetCount = n_sets;
    ds_alloc_info.pNext              = nullptr;
    ds_alloc_info.pSetLayouts        = &m_ds_layout_cache[0];
    ds_alloc_info.sType              = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;

    result_vk = vkAllocateDescriptorSets(m_device_ptr->get_device_vk(),
                                        &ds_alloc_info,
                                         out_descriptor_sets_vk_ptr);

    anvil_assert_vk_call_succeeded(result_vk);
    return is_vk_call_successful(result_vk);
}
Example no. 5
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info = {};
    VkResult               result        (VK_ERROR_DEVICE_LOST);

    switch (m_device_ptr->get_type() )
    {
        case Anvil::DEVICE_TYPE_SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);
    return is_vk_call_successful(result);
}
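/* A standalone RAII sketch (not Anvil code) pairing vmaCreateAllocator() with its
 * documented counterpart vmaDestroyAllocator(), so a successfully created
 * allocator cannot leak. Assumes the VMA header is available.
 */
#include "vk_mem_alloc.h"

class ScopedVmaAllocator
{
public:
    explicit ScopedVmaAllocator(const VmaAllocatorCreateInfo& in_create_info)
        :m_allocator(VK_NULL_HANDLE)
    {
        vmaCreateAllocator(&in_create_info,
                           &m_allocator);
    }

    ~ScopedVmaAllocator()
    {
        if (m_allocator != VK_NULL_HANDLE)
        {
            vmaDestroyAllocator(m_allocator);
        }
    }

    VmaAllocator get() const
    {
        return m_allocator;
    }

private:
    VmaAllocator m_allocator;
};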
Example no. 6
/* Please see header for specification */
bool Anvil::Buffer::set_memory(Anvil::MemoryBlock* memory_block_ptr)
{
    bool     result = false;
    VkResult result_vk;

    if (memory_block_ptr == nullptr)
    {
        anvil_assert(!(memory_block_ptr == nullptr) );

        goto end;
    }

    if (m_memory_block_ptr != nullptr)
    {
        anvil_assert(m_memory_block_ptr == nullptr);

        goto end;
    }

    /* Bind the memory object to the buffer object */
    m_memory_block_ptr = memory_block_ptr;
    m_memory_block_ptr->retain();

    result_vk = vkBindBufferMemory(m_device_ptr->get_device_vk(),
                                   m_buffer,
                                   m_memory_block_ptr->get_memory(),
                                   memory_block_ptr->get_start_offset() );
    anvil_assert_vk_call_succeeded(result_vk);

    result = is_vk_call_successful(result_vk);
end:
    return result;
}
Example no. 7
/* Please see header for specification */
bool Anvil::Fence::reset_fences(const uint32_t in_n_fences,
                                Fence*         in_fences)
{
    const Anvil::BaseDevice* device_ptr           = nullptr;
    auto                     fence_cache          = std::vector<VkFence>(in_n_fences);
    const uint32_t           fence_cache_capacity = static_cast<uint32_t>(fence_cache.size() );
    bool                     result               = true;
    VkResult                 result_vk;

    if (in_n_fences == 0)
    {
        goto end;
    }

    for (uint32_t n_fence_batch = 0;
                  n_fence_batch < (in_n_fences + fence_cache_capacity - 1) / fence_cache_capacity;
                ++n_fence_batch)
    {
        const uint32_t n_fences_remaining = in_n_fences - n_fence_batch * fence_cache_capacity;

        for (uint32_t n_fence = 0;
                      n_fence < n_fences_remaining;
                    ++n_fence)
        {
            Anvil::Fence& current_fence = in_fences[n_fence_batch * fence_cache_capacity + n_fence];

            anvil_assert(device_ptr == nullptr                 ||
                         device_ptr == current_fence.m_device_ptr);

            device_ptr           = current_fence.m_device_ptr;
            fence_cache[n_fence] = current_fence.m_fence;

            current_fence.lock();
        }
        {
            result_vk = vkResetFences(device_ptr->get_device_vk(),
                                      n_fences_remaining,
                                      (n_fences_remaining > 0) ? &fence_cache.at(0) : nullptr);
        }
        for (uint32_t n_fence = 0;
                      n_fence < n_fences_remaining;
                    ++n_fence)
        {
            Anvil::Fence& current_fence = in_fences[n_fence_batch * fence_cache_capacity + n_fence];

            current_fence.unlock();
        }

        anvil_assert_vk_call_succeeded(result_vk);

        if (!is_vk_call_successful(result_vk) )
        {
            result = false;
        }
    }

end:
    return result;
}
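/* The batch count in reset_fences() above is a ceiling division, computed in
 * integer arithmetic, so no batch is empty and none exceeds the cache capacity.
 * Standalone sketch:
 */
#include <cassert>

int main()
{
    const unsigned int total    = 10;
    const unsigned int capacity = 4;

    const unsigned int n_batches = (total + capacity - 1) / capacity; /* ceil(10 / 4) */

    assert(n_batches == 3); /* batches of size 4, 4 and 2 */

    return 0;
}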
/** Please see header for specification */
bool Anvil::PipelineCache::get_data(size_t* out_n_data_bytes_ptr,
                                    void*   out_data_ptr)
{
    VkResult result_vk;

    result_vk = Anvil::Vulkan::vkGetPipelineCacheData(m_device_ptr->get_device_vk(),
                                                      m_pipeline_cache,
                                                      out_n_data_bytes_ptr,
                                                      out_data_ptr);

    return is_vk_call_successful(result_vk);
}
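/* vkGetPipelineCacheData() follows Vulkan's usual two-call idiom; assuming the
 * wrapper above forwards a nullptr data pointer unchanged, a call site would
 * first query the blob size and then fetch the payload. Hypothetical sketch
 * (fetch_cache_blob is an illustrative helper, not Anvil API):
 */
#include <vector>

std::vector<unsigned char> fetch_cache_blob(Anvil::PipelineCache* in_pipeline_cache_ptr)
{
    std::vector<unsigned char> data;
    size_t                     data_size = 0;

    if (in_pipeline_cache_ptr->get_data(&data_size, nullptr) && data_size > 0)
    {
        data.resize(data_size);

        in_pipeline_cache_ptr->get_data(&data_size, data.data() );
    }

    return data;
}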
Example no. 9
bool Anvil::Event::init()
{
    VkEventCreateInfo event_create_info;
    VkResult          result           (VK_ERROR_INITIALIZATION_FAILED);

    /* Spawn a new event */
    event_create_info.flags = 0;
    event_create_info.pNext = nullptr;
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;

    result = vkCreateEvent(m_device_ptr->get_device_vk(),
                          &event_create_info,
                           nullptr, /* pAllocator */
                          &m_event);

    anvil_assert_vk_call_succeeded(result);
    if (is_vk_call_successful(result) )
    {
        set_vk_handle(m_event);
    }

    return is_vk_call_successful(result);
}
/** Please see header for specification */
Anvil::PipelineCache::PipelineCache(const Anvil::BaseDevice* in_device_ptr,
                                    bool                     in_mt_safe,
                                    size_t                   in_initial_data_size,
                                    const void*              in_initial_data)
    :DebugMarkerSupportProvider(in_device_ptr,
                                Anvil::ObjectType::PIPELINE_CACHE),
     MTSafetySupportProvider   (in_mt_safe),
     m_device_ptr              (in_device_ptr),
     m_pipeline_cache          (VK_NULL_HANDLE)
{
    VkPipelineCacheCreateInfo cache_create_info;
    VkResult                  result_vk        (VK_ERROR_INITIALIZATION_FAILED);

    ANVIL_REDUNDANT_VARIABLE(result_vk);

    cache_create_info.flags           = 0;
    cache_create_info.initialDataSize = in_initial_data_size;
    cache_create_info.pInitialData    = in_initial_data;
    cache_create_info.pNext           = nullptr;
    cache_create_info.sType           = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;

    result_vk = Anvil::Vulkan::vkCreatePipelineCache(m_device_ptr->get_device_vk(),
                                                    &cache_create_info,
                                                     nullptr, /* pAllocator */
                                                    &m_pipeline_cache);

    anvil_assert_vk_call_succeeded(result_vk);
    if (is_vk_call_successful(result_vk) )
    {
        set_vk_handle(m_pipeline_cache);
    }

    anvil_assert(m_pipeline_cache != VK_NULL_HANDLE);

    /* Register the instance */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectType::PIPELINE_CACHE,
                                                  this);
}
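/* The (in_initial_data_size, in_initial_data) pair accepted by the constructor
 * above allows warm-starting a pipeline cache from a blob captured earlier via
 * get_data(). Hypothetical sketch (device_ptr and old_cache_ptr are illustrative
 * names; fetch_cache_blob() is the helper sketched after get_data() above):
 */
void warm_start_pipeline_cache(const Anvil::BaseDevice* device_ptr,
                               Anvil::PipelineCache*    old_cache_ptr)
{
    const std::vector<unsigned char> blob = fetch_cache_blob(old_cache_ptr);

    Anvil::PipelineCache warm_cache(device_ptr,
                                    true, /* in_mt_safe */
                                    blob.size(),
                                    blob.empty() ? nullptr : blob.data() );

    /* ... hand warm_cache to pipeline managers, then let it go out of scope ... */
}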
Example no. 11
/* Please see header for specification */
bool Anvil::RenderPass::init()
{
    std::vector<VkAttachmentDescription> renderpass_attachments_vk;
    VkRenderPassCreateInfo               render_pass_create_info;
    bool                                 result                           (false);
    VkResult                             result_vk;
    std::vector<VkSubpassDependency>     subpass_dependencies_vk;
    std::vector<VkSubpassDescription>    subpass_descriptions_vk;

    /* NOTE: We need to reserve storage in advance for each of the vectors below,
     *       so that it is guaranteed the push_back() calls do not cause a realloc()
     *       and invalidate already cached pointers to filled Vulkan descriptors.
     *       To achieve this, we could encapsulate the code below in a two-iteration loop,
     *       whose first iteration would count how many elements we need for each vector,
     *       and the second one would reserve that space and proceed with inserting the elements.
     *
     *       That would look ugly though.
     *
     *       In order to keep things clean & simple, we instantiate the following structure on heap
     *       for each subpass. On subpass level, we can easily predict how many elements in the worst
     *       case scenario we're going to insert, so that will do the trick. Slight performance cost,
     *       but baking is an offline task, so we should be OK. (A standalone sketch of
     *       the invalidation hazard being avoided follows this function.)
     **/
    typedef struct SubPassAttachmentSet
    {
        /** Constructor.
         *
         *  @param in_n_max_color_attachments    Maximum number of color attachments the subpass will define.
         *  @param in_n_max_input_attachments    Maximum number of input attachments the subpass will define.
         *  @param in_n_max_preserve_attachments Maximum number of preserve attachments the subpass will define.
         **/
        explicit SubPassAttachmentSet(uint32_t in_n_max_color_attachments,
                                      uint32_t in_n_max_input_attachments,
                                      uint32_t in_n_max_preserve_attachments)
            :n_max_color_attachments   (in_n_max_color_attachments),
             n_max_input_attachments   (in_n_max_input_attachments),
             n_max_preserve_attachments(in_n_max_preserve_attachments)
        {
            color_attachments_vk.reserve        (n_max_color_attachments);
            input_attachments_vk.reserve        (n_max_input_attachments);
            preserve_attachments_vk.reserve     (n_max_preserve_attachments);
            resolve_color_attachments_vk.reserve(n_max_color_attachments);
        }

        /** Helper function which verifies the maximum number of attachments specified at
         *  creation time is not exceeded.
         **/
        void do_sanity_checks()
        {
            anvil_assert(color_attachments_vk.size()         <= n_max_color_attachments);
            anvil_assert(input_attachments_vk.size()         <= n_max_input_attachments);
            anvil_assert(preserve_attachments_vk.size()      <= n_max_preserve_attachments);
            anvil_assert(resolve_color_attachments_vk.size() <= n_max_color_attachments);
        }

        std::vector<VkAttachmentReference> color_attachments_vk;
        VkAttachmentReference              depth_attachment_vk;
        std::vector<VkAttachmentReference> input_attachments_vk;
        std::vector<uint32_t>              preserve_attachments_vk;
        std::vector<VkAttachmentReference> resolve_color_attachments_vk;
    private:
        uint32_t n_max_color_attachments;
        uint32_t n_max_input_attachments;
        uint32_t n_max_preserve_attachments;
    } SubPassAttachmentSet;

    std::vector<std::unique_ptr<SubPassAttachmentSet> > subpass_attachment_sets;

    anvil_assert(m_render_pass == VK_NULL_HANDLE);

    /* Set up helper descriptor storage space */
    subpass_dependencies_vk.reserve(m_render_pass_create_info_ptr->m_subpass_dependencies.size() );
    subpass_descriptions_vk.reserve(m_render_pass_create_info_ptr->m_subpasses.size() );

    for (auto renderpass_attachment_iterator  = m_render_pass_create_info_ptr->m_attachments.cbegin();
              renderpass_attachment_iterator != m_render_pass_create_info_ptr->m_attachments.cend();
            ++renderpass_attachment_iterator)
    {
        VkAttachmentDescription attachment_vk;

        attachment_vk.finalLayout    = renderpass_attachment_iterator->final_layout;
        attachment_vk.flags          = (renderpass_attachment_iterator->may_alias) ? VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT
                                                                                   : 0u;
        attachment_vk.format         = renderpass_attachment_iterator->format;
        attachment_vk.initialLayout  = renderpass_attachment_iterator->initial_layout;
        attachment_vk.loadOp         = renderpass_attachment_iterator->color_depth_load_op;
        attachment_vk.samples        = static_cast<VkSampleCountFlagBits>(renderpass_attachment_iterator->sample_count);
        attachment_vk.stencilLoadOp  = renderpass_attachment_iterator->stencil_load_op;
        attachment_vk.stencilStoreOp = renderpass_attachment_iterator->stencil_store_op;
        attachment_vk.storeOp        = renderpass_attachment_iterator->color_depth_store_op;

        renderpass_attachments_vk.push_back(attachment_vk);
    }

    for (auto subpass_dependency_iterator  = m_render_pass_create_info_ptr->m_subpass_dependencies.cbegin();
              subpass_dependency_iterator != m_render_pass_create_info_ptr->m_subpass_dependencies.cend();
            ++subpass_dependency_iterator)
    {
        VkSubpassDependency dependency_vk;

        dependency_vk.dependencyFlags = ((subpass_dependency_iterator->by_region) ? VK_DEPENDENCY_BY_REGION_BIT : 0u);
        dependency_vk.dstAccessMask   = subpass_dependency_iterator->destination_access_mask;
        dependency_vk.dstStageMask    = subpass_dependency_iterator->destination_stage_mask;
        dependency_vk.dstSubpass      = (subpass_dependency_iterator->destination_subpass_ptr != nullptr) ? subpass_dependency_iterator->destination_subpass_ptr->index
                                                                                                         : VK_SUBPASS_EXTERNAL;
        dependency_vk.srcAccessMask   = subpass_dependency_iterator->source_access_mask;
        dependency_vk.srcStageMask    = subpass_dependency_iterator->source_stage_mask;
        dependency_vk.srcSubpass      = (subpass_dependency_iterator->source_subpass_ptr != nullptr) ? subpass_dependency_iterator->source_subpass_ptr->index
                                                                                                    : VK_SUBPASS_EXTERNAL;

        subpass_dependencies_vk.push_back(dependency_vk);
    }

    /* We now have all the data needed to create Vulkan subpass instances. */
    for (auto subpass_iterator  = m_render_pass_create_info_ptr->m_subpasses.cbegin();
              subpass_iterator != m_render_pass_create_info_ptr->m_subpasses.cend();
            ++subpass_iterator)
    {
        std::unique_ptr<SubPassAttachmentSet> current_subpass_attachment_set_ptr;
        uint32_t                              highest_subpass_color_attachment_location = UINT32_MAX;
        uint32_t                              highest_subpass_input_attachment_index    = UINT32_MAX;
        bool                                  need_color_resolve_attachments            = false;
        VkSubpassDescription                  subpass_vk;
        VkAttachmentReference                 unused_reference;

        unused_reference.attachment = VK_ATTACHMENT_UNUSED;
        unused_reference.layout     = VK_IMAGE_LAYOUT_UNDEFINED;

        /* Determine whether any of the color attachments are going to be resolved. */
        for (auto subpass_color_attachment_iterator  = (*subpass_iterator)->color_attachments_map.cbegin();
                  subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend();
                ++subpass_color_attachment_iterator)
        {
            if (subpass_color_attachment_iterator->second.resolve_attachment_index != UINT32_MAX)
            {
                need_color_resolve_attachments = true;

                break;
            }
        }

        /* Determine the highest color attachment location & input attachment index. */
        for (auto subpass_color_attachment_iterator  = (*subpass_iterator)->color_attachments_map.cbegin();
                  subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend();
                ++subpass_color_attachment_iterator)
        {
            if (highest_subpass_color_attachment_location == UINT32_MAX                                ||
                subpass_color_attachment_iterator->first  > highest_subpass_color_attachment_location)
            {
                highest_subpass_color_attachment_location = subpass_color_attachment_iterator->first;
            }
        }

        for (auto subpass_input_attachment_iterator  = (*subpass_iterator)->input_attachments_map.cbegin();
                  subpass_input_attachment_iterator != (*subpass_iterator)->input_attachments_map.cend();
                ++subpass_input_attachment_iterator)
        {
            if (highest_subpass_input_attachment_index   == UINT32_MAX                               ||
                subpass_input_attachment_iterator->first >  highest_subpass_input_attachment_index)
            {
                highest_subpass_input_attachment_index = subpass_input_attachment_iterator->first;
            }
        }

        /* Instantiate a new subpass attachment set for current subpass */
        current_subpass_attachment_set_ptr.reset(
            new SubPassAttachmentSet(highest_subpass_color_attachment_location + 1,                             /* n_max_color_attachments     */
                                     static_cast<uint32_t>((*subpass_iterator)->input_attachments_map.size() ), /* n_max_input_attachments     */
                                     static_cast<uint32_t>((*subpass_iterator)->preserved_attachments.size() )  /* n_max_preserved_attachments */)
        );

        /* Prepare unused VK color, depth, input & resolve attachment descriptors */
        for (uint32_t n_color_attachment = 0;
                      n_color_attachment < static_cast<uint32_t>(highest_subpass_color_attachment_location + 1);
                    ++n_color_attachment)
        {
            current_subpass_attachment_set_ptr->color_attachments_vk.push_back(unused_reference);

            if (need_color_resolve_attachments)
            {
                current_subpass_attachment_set_ptr->resolve_color_attachments_vk.push_back(unused_reference);
            }
        }

        for (uint32_t n_input_attachment = 0;
                      n_input_attachment < static_cast<uint32_t>(highest_subpass_input_attachment_index + 1);
                    ++n_input_attachment)
        {
            current_subpass_attachment_set_ptr->input_attachments_vk.push_back(unused_reference);
        }

        /* Update those of the color/depth/input references, for which we have been provided actual descriptors */
        for (auto subpass_color_attachment_iterator  = (*subpass_iterator)->color_attachments_map.cbegin();
                  subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend();
                ++subpass_color_attachment_iterator)
        {
            current_subpass_attachment_set_ptr->color_attachments_vk[subpass_color_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment(subpass_color_attachment_iterator->second);

            if (need_color_resolve_attachments)
            {
                if (subpass_color_attachment_iterator->second.resolve_attachment_index != UINT32_MAX)
                {
                    current_subpass_attachment_set_ptr->resolve_color_attachments_vk[subpass_color_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_for_resolve_attachment(subpass_iterator,
                                                                                                                                                                                                                subpass_color_attachment_iterator);
                }
            }
        }

        if ((*subpass_iterator)->depth_stencil_attachment.attachment_index != UINT32_MAX)
        {
            current_subpass_attachment_set_ptr->depth_attachment_vk = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment((*subpass_iterator)->depth_stencil_attachment);
        }
        else
        {
            current_subpass_attachment_set_ptr->depth_attachment_vk = unused_reference;
        }

        for (auto subpass_input_attachment_iterator  = (*subpass_iterator)->input_attachments_map.cbegin();
                  subpass_input_attachment_iterator != (*subpass_iterator)->input_attachments_map.cend();
                ++subpass_input_attachment_iterator)
        {
            current_subpass_attachment_set_ptr->input_attachments_vk[subpass_input_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment(subpass_input_attachment_iterator->second);
        }

        /* Fill the preserved attachments vector. These do not use indices or locations, so the process is much simpler */
        for (auto subpass_preserve_attachment_iterator  = (*subpass_iterator)->preserved_attachments.cbegin();
                  subpass_preserve_attachment_iterator != (*subpass_iterator)->preserved_attachments.cend();
                ++subpass_preserve_attachment_iterator)
        {
            current_subpass_attachment_set_ptr->preserve_attachments_vk.push_back(
                m_render_pass_create_info_ptr->m_attachments.at(subpass_preserve_attachment_iterator->attachment_index).index
            );
        }

        /* Prepare the VK subpass descriptor */
        const uint32_t n_color_attachments     = highest_subpass_color_attachment_location + 1;
        const uint32_t n_input_attachments     = highest_subpass_input_attachment_index    + 1;
        const uint32_t n_preserved_attachments = static_cast<uint32_t>((*subpass_iterator)->preserved_attachments.size() );
        const uint32_t n_resolved_attachments  = ((*subpass_iterator)->resolved_attachments_map.size() == 0) ? 0
                                                                                                             : n_color_attachments;

        subpass_vk.colorAttachmentCount              = n_color_attachments;
        subpass_vk.flags                             = 0;
        subpass_vk.inputAttachmentCount              = n_input_attachments;
        subpass_vk.pColorAttachments                 = (n_color_attachments > 0)                                                      ? &current_subpass_attachment_set_ptr->color_attachments_vk.at(0)
                                                                                                                                      : nullptr;
        subpass_vk.pDepthStencilAttachment           = ((*subpass_iterator)->depth_stencil_attachment.attachment_index != UINT32_MAX) ? &current_subpass_attachment_set_ptr->depth_attachment_vk
                                                                                                                                      : nullptr;
        subpass_vk.pInputAttachments                 = (n_input_attachments > 0)                                                      ? &current_subpass_attachment_set_ptr->input_attachments_vk.at(0)
                                                                                                                                      : nullptr;
        subpass_vk.pipelineBindPoint                 = VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass_vk.pPreserveAttachments              = (n_preserved_attachments > 0) ? &current_subpass_attachment_set_ptr->preserve_attachments_vk.at(0)
                                                                                     : nullptr;
        subpass_vk.preserveAttachmentCount           = n_preserved_attachments;
        subpass_vk.pResolveAttachments               = (n_resolved_attachments > 0) ? &current_subpass_attachment_set_ptr->resolve_color_attachments_vk.at(0)
                                                                                    : nullptr;

        current_subpass_attachment_set_ptr->do_sanity_checks();

        subpass_attachment_sets.push_back(
            std::move(current_subpass_attachment_set_ptr)
        );

        subpass_descriptions_vk.push_back(subpass_vk);
    }

    /* Set up a create info descriptor and spawn a new Vulkan RenderPass object. */
    render_pass_create_info.attachmentCount = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_attachments.size         () );
    render_pass_create_info.dependencyCount = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_subpass_dependencies.size() );
    render_pass_create_info.subpassCount    = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_subpasses.size           () );
    render_pass_create_info.flags           = 0;
    render_pass_create_info.pAttachments    = (render_pass_create_info.attachmentCount > 0) ? &renderpass_attachments_vk.at(0)
                                                                                            : nullptr;
    render_pass_create_info.pDependencies   = (render_pass_create_info.dependencyCount > 0) ? &subpass_dependencies_vk.at(0)
                                                                                            : nullptr;
    render_pass_create_info.pNext           = nullptr;
    render_pass_create_info.pSubpasses      = (render_pass_create_info.subpassCount > 0) ? &subpass_descriptions_vk.at(0)
                                                                                         : nullptr;
    render_pass_create_info.sType           = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;

    result_vk = vkCreateRenderPass(m_device_ptr->get_device_vk(),
                                  &render_pass_create_info,
                                   nullptr, /* pAllocator */
                                  &m_render_pass);

    if (!is_vk_call_successful(result_vk) )
    {
        anvil_assert_vk_call_succeeded(result_vk);

        goto end;
    }

    set_vk_handle(m_render_pass);

    result  = true;

end:
    return result;
}
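/* Standalone sketch of the invalidation hazard the NOTE at the top of init()
 * works around: push_back() beyond the current capacity reallocates, which turns
 * any previously cached element pointer into a dangling one. reserve() (or, as
 * above, per-subpass heap allocations sized for the worst case) prevents that.
 */
#include <vector>

int main()
{
    std::vector<int> values;

    values.reserve(2);            /* capacity fixed up front            */
    values.push_back(10);

    const int* cached_ptr = &values[0];

    values.push_back(20);         /* within capacity: no reallocation,  */
                                  /* so cached_ptr remains valid        */

    return (*cached_ptr == 10) ? 0 : 1;
}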
Example no. 12
bool Anvil::Fence::init()
{
    VkFenceCreateInfo                              fence_create_info;
    VkResult                                       result           (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkFenceCreateInfo>        struct_chainer;
    Anvil::StructChainUniquePtr<VkFenceCreateInfo> struct_chain_ptr;

    /* Sanity checks */
    if (m_create_info_ptr->get_exportable_external_fence_handle_types() != Anvil::EXTERNAL_FENCE_HANDLE_TYPE_NONE)
    {
        if (!m_device_ptr->get_extension_info()->khr_external_fence() )
        {
            anvil_assert(m_device_ptr->get_extension_info()->khr_external_fence() );

            goto end;
        }
    }

    /* Spawn a new fence */
    {
        fence_create_info.flags = (m_create_info_ptr->should_create_signalled() ) ? VK_FENCE_CREATE_SIGNALED_BIT
                                                                                  : 0u;
        fence_create_info.pNext = nullptr;
        fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

        struct_chainer.append_struct(fence_create_info);
    }

    if (m_create_info_ptr->get_exportable_external_fence_handle_types() != Anvil::EXTERNAL_FENCE_HANDLE_TYPE_NONE)
    {
        VkExportFenceCreateInfo create_info;

        create_info.handleTypes = Anvil::Utils::convert_external_fence_handle_type_bits_to_vk_external_fence_handle_type_flags(m_create_info_ptr->get_exportable_external_fence_handle_types() );
        create_info.pNext       = nullptr;
        create_info.sType       = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR;

        struct_chainer.append_struct(create_info);
    }

    #if defined(_WIN32)
    {
        const Anvil::ExternalNTHandleInfo* nt_handle_info_ptr = nullptr;

        if (m_create_info_ptr->get_exportable_nt_handle_info(&nt_handle_info_ptr) )
        {
            VkExportFenceWin32HandleInfoKHR handle_info;

            anvil_assert(nt_handle_info_ptr                                                                                                   != nullptr);
            anvil_assert(m_create_info_ptr->get_exportable_external_fence_handle_types() & Anvil::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT);

            handle_info.dwAccess    = nt_handle_info_ptr->access;
            handle_info.name        = (nt_handle_info_ptr->name.size() > 0) ? &nt_handle_info_ptr->name.at(0)
                                                                            : nullptr;
            handle_info.pAttributes = nt_handle_info_ptr->attributes_ptr;
            handle_info.pNext       = nullptr;
            handle_info.sType       = VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR;

            struct_chainer.append_struct(handle_info);
        }
    }
    #endif

    struct_chain_ptr = struct_chainer.create_chain();
    if (struct_chain_ptr == nullptr)
    {
        anvil_assert(struct_chain_ptr != nullptr);

        goto end;
    }

    result = vkCreateFence(m_device_ptr->get_device_vk(),
                           struct_chain_ptr->get_root_struct(),
                           nullptr, /* pAllocator */
                          &m_fence);

    anvil_assert_vk_call_succeeded(result);
    if (is_vk_call_successful(result) )
    {
        set_vk_handle(m_fence);
    }

end:
    return is_vk_call_successful(result);
}
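/* What StructChainer ultimately builds for the vkCreateFence() call above is a
 * plain pNext chain. Minimal equivalent with raw core-Vulkan structures (a
 * sketch, not Anvil code; the caller owns both structs for the duration of the
 * create call and fills handleTypes itself):
 */
VkFenceCreateInfo build_exportable_fence_create_info(VkExportFenceCreateInfo* inout_export_info_ptr)
{
    VkFenceCreateInfo fence_create_info = {};

    inout_export_info_ptr->sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO;
    inout_export_info_ptr->pNext = nullptr;

    fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fence_create_info.pNext = inout_export_info_ptr; /* root struct -> export info */
    fence_create_info.flags = 0;

    return fence_create_info;
}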
Example no. 13
/** Please see header for specification */
VkResult Anvil::Queue::present(Anvil::Swapchain*        in_swapchain_ptr,
                               uint32_t                 in_swapchain_image_index,
                               uint32_t                 in_n_wait_semaphores,
                               Anvil::Semaphore* const* in_wait_semaphore_ptrs)
{
    VkResult                                presentation_results   [MAX_SWAPCHAINS];
    VkResult                                result;
    Anvil::StructChainer<VkPresentInfoKHR>  struct_chainer;
    const ExtensionKHRSwapchainEntrypoints* swapchain_entrypoints_ptr(nullptr);
    VkSwapchainKHR                          swapchains_vk          [MAX_SWAPCHAINS];
    std::vector<VkSemaphore>                wait_semaphores_vk     (in_n_wait_semaphores);

    /* If the application is only interested in off-screen rendering, do *not* post the present request,
     * since the fake swapchain image is not presentable. We still have to wait on the user-specified
     * semaphores though. */
    if (in_swapchain_ptr != nullptr)
    {
        Anvil::Window* window_ptr = nullptr;

        window_ptr = in_swapchain_ptr->get_create_info_ptr()->get_window();

        if (window_ptr != nullptr)
        {
            const WindowPlatform window_platform = window_ptr->get_platform();

            if (window_platform == WINDOW_PLATFORM_DUMMY                    ||
                window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS)
            {
                static const VkPipelineStageFlags dst_stage_mask(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

                m_device_ptr->get_universal_queue(0)->submit(
                    SubmitInfo::create(nullptr,
                                       0,       /* in_n_semaphores_to_signal           */
                                       nullptr, /* in_opt_semaphore_to_signal_ptrs_ptr */
                                       in_n_wait_semaphores,
                                       in_wait_semaphore_ptrs,
                                      &dst_stage_mask,
                                       true) /* in_should_block */
                );

                for (uint32_t n_presentation = 0;
                              n_presentation < 1;
                            ++n_presentation)
                {
                    OnPresentRequestIssuedCallbackArgument callback_argument(in_swapchain_ptr);

                    CallbacksSupportProvider::callback(QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                                                      &callback_argument);
                }

                result = VK_SUCCESS;
                goto end;
            }
        }
    }

    /* Convert arrays of Anvil objects to raw Vulkan handle arrays */
    for (uint32_t n_swapchain = 0;
                  n_swapchain < 1;
                ++n_swapchain)
    {
        swapchains_vk[n_swapchain] = in_swapchain_ptr->get_swapchain_vk();
    }

    for (uint32_t n_wait_semaphore = 0;
                  n_wait_semaphore < in_n_wait_semaphores;
                ++n_wait_semaphore)
    {
        wait_semaphores_vk[n_wait_semaphore] = in_wait_semaphore_ptrs[n_wait_semaphore]->get_semaphore();
    }

    {
        VkPresentInfoKHR image_presentation_info;

        image_presentation_info.pImageIndices      = &in_swapchain_image_index;
        image_presentation_info.pNext              = nullptr;
        image_presentation_info.pResults           = presentation_results;
        image_presentation_info.pSwapchains        = swapchains_vk;
        image_presentation_info.pWaitSemaphores    = (in_n_wait_semaphores != 0) ? &wait_semaphores_vk.at(0) : nullptr;
        image_presentation_info.sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
        image_presentation_info.swapchainCount     = 1;
        image_presentation_info.waitSemaphoreCount = in_n_wait_semaphores;

        struct_chainer.append_struct(image_presentation_info);
    }

    swapchain_entrypoints_ptr = &m_device_ptr->get_extension_khr_swapchain_entrypoints();

    present_lock_unlock(1,
                       &in_swapchain_ptr,
                        in_n_wait_semaphores,
                        in_wait_semaphore_ptrs,
                        true);
    {
        auto chain_ptr = struct_chainer.create_chain();

        result = swapchain_entrypoints_ptr->vkQueuePresentKHR(m_queue,
                                                              chain_ptr->get_root_struct() );
    }
    present_lock_unlock(1,
                       &in_swapchain_ptr,
                        in_n_wait_semaphores,
                        in_wait_semaphore_ptrs,
                        false);

    anvil_assert_vk_call_succeeded(result);

    if (is_vk_call_successful(result) )
    {
        for (uint32_t n_presentation = 0;
                      n_presentation < 1;
                    ++n_presentation)
        {
            anvil_assert(is_vk_call_successful(presentation_results[n_presentation]));

            /* Return the most important error code reported */
            if (result != VK_ERROR_DEVICE_LOST)
            {
                switch (presentation_results[n_presentation])
                {
                    case VK_ERROR_DEVICE_LOST:
                    {
                        result = VK_ERROR_DEVICE_LOST;

                        break;
                    }

                    case VK_ERROR_SURFACE_LOST_KHR:
                    {
                        if (result != VK_ERROR_DEVICE_LOST)
                        {
                            result = VK_ERROR_SURFACE_LOST_KHR;
                        }

                        break;
                    }

                    case VK_ERROR_OUT_OF_DATE_KHR:
                    {
                        if (result != VK_ERROR_DEVICE_LOST      &&
                            result != VK_ERROR_SURFACE_LOST_KHR)
                        {
                            result = VK_ERROR_OUT_OF_DATE_KHR;
                        }

                        break;
                    }

                    case VK_SUBOPTIMAL_KHR:
                    {
                        if (result != VK_ERROR_DEVICE_LOST      &&
                            result != VK_ERROR_SURFACE_LOST_KHR &&
                            result != VK_ERROR_OUT_OF_DATE_KHR)
                        {
                            result = VK_SUBOPTIMAL_KHR;
                        }

                        break;
                    }

                    default:
                    {
                        anvil_assert(presentation_results[n_presentation] == VK_SUCCESS);
                    }
                }
            }

            {
                OnPresentRequestIssuedCallbackArgument callback_argument(in_swapchain_ptr);

                CallbacksSupportProvider::callback(QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                                                  &callback_argument);
            }
        }
    }

end:
    return result;
}
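/* The result-folding switch in present() encodes a fixed severity order:
 * DEVICE_LOST > SURFACE_LOST > OUT_OF_DATE > SUBOPTIMAL > everything else.
 * The same rule restated as a standalone helper (a sketch, not Anvil code):
 */
static int present_result_severity(VkResult in_result)
{
    switch (in_result)
    {
        case VK_ERROR_DEVICE_LOST:      return 4;
        case VK_ERROR_SURFACE_LOST_KHR: return 3;
        case VK_ERROR_OUT_OF_DATE_KHR:  return 2;
        case VK_SUBOPTIMAL_KHR:         return 1;
        default:                        return 0; /* VK_SUCCESS and others */
    }
}

static VkResult fold_present_result(VkResult in_current,
                                    VkResult in_per_swapchain)
{
    return (present_result_severity(in_per_swapchain) > present_result_severity(in_current)) ? in_per_swapchain
                                                                                             : in_current;
}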
Example no. 14
/** For each specified Memory Allocator's Item, the function asks VMA for a memory region that
 *  can be assigned to corresponding wrapper instance. For each successfully handled request,
 *  a MemoryBlock instance is created, using the feedback provided by the library.
 *
 *  This function can be called multiple times.
 *
 *  @return true if all allocations have been handled successfully, false if there was at least
 *               one failure.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::bake(Anvil::MemoryAllocator::Items& in_items)
{
    bool     result    = true;
    VkResult result_vk = VK_ERROR_DEVICE_LOST;

    /* Go through all scheduled items and call the underlying library API to handle the request.
     *
     * For each successful allocation, wrap it with a MemoryBlock wrapper with a custom delete
     * handler, so that VMA is notified whenever a memory block it has provided memory backing for
     * has gone out of scope.
     */
    for (auto& current_item_ptr : in_items)
    {
        MemoryBlockUniquePtr new_memory_block_ptr(nullptr,
                                                  std::default_delete<Anvil::MemoryBlock>() );

        VmaAllocation                               allocation                  = VK_NULL_HANDLE;
        VmaAllocationCreateInfo                     allocation_create_info      = {};
        VmaAllocationInfo                           allocation_info             = {};
        VkMemoryRequirements                        memory_requirements_vk;
        Anvil::OnMemoryBlockReleaseCallbackFunction release_callback_function;
        VkMemoryHeapFlags                           required_mem_heap_flags     = 0;
        VkMemoryPropertyFlags                       required_mem_property_flags = 0;

        Anvil::Utils::get_vk_property_flags_from_memory_feature_flags(current_item_ptr->alloc_memory_required_features,
                                                                     &required_mem_property_flags,
                                                                     &required_mem_heap_flags);

        /* NOTE: VMA does not take required memory heap flags at the moment. Adding this is on their radar. */
        anvil_assert(required_mem_heap_flags == 0);

        memory_requirements_vk.alignment      = current_item_ptr->alloc_memory_required_alignment;
        memory_requirements_vk.memoryTypeBits = current_item_ptr->alloc_memory_supported_memory_types;
        memory_requirements_vk.size           = current_item_ptr->alloc_size;

        allocation_create_info.requiredFlags = required_mem_property_flags;

        result_vk = vmaAllocateMemory(m_vma_allocator_ptr->get_handle(),
                                     &memory_requirements_vk,
                                     &allocation_create_info,
                                     &allocation,
                                     &allocation_info);

        if (!is_vk_call_successful(result_vk) )
        {
            result = false;

            continue;
        }

        /* Bake the block and stash it */
        release_callback_function = std::bind(
            &VMAAllocator::on_vma_alloced_mem_block_gone_out_of_scope,
            m_vma_allocator_ptr,
            std::placeholders::_1,
            allocation
        );

        {
            auto create_info_ptr = Anvil::MemoryBlockCreateInfo::create_derived_with_custom_delete_proc(m_device_ptr,
                                                                                                        allocation_info.deviceMemory,
                                                                                                        memory_requirements_vk.memoryTypeBits,
                                                                                                        current_item_ptr->alloc_memory_required_features,
                                                                                                        allocation_info.memoryType,
                                                                                                        memory_requirements_vk.size,
                                                                                                        allocation_info.offset,
                                                                                                        release_callback_function);

            new_memory_block_ptr = Anvil::MemoryBlock::create(std::move(create_info_ptr) );
        }

        if (new_memory_block_ptr == nullptr)
        {
            anvil_assert(new_memory_block_ptr != nullptr);

            result = false;
            continue;
        }

        dynamic_cast<IMemoryBlockBackendSupport*>(new_memory_block_ptr.get() )->set_parent_memory_allocator_backend_ptr(shared_from_this(),
                                                                                                                        allocation);

        current_item_ptr->alloc_memory_block_ptr = std::move(new_memory_block_ptr);
        current_item_ptr->alloc_size             = memory_requirements_vk.size;
        current_item_ptr->is_baked               = true;

        m_vma_allocator_ptr->on_new_vma_mem_block_alloced();
    }

    return result;
}
/* Please see header for specification */
bool Anvil::RenderingSurface::init()
{
    const Anvil::DeviceType& device_type       (m_device_ptr->get_type() );
    bool                     init_successful   (false);
    auto                     instance_ptr      (m_create_info_ptr->get_instance_ptr() );
    uint32_t                 n_physical_devices(0);
    VkResult                 result            (VK_SUCCESS);
    const WindowPlatform     window_platform   (m_create_info_ptr->get_window_ptr()->get_platform());

    const bool               is_dummy_window_platform(window_platform == WINDOW_PLATFORM_DUMMY                     ||
                                                      window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);


    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            n_physical_devices = mgpu_device_ptr->get_n_physical_devices();

            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            n_physical_devices = 1;

            break;
        }

        default:
        {
            anvil_assert_fail();

            goto end;
        }
    }


    if (!is_dummy_window_platform)
    {
        auto window_ptr = m_create_info_ptr->get_window_ptr();

        #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT) && defined(_WIN32)
        {
            VkWin32SurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags     = 0;
            surface_create_info.hinstance = GetModuleHandle(nullptr);
            surface_create_info.hwnd      = window_ptr->get_handle();
            surface_create_info.pNext     = nullptr;
            surface_create_info.sType     = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_win32_surface_entrypoints().vkCreateWin32SurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                        &surface_create_info,
                                                                                                         nullptr, /* pAllocator */
                                                                                                        &m_surface);
        }
        #endif
        #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT) && !defined(_WIN32)
        {
            VkXcbSurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags       = 0;
            surface_create_info.window      = window_ptr->get_handle();
            surface_create_info.connection  = static_cast<xcb_connection_t*>(window_ptr->get_connection());
            surface_create_info.pNext       = nullptr;
            surface_create_info.sType       = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_xcb_surface_entrypoints().vkCreateXcbSurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                    &surface_create_info,
                                                                                                     nullptr, /* pAllocator */
                                                                                                    &m_surface);
        }
        #endif

        anvil_assert_vk_call_succeeded(result);
        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_surface);
        }
    }
    else
    {
        anvil_assert(window_platform != WINDOW_PLATFORM_UNKNOWN);
    }

    if (is_dummy_window_platform == false)
    {
        /* Is there at least one queue fam that can be used together with at least one physical device associated with
         * the logical device to present using the surface we've just spawned and the physical device user has specified? */
        const auto& queue_families(m_device_ptr->get_physical_device_queue_families() );

        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            Anvil::RenderingSurface::PhysicalDeviceCapabilities* physical_device_caps_ptr = nullptr;
            const Anvil::PhysicalDevice*                         physical_device_ptr      = nullptr;

            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = mgpu_device_ptr->get_physical_device(n_physical_device);
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = sgpu_device_ptr->get_physical_device();
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }

            for (uint32_t n_queue_family = 0;
                          n_queue_family < static_cast<uint32_t>(queue_families.size() );
                        ++n_queue_family)
            {
                VkBool32 is_presentation_supported = VK_FALSE;

                {
                    const auto& khr_surface_entrypoints = instance_ptr->get_extension_khr_surface_entrypoints();

                    result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceSupportKHR(physical_device_ptr->get_physical_device(),
                                                                                          n_queue_family,
                                                                                          m_surface,
                                                                                         &is_presentation_supported);
                }

                if (is_vk_call_successful(result)         &&
                    is_presentation_supported == VK_TRUE)
                {
                    physical_device_caps_ptr->present_capable_queue_fams.push_back(n_queue_family);
                }
            }
        }
    }
    else
    {
        /* offscreen rendering. Any physical device that offers universal queue can be used to "present" */
        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    if (mgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device);
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        result_caps.present_capable_queue_fams.push_back(mgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    if (sgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = sgpu_device_ptr->get_physical_device();
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        result_caps.present_capable_queue_fams.push_back(sgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }
        }

        result = VK_SUCCESS;
    }

    if (!is_vk_call_successful(result) )
    {
        anvil_assert_vk_call_succeeded(result);

        init_successful = false;
    }
    else
    {
        /* Retrieve Vulkan object capabilities and cache them */
        cache_surface_properties();

        init_successful = true;
    }

end:
    return init_successful;
}
/* Please see header for specification */
bool Anvil::BasePipelineManager::get_shader_statistics(PipelineID                  in_pipeline_id,
                                                       Anvil::ShaderStage          in_shader_stage,
                                                       VkShaderStatisticsInfoAMD*  out_shader_statistics_ptr)
{
    Anvil::ExtensionAMDShaderInfoEntrypoints entrypoints            = m_device_ptr->get_extension_amd_shader_info_entrypoints();
    std::unique_lock<std::recursive_mutex>   mutex_lock;
    auto                                     mutex_ptr              = get_mutex();
    Pipelines::const_iterator                pipeline_iterator;
    Pipeline*                                pipeline_ptr           = nullptr;
    bool                                     result                 = false;
    const auto                               shader_stage_vk        = Anvil::Utils::get_shader_stage_flag_bits_from_shader_stage(in_shader_stage);
    size_t                                   shader_statistics_size = sizeof(VkShaderStatisticsInfoAMD);
    VkResult                                 vk_result;

    if (entrypoints.vkGetShaderInfoAMD == nullptr)
    {
        anvil_assert(!(entrypoints.vkGetShaderInfoAMD == nullptr));

        goto end;
    }

    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::move(
            std::unique_lock<std::recursive_mutex>(*mutex_ptr)
        );
    }

    if (m_outstanding_pipelines.size() > 0)
    {
        bake();
    }

    pipeline_iterator = m_baked_pipelines.find(in_pipeline_id);
    if (pipeline_iterator == m_baked_pipelines.end())
    {
        anvil_assert(!(pipeline_iterator == m_baked_pipelines.end()));

        goto end;
    }

    pipeline_ptr = pipeline_iterator->second.get();

    if (pipeline_ptr->baked_pipeline == VK_NULL_HANDLE)
    {
        bake();

        anvil_assert(pipeline_ptr->baked_pipeline != VK_NULL_HANDLE);
    }

    vk_result = entrypoints.vkGetShaderInfoAMD(m_device_ptr->get_device_vk(),
                                               pipeline_ptr->baked_pipeline,
                                               static_cast<VkShaderStageFlagBits>(shader_stage_vk),
                                               VK_SHADER_INFO_TYPE_STATISTICS_AMD,
                                              &shader_statistics_size,
                                               out_shader_statistics_ptr);

    if (vk_result == VK_ERROR_FEATURE_NOT_PRESENT)
    {
        goto end;
    }

    if (!is_vk_call_successful(vk_result)                                      ||
         shader_statistics_size           != sizeof(VkShaderStatisticsInfoAMD))
    {
        goto end;
    }

    result = true;

end:
    return result;
}
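/* A usage sketch for the query above. The manager pointer, pipeline ID and shader stage value are
 * illustrative names, not part of the original listing; the call only succeeds when the
 * VK_AMD_shader_info extension is available.
 */
void log_vgpr_usage(Anvil::BasePipelineManager* in_manager_ptr,
                    PipelineID                  in_pipeline_id)
{
    VkShaderStatisticsInfoAMD statistics = {};

    if (in_manager_ptr->get_shader_statistics(in_pipeline_id,
                                              Anvil::ShaderStage::FRAGMENT, /* stage value assumed */
                                             &statistics) )
    {
        printf("VGPRs used: %u\n",
               statistics.resourceUsage.numUsedVgprs);
    }
}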
/* Please see header for specification */
bool Anvil::BasePipelineManager::get_shader_info(PipelineID                  in_pipeline_id,
                                                 Anvil::ShaderStage          in_shader_stage,
                                                 Anvil::ShaderInfoType       in_info_type,
                                                 std::vector<unsigned char>* out_data_ptr)
{
    Anvil::ExtensionAMDShaderInfoEntrypoints entrypoints       = m_device_ptr->get_extension_amd_shader_info_entrypoints();
    std::unique_lock<std::recursive_mutex>   mutex_lock;
    auto                                     mutex_ptr         = get_mutex();
    Pipelines::const_iterator                pipeline_iterator;
    Pipeline*                                pipeline_ptr      = nullptr;
    size_t                                   out_data_size     = out_data_ptr->size();
    bool                                     result            = false;
    const auto                               shader_stage_vk   = Anvil::Utils::get_shader_stage_flag_bits_from_shader_stage(in_shader_stage);
    VkShaderInfoTypeAMD                      vk_info_type;
    VkResult                                 vk_result;

    if (entrypoints.vkGetShaderInfoAMD == nullptr)
    {
        anvil_assert(entrypoints.vkGetShaderInfoAMD != nullptr);

        goto end;
    }

    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::unique_lock<std::recursive_mutex>(*mutex_ptr);
    }

    if (m_outstanding_pipelines.size() > 0)
    {
        bake();
    }

    pipeline_iterator = m_baked_pipelines.find(in_pipeline_id);
    if (pipeline_iterator == m_baked_pipelines.end())
    {
        anvil_assert(pipeline_iterator != m_baked_pipelines.end());

        goto end;
    }

    pipeline_ptr = pipeline_iterator->second.get();

    if (pipeline_ptr->baked_pipeline == VK_NULL_HANDLE)
    {
        bake();

        anvil_assert(pipeline_ptr->baked_pipeline != VK_NULL_HANDLE);
    }

    switch (in_info_type)
    {
        case ShaderInfoType::BINARY:
        {
            vk_info_type = VK_SHADER_INFO_TYPE_BINARY_AMD;

            break;
        }

        case ShaderInfoType::DISASSEMBLY:
        {
            vk_info_type = VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD;

            break;
        }

        default:
        {
            anvil_assert(!"Unknown shader info type");

            goto end;
        }
    }

    if (out_data_size == 0)
    {
        vk_result = entrypoints.vkGetShaderInfoAMD(m_device_ptr->get_device_vk(),
                                                   pipeline_ptr->baked_pipeline,
                                                   static_cast<VkShaderStageFlagBits>(shader_stage_vk),
                                                   vk_info_type,
                                                  &out_data_size,
                                                   nullptr);

        if (vk_result == VK_ERROR_FEATURE_NOT_PRESENT)
        {
            goto end;
        }

        if (!is_vk_call_successful(vk_result)       ||
             out_data_size                    == 0)
        {
            goto end;
        }

        out_data_ptr->resize(out_data_size);
    }

    vk_result = entrypoints.vkGetShaderInfoAMD(m_device_ptr->get_device_vk(),
                                               pipeline_ptr->baked_pipeline,
                                               static_cast<VkShaderStageFlagBits>(shader_stage_vk),
                                               vk_info_type,
                                              &out_data_size,
                                              &out_data_ptr->at(0));

    if (vk_result == VK_ERROR_FEATURE_NOT_PRESENT)
    {
        goto end;
    }

    if (!is_vk_call_successful(vk_result)      ||
         out_data_size                    == 0)
    {
        goto end;
    }

    result = true;
end:
    return result;
}
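/* Analogous sketch for the disassembly path: passing an empty vector makes the function size the
 * output itself via the initial null-data call implemented above. Names are illustrative.
 */
void dump_shader_disassembly(Anvil::BasePipelineManager* in_manager_ptr,
                             PipelineID                  in_pipeline_id)
{
    std::vector<unsigned char> disassembly;

    if (in_manager_ptr->get_shader_info(in_pipeline_id,
                                        Anvil::ShaderStage::COMPUTE, /* stage value assumed */
                                        Anvil::ShaderInfoType::DISASSEMBLY,
                                       &disassembly) )
    {
        /* VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD returns ISA text */
        fwrite(&disassembly.at(0),
               1, /* size */
               disassembly.size(),
               stdout);
    }
}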
Esempio n. 18
0
/** Performs a number of image view type-specific sanity checks and creates the requested
 *  Vulkan image view instance.
 *
 *  For argument discussion, please see documentation for the constructors above.
 *
 *  @return true if the function executed successfully, false otherwise.
 **/
bool Anvil::ImageView::init()
{
    const auto                                  aspect_mask               = m_create_info_ptr->get_aspect           ();
    const auto                                  format                    = m_create_info_ptr->get_format           ();
    const auto                                  image_view_type           = m_create_info_ptr->get_type             ();
    const auto                                  n_base_layer              = m_create_info_ptr->get_base_layer       ();
    const auto                                  n_base_mip                = m_create_info_ptr->get_base_mipmap_level();
    const auto                                  n_layers                  = m_create_info_ptr->get_n_layers         ();
    const auto                                  n_mips                    = m_create_info_ptr->get_n_mipmaps        ();
    VkFormat                                    parent_image_format       = VK_FORMAT_UNDEFINED;
    uint32_t                                    parent_image_n_layers     = 0;
    uint32_t                                    parent_image_n_mipmaps    = 0;
    auto                                        parent_image_ptr          = m_create_info_ptr->get_parent_image();
    bool                                        result                    = false;
    VkResult                                    result_vk;
    Anvil::StructChainer<VkImageViewCreateInfo> struct_chainer;
    const auto&                                 swizzle_array             = m_create_info_ptr->get_swizzle_array();

    parent_image_format    = parent_image_ptr->get_create_info_ptr()->get_format();
    parent_image_n_mipmaps = parent_image_ptr->get_n_mipmaps                    ();

    if (parent_image_ptr->get_create_info_ptr()->get_type_vk() != VK_IMAGE_TYPE_3D)
    {
        parent_image_n_layers = parent_image_ptr->get_create_info_ptr()->get_n_layers();
    }
    else
    {
        parent_image_ptr->get_image_mipmap_size(0,       /* in_n_mipmap        */
                                                nullptr, /* out_opt_width_ptr  */
                                                nullptr, /* out_opt_height_ptr */
                                               &parent_image_n_layers);
    }

    if (!(parent_image_n_layers >= n_base_layer + n_layers))
    {
        anvil_assert(parent_image_n_layers >= n_base_layer + n_layers);

        goto end;
    }

    if (!(parent_image_n_mipmaps >= n_base_mip + n_mips))
    {
        anvil_assert(parent_image_n_mipmaps >= n_base_mip + n_mips);

        goto end;
    }

    if (((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_MUTABLE_FORMAT_BIT) == 0)      &&
         (parent_image_format                                                                                        != format))
    {
        anvil_assert(parent_image_format == format);

        goto end;
    }

    if (parent_image_ptr->get_create_info_ptr()->get_type_vk() == VK_IMAGE_TYPE_3D)
    {
        if (image_view_type == VK_IMAGE_VIEW_TYPE_2D       ||
            image_view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)
        {
            if (!m_device_ptr->get_extension_info()->khr_maintenance1() )
            {
                anvil_assert(m_device_ptr->get_extension_info()->khr_maintenance1());

                goto end;
            }

            if ((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_2D_ARRAY_COMPATIBLE_BIT) == 0)
            {
                anvil_assert((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_2D_ARRAY_COMPATIBLE_BIT) != 0);

                goto end;
            }
        }
    }

    /* Create the image view instance */
    {
        VkImageViewCreateInfo image_view_create_info;

        image_view_create_info.components.a                    = swizzle_array[3];
        image_view_create_info.components.b                    = swizzle_array[2];
        image_view_create_info.components.g                    = swizzle_array[1];
        image_view_create_info.components.r                    = swizzle_array[0];
        image_view_create_info.flags                           = 0;
        image_view_create_info.format                          = format;
        image_view_create_info.image                           = parent_image_ptr->get_image();
        image_view_create_info.pNext                           = nullptr;
        image_view_create_info.sType                           = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        image_view_create_info.subresourceRange.aspectMask     = aspect_mask;
        image_view_create_info.subresourceRange.baseArrayLayer = n_base_layer;
        image_view_create_info.subresourceRange.baseMipLevel   = n_base_mip;
        image_view_create_info.subresourceRange.layerCount     = n_layers;
        image_view_create_info.subresourceRange.levelCount     = n_mips;
        image_view_create_info.viewType                        = image_view_type;

        struct_chainer.append_struct(image_view_create_info);
    }

    {
        auto chain_ptr = struct_chainer.create_chain();

        result_vk = vkCreateImageView(m_device_ptr->get_device_vk(),
                                      chain_ptr->get_root_struct(),
                                      nullptr, /* pAllocator */
                                     &m_image_view);
    }

    if (!is_vk_call_successful(result_vk) )
    {
        anvil_assert_vk_call_succeeded(result_vk);

        goto end;
    }

    /* Cache the properties */
    set_vk_handle(m_image_view);

    /* All done */
    result = true;

end:
    return result;
}
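/* The checks above run whenever an image view wrapper is instantiated. A typical creation path,
 * mirroring the swapchain code later in this file, is sketched below; the device pointer type and
 * the ImageViewUniquePtr alias are assumed from the wrapper library's headers.
 */
Anvil::ImageViewUniquePtr create_color_view(Anvil::BaseDevice* in_device_ptr,
                                            Anvil::Image*      in_image_ptr,
                                            VkFormat           in_format)
{
    auto create_info_ptr = Anvil::ImageViewCreateInfo::create_2D(in_device_ptr,
                                                                 in_image_ptr,
                                                                 0, /* n_base_layer        */
                                                                 0, /* n_base_mipmap_level */
                                                                 1, /* n_mipmaps           */
                                                                 VK_IMAGE_ASPECT_COLOR_BIT,
                                                                 in_format,
                                                                 VK_COMPONENT_SWIZZLE_R,
                                                                 VK_COMPONENT_SWIZZLE_G,
                                                                 VK_COMPONENT_SWIZZLE_B,
                                                                 VK_COMPONENT_SWIZZLE_A);

    return Anvil::ImageView::create(std::move(create_info_ptr) );
}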
Esempio n. 19
0
/* Please see header for specification */
bool Anvil::Semaphore::reset()
{
    VkResult                                           result           (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkSemaphoreCreateInfo>        struct_chainer;
    Anvil::StructChainUniquePtr<VkSemaphoreCreateInfo> struct_chain_ptr;

    release_semaphore();

    /* Sanity checks */
    if (m_create_info_ptr->get_exportable_external_semaphore_handle_types() != Anvil::ExternalSemaphoreHandleTypeFlagBits::NONE)
    {
        if (!m_device_ptr->get_extension_info()->khr_external_semaphore() )
        {
            anvil_assert(m_device_ptr->get_extension_info()->khr_external_semaphore() );

            goto end;
        }
    }

    /* Spawn a new semaphore */
    {
        VkSemaphoreCreateInfo semaphore_create_info;

        semaphore_create_info.flags = 0;
        semaphore_create_info.pNext = nullptr;
        semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;

        struct_chainer.append_struct(semaphore_create_info);
    }

    if (m_create_info_ptr->get_exportable_external_semaphore_handle_types() != Anvil::ExternalSemaphoreHandleTypeFlagBits::NONE)
    {
        VkExportSemaphoreCreateInfo create_info;

        create_info.handleTypes = m_create_info_ptr->get_exportable_external_semaphore_handle_types().get_vk();
        create_info.pNext       = nullptr;
        create_info.sType       = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;

        struct_chainer.append_struct(create_info);
    }

    #if defined(_WIN32)
    {
        const Anvil::ExternalNTHandleInfo* nt_handle_info_ptr = nullptr;

        if (m_create_info_ptr->get_exportable_nt_handle_info(&nt_handle_info_ptr) )
        {
            VkExportSemaphoreWin32HandleInfoKHR handle_info;

            anvil_assert( nt_handle_info_ptr                                                                                                                   != nullptr);
            anvil_assert(((m_create_info_ptr->get_exportable_external_semaphore_handle_types() & Anvil::ExternalSemaphoreHandleTypeFlagBits::OPAQUE_WIN32_BIT) != 0)       ||
                         ((m_create_info_ptr->get_exportable_external_semaphore_handle_types() & Anvil::ExternalSemaphoreHandleTypeFlagBits::D3D12_FENCE_BIT)  != 0));

            handle_info.dwAccess    = nt_handle_info_ptr->access;
            handle_info.name        = (nt_handle_info_ptr->name.size() > 0) ? &nt_handle_info_ptr->name.at(0)
                                                                            : nullptr;
            handle_info.pAttributes = nt_handle_info_ptr->attributes_ptr;
            handle_info.pNext       = nullptr;
            handle_info.sType       = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;

            struct_chainer.append_struct(handle_info);
        }
    }
    #endif

    struct_chain_ptr = struct_chainer.create_chain();
    if (struct_chain_ptr == nullptr)
    {
        anvil_assert(struct_chain_ptr != nullptr);

        goto end;
    }

    result = Anvil::Vulkan::vkCreateSemaphore(m_device_ptr->get_device_vk(),
                                              struct_chain_ptr->get_root_struct(),
                                              nullptr, /* pAllocator */
                                             &m_semaphore);

    anvil_assert_vk_call_succeeded(result);
    if (is_vk_call_successful(result) )
    {
        set_vk_handle(m_semaphore);
    }

end:
    return is_vk_call_successful(result);
}
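/* What the struct chainer assembles above is, in raw Vulkan terms, a pNext chain rooted at
 * VkSemaphoreCreateInfo. A minimal equivalent without the wrapper helpers is sketched below;
 * the opaque-FD handle type is just an example, and error handling is omitted.
 */
VkResult create_exportable_semaphore(VkDevice     in_device,
                                     VkSemaphore* out_semaphore_ptr)
{
    VkExportSemaphoreCreateInfo export_info;
    VkSemaphoreCreateInfo       create_info;

    export_info.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
    export_info.pNext       = nullptr;
    export_info.sType       = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;

    create_info.flags = 0;
    create_info.pNext = &export_info; /* chain the export struct behind the root */
    create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;

    return vkCreateSemaphore(in_device,
                            &create_info,
                             nullptr, /* pAllocator */
                             out_semaphore_ptr);
}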
Esempio n. 20
0
/** Initializes the swapchain object. */
bool Anvil::Swapchain::init()
{
    uint32_t                                              n_swapchain_images             = 0;
    auto                                                  parent_surface_ptr             = m_create_info_ptr->get_rendering_surface();
    VkResult                                              result                         = VK_ERROR_INITIALIZATION_FAILED;
    Anvil::StructChainUniquePtr<VkSwapchainCreateInfoKHR> struct_chain_ptr;
    std::vector<VkImage>                                  swapchain_images;
    const VkSurfaceTransformFlagBitsKHR                   swapchain_transformation       = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    const WindowPlatform                                  window_platform                = m_create_info_ptr->get_window()->get_platform();
    const bool                                            is_offscreen_rendering_enabled = (window_platform   == WINDOW_PLATFORM_DUMMY                     ||
                                                                                            window_platform   == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

    m_size.width  = parent_surface_ptr->get_width ();
    m_size.height = parent_surface_ptr->get_height();

    /* not doing offscreen rendering */
    if (!is_offscreen_rendering_enabled)
    {
        const auto&                                    khr_swapchain_entrypoints = m_device_ptr->get_extension_khr_swapchain_entrypoints();
        Anvil::StructChainer<VkSwapchainCreateInfoKHR> struct_chainer;

        #ifdef _DEBUG
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            const Anvil::DeviceType    device_type                     = m_device_ptr->get_type();
            uint32_t                   n_physical_devices              = 0;
            bool                       result_bool                     = false;
            const char*                required_surface_extension_name = nullptr;
            VkSurfaceCapabilitiesKHR   surface_caps;
            VkCompositeAlphaFlagsKHR   supported_composite_alpha_flags = static_cast<VkCompositeAlphaFlagsKHR>(0);
            VkSurfaceTransformFlagsKHR supported_surface_transform_flags;

            #ifdef _WIN32
                #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_WIN32_SURFACE_EXTENSION_NAME;
                #endif
            #else
                #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_XCB_SURFACE_EXTENSION_NAME;
                #endif
            #endif

            anvil_assert(required_surface_extension_name == nullptr                                                            ||
                         m_device_ptr->get_parent_instance()->is_instance_extension_supported(required_surface_extension_name) );

            switch (device_type)
            {
                case Anvil::DEVICE_TYPE_SINGLE_GPU: n_physical_devices = 1; break;

                default:
                {
                    anvil_assert_fail();
                }
            }

            for (uint32_t n_physical_device = 0;
                          n_physical_device < n_physical_devices;
                        ++n_physical_device)
            {
                const Anvil::PhysicalDevice* current_physical_device_ptr = nullptr;

                switch (device_type)
                {
                    case Anvil::DEVICE_TYPE_SINGLE_GPU: current_physical_device_ptr = sgpu_device_ptr->get_physical_device(); break;

                    default:
                    {
                        anvil_assert_fail();
                    }
                }

                /* Ensure opaque composite alpha mode is supported */
                anvil_assert(parent_surface_ptr->get_supported_composite_alpha_flags(&supported_composite_alpha_flags) );

                anvil_assert(supported_composite_alpha_flags & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR);

                /* Ensure we can use the swapchain image format  */
                anvil_assert(parent_surface_ptr->is_compatible_with_image_format(m_create_info_ptr->get_format(),
                                                                                &result_bool) );
                anvil_assert(result_bool);

                /* Ensure the transformation we're about to request is supported by the rendering surface */
                anvil_assert(parent_surface_ptr->get_supported_transformations(&supported_surface_transform_flags) );

                anvil_assert(supported_surface_transform_flags & swapchain_transformation);

                /* Ensure the requested number of swapchain images is reasonable */
                anvil_assert(parent_surface_ptr->get_capabilities(&surface_caps) );

                anvil_assert(surface_caps.maxImageCount == 0                                 ||
                             surface_caps.maxImageCount >= m_create_info_ptr->get_n_images() );
            }
        }
        #endif

        {
            VkSwapchainCreateInfoKHR create_info;

            create_info.clipped               = true; /* we won't be reading from the presentable images */
            create_info.compositeAlpha        = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
            create_info.flags                 = m_create_info_ptr->get_flags();
            create_info.imageArrayLayers      = 1;
            create_info.imageColorSpace       = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
            create_info.imageExtent.height    = parent_surface_ptr->get_height();
            create_info.imageExtent.width     = parent_surface_ptr->get_width ();
            create_info.imageFormat           = m_create_info_ptr->get_format ();
            create_info.imageSharingMode      = VK_SHARING_MODE_EXCLUSIVE;
            create_info.imageUsage            = m_create_info_ptr->get_usage_flags();
            create_info.minImageCount         = m_create_info_ptr->get_n_images   ();
            create_info.oldSwapchain          = VK_NULL_HANDLE;
            create_info.pNext                 = nullptr;
            create_info.pQueueFamilyIndices   = nullptr;
            create_info.presentMode           = m_create_info_ptr->get_present_mode();
            create_info.preTransform          = swapchain_transformation;
            create_info.queueFamilyIndexCount = 0;
            create_info.sType                 = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
            create_info.surface               = parent_surface_ptr->get_surface();

            struct_chainer.append_struct(create_info);
        }

        struct_chain_ptr = struct_chainer.create_chain();

        parent_surface_ptr->lock();
        {
            result = khr_swapchain_entrypoints.vkCreateSwapchainKHR(m_device_ptr->get_device_vk(),
                                                                    struct_chain_ptr->get_root_struct(),
                                                                    nullptr, /* pAllocator */
                                                                   &m_swapchain);
        }
        parent_surface_ptr->unlock();

        anvil_assert_vk_call_succeeded(result);
        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_swapchain);
        }

        /* Retrieve swap-chain images */
        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                   nullptr); /* pSwapchainImages */

        anvil_assert_vk_call_succeeded(result);
        anvil_assert                  (n_swapchain_images >  0);

        swapchain_images.resize(n_swapchain_images);

        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                  &swapchain_images[0]);

        anvil_assert_vk_call_succeeded(result);
    }
    else /* offscreen rendering */
    {
        m_create_info_ptr->set_usage_flags(m_create_info_ptr->get_usage_flags() | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

        n_swapchain_images = m_create_info_ptr->get_n_images();
    }

    for (uint32_t n_result_image = 0;
                  n_result_image < n_swapchain_images;
                ++n_result_image)
    {
        /* Spawn an Image wrapper class for the swap-chain image. */
        if (!is_offscreen_rendering_enabled)
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_swapchain_wrapper(m_device_ptr,
                                                                                    this,
                                                                                    swapchain_images[n_result_image],
                                                                                    n_result_image);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }
        else
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_nonsparse_alloc(m_device_ptr,
                                                                                  VK_IMAGE_TYPE_2D,
                                                                                  m_create_info_ptr->get_format(),
                                                                                  VK_IMAGE_TILING_OPTIMAL,
                                                                                  m_create_info_ptr->get_usage_flags(),
                                                                                  m_size.width,
                                                                                  m_size.height,
                                                                                  1, /* base_mipmap_depth */
                                                                                  1, /* n_layers */
                                                                                  VK_SAMPLE_COUNT_1_BIT,
                                                                                  QUEUE_FAMILY_GRAPHICS_BIT,
                                                                                  VK_SHARING_MODE_EXCLUSIVE,
                                                                                  false, /* in_use_full_mipmap_chain */
                                                                                  0,     /* in_memory_features       */
                                                                                  0,     /* in_create_flags          */
                                                                                  VK_IMAGE_LAYOUT_GENERAL,
                                                                                  nullptr);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }

        /* For each swap-chain image, create a relevant view */
        {
            auto create_info_ptr = Anvil::ImageViewCreateInfo::create_2D(m_device_ptr,
                                                                         m_image_ptrs[n_result_image].get(),
                                                                         0, /* n_base_layer */
                                                                         0, /* n_base_mipmap_level */
                                                                         1, /* n_mipmaps           */
                                                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                                                         m_create_info_ptr->get_format(),
                                                                         VK_COMPONENT_SWIZZLE_R,
                                                                         VK_COMPONENT_SWIZZLE_G,
                                                                         VK_COMPONENT_SWIZZLE_B,
                                                                         VK_COMPONENT_SWIZZLE_A);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_view_ptrs[n_result_image] = Anvil::ImageView::create(std::move(create_info_ptr) );
        }

        result = VK_SUCCESS;
    }

    /* Sign up for present submission notifications. This is needed to ensure that number of presented frames ==
     * number of acquired frames at destruction time.
     */
    {
        std::vector<Anvil::Queue*> queues;

        switch (m_device_ptr->get_type() )
        {
            case Anvil::DEVICE_TYPE_SINGLE_GPU:
            {
                const std::vector<uint32_t>* queue_fams_with_present_support_ptr(nullptr);
                const auto                   rendering_surface_ptr              (m_create_info_ptr->get_rendering_surface() );
                const Anvil::SGPUDevice*     sgpu_device_ptr                    (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                if (!rendering_surface_ptr->get_queue_families_with_present_support(&queue_fams_with_present_support_ptr) )
                {
                    break;
                }

                if (queue_fams_with_present_support_ptr == nullptr)
                {
                    anvil_assert(queue_fams_with_present_support_ptr != nullptr);
                }
                else
                {
                    for (const auto queue_fam : *queue_fams_with_present_support_ptr)
                    {
                        const uint32_t n_queues = sgpu_device_ptr->get_n_queues(queue_fam);

                        for (uint32_t n_queue = 0;
                                      n_queue < n_queues;
                                    ++n_queue)
                        {
                            auto queue_ptr = sgpu_device_ptr->get_queue_for_queue_family_index(queue_fam,
                                                                                               n_queue);

                            anvil_assert(queue_ptr != nullptr);

                            if (std::find(queues.begin(),
                                          queues.end(),
                                          queue_ptr) == queues.end() )
                            {
                                queues.push_back(queue_ptr);
                            }
                        }
                    }
                }

                break;
            }
        }

        for (auto queue_ptr : queues)
        {
            queue_ptr->register_for_callbacks(
                QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                std::bind(&Swapchain::on_present_request_issued,
                          this,
                          std::placeholders::_1),
                this
            );

            m_observed_queues.push_back(queue_ptr);
        }
    }

    /* Sign up for "about to close the parent window" notifications. Swapchain instance SHOULD be deinitialized
     * before the window is destroyed, so we're going to act as nice citizens.
     */
    m_create_info_ptr->get_window()->register_for_callbacks(
        WINDOW_CALLBACK_ID_ABOUT_TO_CLOSE,
        std::bind(&Swapchain::on_parent_window_about_to_close,
                  this),
        this
    );

    return is_vk_call_successful(result);
}
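/* The debug block in the function above only asserts that the requested swapchain image count is
 * legal. Production code usually clamps the count against the surface capabilities first; a
 * sketch of that pattern:
 */
static uint32_t clamp_swapchain_image_count(const VkSurfaceCapabilitiesKHR& in_surface_caps,
                                            uint32_t                        in_requested_n_images)
{
    uint32_t result = in_requested_n_images;

    if (result < in_surface_caps.minImageCount)
    {
        result = in_surface_caps.minImageCount;
    }

    /* maxImageCount == 0 means the surface imposes no upper limit */
    if (in_surface_caps.maxImageCount != 0                             &&
        result                        >  in_surface_caps.maxImageCount)
    {
        result = in_surface_caps.maxImageCount;
    }

    return result;
}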
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info                        = {};
    const bool             khr_dedicated_allocation_supported = m_device_ptr->get_extension_info()->khr_dedicated_allocation();
    VkResult               result                             = VK_ERROR_DEVICE_LOST;

    /* Prepare VK func ptr array */
    m_vma_func_ptrs.reset(
        new VmaVulkanFunctions()
    );

    if (m_vma_func_ptrs == nullptr)
    {
        anvil_assert(m_vma_func_ptrs != nullptr);

        goto end;
    }

    m_vma_func_ptrs->vkAllocateMemory                    = Vulkan::vkAllocateMemory;
    m_vma_func_ptrs->vkBindBufferMemory                  = Vulkan::vkBindBufferMemory;
    m_vma_func_ptrs->vkBindImageMemory                   = Vulkan::vkBindImageMemory;
    m_vma_func_ptrs->vkCreateBuffer                      = Vulkan::vkCreateBuffer;
    m_vma_func_ptrs->vkCreateImage                       = Vulkan::vkCreateImage;
    m_vma_func_ptrs->vkDestroyBuffer                     = Vulkan::vkDestroyBuffer;
    m_vma_func_ptrs->vkDestroyImage                      = Vulkan::vkDestroyImage;
    m_vma_func_ptrs->vkFreeMemory                        = Vulkan::vkFreeMemory;
    m_vma_func_ptrs->vkGetBufferMemoryRequirements       = Vulkan::vkGetBufferMemoryRequirements;
    m_vma_func_ptrs->vkGetImageMemoryRequirements        = Vulkan::vkGetImageMemoryRequirements;
    m_vma_func_ptrs->vkGetPhysicalDeviceMemoryProperties = Vulkan::vkGetPhysicalDeviceMemoryProperties;
    m_vma_func_ptrs->vkGetPhysicalDeviceProperties       = Vulkan::vkGetPhysicalDeviceProperties;
    m_vma_func_ptrs->vkMapMemory                         = Vulkan::vkMapMemory;
    m_vma_func_ptrs->vkUnmapMemory                       = Vulkan::vkUnmapMemory;

    if (m_device_ptr->get_extension_info()->khr_get_memory_requirements2() )
    {
        m_vma_func_ptrs->vkGetBufferMemoryRequirements2KHR = m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetBufferMemoryRequirements2KHR;
        m_vma_func_ptrs->vkGetImageMemoryRequirements2KHR  = m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetImageMemoryRequirements2KHR;
    }
    else
    {
        m_vma_func_ptrs->vkGetBufferMemoryRequirements2KHR = nullptr;
        m_vma_func_ptrs->vkGetImageMemoryRequirements2KHR  = nullptr;
    }

    /* Prepare VMA create info struct */
    switch (m_device_ptr->get_type() )
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            /* VMA library takes a physical device handle to extract info regarding supported
             * memory types and the like. As VK_KHR_device_group provides explicit mGPU support,
             * it is guaranteed all physical devices within a logical device offer exactly the
             * same capabilities. This means we're safe to pass zeroth physical device to the
             * library, and everything will still be OK.
             */
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = mgpu_device_ptr->get_physical_device(0)->get_physical_device();
            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    create_info.flags                       = (khr_dedicated_allocation_supported) ? VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT : 0;
    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;
    create_info.pVulkanFunctions            = m_vma_func_ptrs.get();

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);
end:
    return is_vk_call_successful(result);
}
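/* For reference: when VMA is allowed to use statically linked Vulkan entrypoints instead of the
 * pVulkanFunctions table populated above, the setup reduces to the sketch below. This assumes the
 * VMA revision this backend targets, which does not yet require an instance handle.
 */
VkResult create_minimal_vma_allocator(VkPhysicalDevice in_physical_device,
                                      VkDevice         in_device,
                                      VmaAllocator*    out_allocator_ptr)
{
    VmaAllocatorCreateInfo create_info = {};

    create_info.device         = in_device;
    create_info.physicalDevice = in_physical_device;

    return vmaCreateAllocator(&create_info,
                              out_allocator_ptr);
}
/* The returned allocator should eventually be released with vmaDestroyAllocator(). */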
/** For each specified Memory Allocator's Item, the function asks VMA for a memory region that
 *  can be assigned to corresponding wrapper instance. For each successfully handled request,
 *  a MemoryBlock instance is created, using the feedback provided by the library.
 *
 *  This function can be called multiple times.
 *
 *  @return true if all allocations have been handled successfully, false if there was at least
 *               one failure.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::bake(Anvil::MemoryAllocator::Items& in_items)
{
    bool     result    = true;
    VkResult result_vk = VK_ERROR_DEVICE_LOST;

    /* Go through all scheduled items and call the underlying library API to handle the request.
     *
     * For each successful allocation, wrap it with a MemoryBlock wrapper with a custom delete
     * handler, so that VMA is notified whenever a memory block it has provided memory backing for
     * has gone out of scope.
     */
    for (auto& current_item_ptr : in_items)
    {
        anvil_assert(current_item_ptr->memory_priority == FLT_MAX); /* VMA doesn't support memory_priority */

        MemoryBlockUniquePtr new_memory_block_ptr(nullptr,
                                                  std::default_delete<Anvil::MemoryBlock>() );

        VmaAllocation                               allocation                  = VK_NULL_HANDLE;
        VmaAllocationCreateInfo                     allocation_create_info      = {};
        VmaAllocationInfo                           allocation_info             = {};
        bool                                        is_dedicated_alloc          = false;
        VkMemoryRequirements                        memory_requirements_vk;
        Anvil::OnMemoryBlockReleaseCallbackFunction release_callback_function;
        Anvil::MemoryHeapFlags                      required_mem_heap_flags;
        Anvil::MemoryPropertyFlags                  required_mem_property_flags;

        Anvil::Utils::get_vk_property_flags_from_memory_feature_flags(current_item_ptr->alloc_memory_required_features,
                                                                     &required_mem_property_flags,
                                                                     &required_mem_heap_flags);

        /* NOTE: VMA does not take required memory heap flags at the moment. Adding this is on their radar. */
        anvil_assert(required_mem_heap_flags == Anvil::MemoryHeapFlagBits::NONE);

        memory_requirements_vk.alignment      = current_item_ptr->alloc_memory_required_alignment;
        memory_requirements_vk.memoryTypeBits = current_item_ptr->alloc_memory_supported_memory_types;
        memory_requirements_vk.size           = current_item_ptr->alloc_size;

        allocation_create_info.flags         = (current_item_ptr->alloc_is_dedicated_memory) ? VmaAllocationCreateFlagBits::VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
                                                                                             : 0;
        allocation_create_info.requiredFlags = required_mem_property_flags.get_vk();

        result_vk = vmaAllocateMemory(m_vma_allocator_ptr->get_handle(),
                                     &memory_requirements_vk,
                                     &allocation_create_info,
                                     &allocation,
                                     &allocation_info);

        if (!is_vk_call_successful(result_vk) )
        {
            result = false;

            continue;
        }
        else
        {
            is_dedicated_alloc = (allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        }

        /* Bake the block and stash it */
        release_callback_function = std::bind(
            &VMAAllocator::on_vma_alloced_mem_block_gone_out_of_scope,
            m_vma_allocator_ptr,
            std::placeholders::_1,
            allocation
        );

        {
            auto create_info_ptr = Anvil::MemoryBlockCreateInfo::create_derived_with_custom_delete_proc(m_device_ptr,
                                                                                                        allocation_info.deviceMemory,
                                                                                                        memory_requirements_vk.memoryTypeBits,
                                                                                                        current_item_ptr->alloc_memory_required_features,
                                                                                                        allocation_info.memoryType,
                                                                                                        memory_requirements_vk.size,
                                                                                                        allocation_info.offset,
                                                                                                        release_callback_function);

            if (is_dedicated_alloc)
            {
                if (current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_BUFFER               ||
                    current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_SPARSE_BUFFER_REGION)
                {
                    anvil_assert(current_item_ptr->buffer_ptr != nullptr);

                    create_info_ptr->use_dedicated_allocation(current_item_ptr->buffer_ptr,
                                                              nullptr); /* in_opt_image_ptr */
                }
                else
                if (current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_IMAGE_WHOLE              ||
                    current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_SPARSE_IMAGE_MIPTAIL     ||
                    current_item_ptr->type == Anvil::MemoryAllocator::ITEM_TYPE_SPARSE_IMAGE_SUBRESOURCE)
                {
                    anvil_assert(current_item_ptr->image_ptr != nullptr);

                    create_info_ptr->use_dedicated_allocation(nullptr, /* in_opt_buffer_ptr */
                                                              current_item_ptr->image_ptr);
                }
            }

            new_memory_block_ptr = Anvil::MemoryBlock::create(std::move(create_info_ptr) );
        }

        if (new_memory_block_ptr == nullptr)
        {
            anvil_assert(new_memory_block_ptr != nullptr);

            result = false;
            continue;
        }

        dynamic_cast<IMemoryBlockBackendSupport*>(new_memory_block_ptr.get() )->set_parent_memory_allocator_backend_ptr(shared_from_this(),
                                                                                                                        allocation);

        current_item_ptr->alloc_memory_block_ptr = std::move(new_memory_block_ptr);
        current_item_ptr->alloc_size             = memory_requirements_vk.size;
        current_item_ptr->is_baked               = true;

        m_vma_allocator_ptr->on_new_vma_mem_block_alloced();
    }

    return result;
}
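/* The bake loop above pairs each vmaAllocateMemory call with a release callback that eventually
 * returns the allocation to VMA. Outside a wrapper framework, the same pairing is explicit; a
 * sketch, assuming the requirements struct comes from vkGetBufferMemoryRequirements or similar:
 */
VkResult vma_alloc_and_free_example(VmaAllocator                in_allocator,
                                    const VkMemoryRequirements& in_memory_requirements)
{
    VmaAllocation           allocation             = VK_NULL_HANDLE;
    VmaAllocationCreateInfo allocation_create_info = {};
    VmaAllocationInfo       allocation_info        = {};
    VkResult                result_vk;

    allocation_create_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    result_vk = vmaAllocateMemory(in_allocator,
                                 &in_memory_requirements,
                                 &allocation_create_info,
                                 &allocation,
                                 &allocation_info);

    if (result_vk == VK_SUCCESS)
    {
        /* allocation_info.deviceMemory + allocation_info.offset identify the backing region */
        vmaFreeMemory(in_allocator,
                      allocation);
    }

    return result_vk;
}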