/** Please see header for specification */
bool Anvil::PipelineCache::merge(uint32_t                           in_n_pipeline_caches,
                                 const Anvil::PipelineCache* const* in_src_cache_ptrs)
{
    VkResult                     result_vk;
    std::vector<VkPipelineCache> src_pipeline_caches(in_n_pipeline_caches);

    anvil_assert(in_n_pipeline_caches > 0); /* src_pipeline_caches is sized from this value */


    for (uint32_t n_pipeline_cache = 0;
                  n_pipeline_cache < in_n_pipeline_caches;
                ++n_pipeline_cache)
    {
        src_pipeline_caches[n_pipeline_cache] = in_src_cache_ptrs[n_pipeline_cache]->get_pipeline_cache();
    }

    lock();
    {
        result_vk = Anvil::Vulkan::vkMergePipelineCaches(m_device_ptr->get_device_vk(),
                                                         m_pipeline_cache,
                                                         in_n_pipeline_caches,
                                                        &src_pipeline_caches.at(0) );
    }
    unlock();

    anvil_assert_vk_call_succeeded(result_vk);

    return is_vk_call_successful(result_vk);
}
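/* Usage sketch for merge() above - illustrative only, not part of Anvil. Assumes both
 * caches were created against the same device; folds a worker thread's cache into the
 * main one. */
static bool example_merge_worker_cache(Anvil::PipelineCache* main_cache_ptr,
                                       Anvil::PipelineCache* worker_cache_ptr)
{
    const Anvil::PipelineCache* src_cache_ptrs[] = { worker_cache_ptr };

    return main_cache_ptr->merge(1, /* in_n_pipeline_caches */
                                 src_cache_ptrs);
}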
/* Please see header for specification */
Anvil::Buffer::Buffer(Anvil::Device*         device_ptr,
                      VkDeviceSize           size,
                      Anvil::QueueFamilyBits queue_families,
                      VkSharingMode          queue_sharing_mode,
                      VkBufferUsageFlags     usage_flags,
                      bool                   should_be_mappable,
                      bool                   should_be_coherent,
                      const void*            opt_client_data)
    :m_buffer           (VK_NULL_HANDLE),
     m_buffer_size      (size),
     m_device_ptr       (device_ptr),
     m_memory_block_ptr (nullptr),
     m_parent_buffer_ptr(nullptr),
     m_start_offset     (0),
     m_usage_flags      (static_cast<VkBufferUsageFlagBits>(usage_flags) )
{
    /* Sanity checks */
    if (!should_be_mappable)
    {
        anvil_assert(!should_be_coherent);

        /* For host->gpu writes to work in this case, we will need the buffer to work as a target
         * for buffer->buffer copy operations.
         */
        m_usage_flags = static_cast<VkBufferUsageFlagBits>(m_usage_flags | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    }

    /* Create the buffer object */
    create_buffer(queue_families,
                  queue_sharing_mode,
                  size);

    /* Create a memory object and preallocate as much space as we need */
    {
        Anvil::MemoryBlock* memory_block_ptr = nullptr;

        memory_block_ptr = new Anvil::MemoryBlock(m_device_ptr,
                                                  m_buffer_memory_reqs.memoryTypeBits,
                                                  m_buffer_memory_reqs.size,
                                                  should_be_mappable,
                                                  should_be_coherent);

        anvil_assert(memory_block_ptr != nullptr);

        set_memory(memory_block_ptr);

        if (opt_client_data != nullptr)
        {
            write(0,
                  size,
                  opt_client_data);
        }

        memory_block_ptr->release();
    }

    /* Register the object */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectTracker::OBJECT_TYPE_BUFFER,
                                                  this);
}
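/* Usage sketch for the constructor above - illustrative only. Creates a non-mappable
 * vertex buffer with initial contents; since the buffer is neither mappable nor coherent,
 * the constructor appends TRANSFER_DST to the usage flags itself, so the initial write()
 * can be realized as a buffer->buffer copy. */
static Anvil::Buffer* example_create_vertex_buffer(Anvil::Device* device_ptr,
                                                   const void*    vertex_data_ptr,
                                                   VkDeviceSize   vertex_data_size)
{
    return new Anvil::Buffer(device_ptr,
                             vertex_data_size,
                             Anvil::QUEUE_FAMILY_GRAPHICS_BIT,
                             VK_SHARING_MODE_EXCLUSIVE,
                             VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
                             false, /* should_be_mappable */
                             false, /* should_be_coherent */
                             vertex_data_ptr);
}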
/** Helper function which verifies the maximum number of attachments specified at
 *  creation time is not exceeded.
 **/
void do_sanity_checks()
{
    anvil_assert(color_attachments_vk.size()         <= n_max_color_attachments);
    anvil_assert(input_attachments_vk.size()         <= n_max_input_attachments);
    anvil_assert(preserve_attachments_vk.size()      <= n_max_preserve_attachments);
    anvil_assert(resolve_color_attachments_vk.size() <= n_max_color_attachments);
}
/** Please see header for specification */
bool Anvil::DescriptorSetLayout::init()
{
    bool     result    = false;
    VkResult result_vk;

    anvil_assert(m_layout == VK_NULL_HANDLE);

    /* Bake the Vulkan object */
    auto create_info_ptr = m_create_info_ptr->create_descriptor_set_layout_create_info(m_device_ptr);

    if (create_info_ptr == nullptr)
    {
        anvil_assert(create_info_ptr != nullptr);

        goto end;
    }

    result_vk = Anvil::Vulkan::vkCreateDescriptorSetLayout(m_device_ptr->get_device_vk(),
                                                           create_info_ptr->struct_chain_ptr->get_root_struct(),
                                                           nullptr, /* pAllocator */
                                                          &m_layout);

    anvil_assert_vk_call_succeeded(result_vk);
    if (is_vk_call_successful(result_vk) )
    {
        set_vk_handle(m_layout);
    }

    result = is_vk_call_successful(result_vk);

end:
    return result;
}
Anvil::SwapchainCreateInfo::SwapchainCreateInfo(Anvil::BaseDevice*                 in_device_ptr,
                                                Anvil::RenderingSurface*           in_parent_surface_ptr,
                                                Anvil::Window*                     in_window_ptr,
                                                Anvil::Format                      in_format,
                                                Anvil::ColorSpaceKHR               in_color_space,
                                                Anvil::PresentModeKHR              in_present_mode,
                                                Anvil::ImageUsageFlags             in_usage_flags,
                                                uint32_t                           in_n_images,
                                                MTSafety                           in_mt_safety,
                                                Anvil::SwapchainCreateFlags        in_flags,
                                                Anvil::DeviceGroupPresentModeFlags in_mgpu_present_mode_flags,
                                                const bool&                        in_clipped,
                                                const Anvil::Swapchain*            in_opt_old_swapchain_ptr)
    :m_clipped                (in_clipped),
     m_color_space            (in_color_space),
     m_device_ptr             (in_device_ptr),
     m_flags                  (in_flags),
     m_format                 (in_format),
     m_mgpu_present_mode_flags(in_mgpu_present_mode_flags),
     m_mt_safety              (in_mt_safety),
     m_n_images               (in_n_images),
     m_old_swapchain_ptr      (in_opt_old_swapchain_ptr),
     m_parent_surface_ptr     (in_parent_surface_ptr),
     m_present_mode           (in_present_mode),
     m_usage_flags            (in_usage_flags),
     m_window_ptr             (in_window_ptr)
{
    anvil_assert(in_n_images           >  0);
    anvil_assert(in_parent_surface_ptr != nullptr);
    anvil_assert(in_usage_flags        != 0);
}
/* Please see header for specification */
Anvil::DescriptorSet* Anvil::DescriptorSetGroup::get_descriptor_set(uint32_t n_set)
{
    bool pool_rebaked = false;

    anvil_assert(m_descriptor_sets.find(n_set) != m_descriptor_sets.end() );

    if (m_descriptor_pool_dirty)
    {
        bake_descriptor_pool();

        anvil_assert(!m_descriptor_pool_dirty);

        pool_rebaked = true;
    }

    if (pool_rebaked                                           ||
        m_descriptor_sets[n_set].descriptor_set_ptr == nullptr)
    {
        bake_descriptor_sets();

        anvil_assert(m_descriptor_sets[n_set].descriptor_set_ptr != nullptr);
    }

    return m_descriptor_sets[n_set].descriptor_set_ptr;
}
/* Please see header for specification */
Anvil::Buffer::Buffer(Anvil::Buffer* parent_buffer_ptr,
                      VkDeviceSize   start_offset,
                      VkDeviceSize   size)
    :m_buffer           (VK_NULL_HANDLE),
     m_buffer_size      (size),
     m_memory_block_ptr (nullptr),
     m_parent_buffer_ptr(parent_buffer_ptr),
     m_start_offset     (start_offset),
     m_usage_flags      (static_cast<VkBufferUsageFlagBits>(0) )
{
    /* Sanity checks */
    anvil_assert(parent_buffer_ptr != nullptr);
    anvil_assert(size              >  0); /* start_offset is a VkDeviceSize, i.e. unsigned - a >= 0 check would be vacuous */

    m_memory_block_ptr = new Anvil::MemoryBlock(parent_buffer_ptr->m_memory_block_ptr,
                                                start_offset,
                                                size);

    m_buffer = parent_buffer_ptr->m_buffer;
    m_parent_buffer_ptr->retain();

    /* Register the object */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectTracker::OBJECT_TYPE_BUFFER,
                                                 this);
}
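/* Usage sketch for the sub-buffer constructor above - illustrative only. The child object
 * shares the parent's VkBuffer handle, wraps a sub-range of its memory block, and retains
 * the parent for as long as it lives. */
static Anvil::Buffer* example_create_sub_region(Anvil::Buffer* parent_buffer_ptr,
                                                VkDeviceSize   region_offset,
                                                VkDeviceSize   region_size)
{
    return new Anvil::Buffer(parent_buffer_ptr,
                             region_offset,
                             region_size);
}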
/* Please see header for specification */
bool Anvil::Buffer::set_memory(Anvil::MemoryBlock* memory_block_ptr)
{
    bool     result = false;
    VkResult result_vk;

    if (memory_block_ptr == nullptr)
    {
        anvil_assert(!(memory_block_ptr == nullptr) );

        goto end;
    }

    if (m_memory_block_ptr != nullptr)
    {
        anvil_assert(m_memory_block_ptr == nullptr); /* a memory block must not already be bound */

        goto end;
    }

    /* Bind the memory object to the buffer object */
    m_memory_block_ptr = memory_block_ptr;
    m_memory_block_ptr->retain();

    result_vk = vkBindBufferMemory(m_device_ptr->get_device_vk(),
                                   m_buffer,
                                   m_memory_block_ptr->get_memory(),
                                   memory_block_ptr->get_start_offset() );
    anvil_assert_vk_call_succeeded(result_vk);

    result = is_vk_call_successful(result_vk);
end:
    return result;
}
/** Re-creates internally-maintained descriptor pool. **/
void Anvil::DescriptorSetGroup::bake_descriptor_pool()
{
    const uint32_t n_descriptor_sets = static_cast<uint32_t>(m_descriptor_sets.size() );

    anvil_assert(m_descriptor_pool_dirty);
    anvil_assert(n_descriptor_sets != 0);

    /* Count how many descriptor of what types need to have pool space allocated */
    uint32_t n_descriptors_needed_array[VK_DESCRIPTOR_TYPE_RANGE_SIZE];

    memset(n_descriptors_needed_array,
           0,
           sizeof(n_descriptors_needed_array) );

    for (auto current_ds : m_descriptor_sets)
    {
        uint32_t n_ds_bindings;

        if (current_ds.second.layout_ptr == nullptr)
        {
            continue;
        }

        n_ds_bindings = static_cast<uint32_t>(current_ds.second.layout_ptr->get_n_bindings() );

        for (uint32_t n_ds_binding = 0;
                      n_ds_binding < n_ds_bindings;
                    ++n_ds_binding)
        {
            uint32_t         ds_binding_array_size;
            VkDescriptorType ds_binding_type;

            current_ds.second.layout_ptr->get_binding_properties(n_ds_binding,
                                                                 nullptr, /* opt_out_binding_index_ptr               */
                                                                &ds_binding_type,
                                                                &ds_binding_array_size,
                                                                 nullptr,  /* opt_out_stage_flags_ptr                */
                                                                 nullptr); /* opt_out_immutable_samplers_enabled_ptr */

            n_descriptors_needed_array[ds_binding_type] += ds_binding_array_size;
        }
    }

    /* Configure the underlying descriptor pool wrapper */
    for (uint32_t n_descriptor_type = 0;
                  n_descriptor_type < VK_DESCRIPTOR_TYPE_RANGE_SIZE;
                ++n_descriptor_type)
    {
        m_descriptor_pool_ptr->set_descriptor_array_size(static_cast<VkDescriptorType>(n_descriptor_type),
                                                         n_descriptors_needed_array[n_descriptor_type] + m_overhead_allocations[n_descriptor_type]);
    }

    m_descriptor_pool_ptr->bake();

    /* The descriptor pool now matches the layout's configuration */
    m_descriptor_pool_dirty = false;
}
/* Please see header for specification */
Anvil::DescriptorSetGroup::DescriptorSetGroup(Anvil::Device* device_ptr,
                                              bool           releaseable_sets,
                                              uint32_t       n_sets)
    :m_descriptor_pool_dirty       (false),
     m_device_ptr                  (device_ptr),
     m_layout_modifications_blocked(false),
     m_n_instantiated_sets         (0),
     m_n_sets                      (n_sets),
     m_parent_dsg_ptr              (nullptr),
     m_releaseable_sets            (releaseable_sets)
{
    anvil_assert(n_sets >= 1);

    memset(m_overhead_allocations,
           0,
           sizeof(m_overhead_allocations) );

    /* Preallocate memory for various containers */
    m_cached_immutable_samplers.resize(N_PREALLOCATED_IMMUTABLE_SAMPLERS);

    /* Initialize descriptor pool */
    m_descriptor_pool_ptr = new Anvil::DescriptorPool(device_ptr,
                                                      n_sets,
                                                      releaseable_sets);

    /* Register the object */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectTracker::OBJECT_TYPE_DESCRIPTOR_SET_GROUP,
                                                 this);
}
/* Please see header for specification */
Anvil::PrimaryCommandBufferPoolWorker::PrimaryCommandBufferPoolWorker(Anvil::CommandPool* parent_command_pool_ptr)
    :m_parent_command_pool_ptr(parent_command_pool_ptr)
{
    anvil_assert(m_parent_command_pool_ptr != nullptr);

    m_parent_command_pool_ptr->retain();
}
bool Anvil::DescriptorSetLayout::meets_max_per_set_descriptors_limit(const DescriptorSetLayoutCreateInfoContainer* in_ds_create_info_ptr,
                                                                     const Anvil::BaseDevice*                      in_device_ptr)
{
    const Anvil::ExtensionKHRMaintenance3Entrypoints* entrypoints_ptr = nullptr;
    VkDescriptorSetLayoutSupportKHR                   query;
    bool                                              result          = false;

    if (!in_device_ptr->get_extension_info()->khr_maintenance3() )
    {
        anvil_assert(in_device_ptr->get_extension_info()->khr_maintenance3());

        goto end;
    }
    else
    {
        entrypoints_ptr = &in_device_ptr->get_extension_khr_maintenance3_entrypoints();
    }

    query.pNext     = nullptr;
    query.sType     = static_cast<VkStructureType>(VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR);
    query.supported = VK_FALSE;

    entrypoints_ptr->vkGetDescriptorSetLayoutSupportKHR(in_device_ptr->get_device_vk(),
                                                        in_ds_create_info_ptr->struct_chain_ptr->get_root_struct(),
                                                       &query);

    result = (query.supported == VK_TRUE);
end:
    return result;
}
const Anvil::BasePipelineCreateInfo* Anvil::BasePipelineManager::get_pipeline_create_info(PipelineID in_pipeline_id) const
{
    std::unique_lock<std::recursive_mutex> mutex_lock;
    auto                                   mutex_ptr         = get_mutex();
    Pipelines::const_iterator              pipeline_iterator;
    Pipeline*                              pipeline_ptr      = nullptr;
    const Anvil::BasePipelineCreateInfo*   result_ptr        = nullptr;

    if (mutex_ptr != nullptr)
    {
        /* The lock temporary is already an rvalue, so it can be assigned directly */
        mutex_lock = std::unique_lock<std::recursive_mutex>(*mutex_ptr);
    }

    pipeline_iterator = m_baked_pipelines.find(in_pipeline_id);

    if (pipeline_iterator == m_baked_pipelines.end() )
    {
        pipeline_iterator = m_outstanding_pipelines.find(in_pipeline_id);

        if (pipeline_iterator == m_outstanding_pipelines.end() )
        {
            anvil_assert(!(pipeline_iterator == m_outstanding_pipelines.end() ));

            goto end;
        }
    }

    pipeline_ptr = pipeline_iterator->second.get();
    result_ptr   = pipeline_ptr->pipeline_create_info_ptr.get();

end:
    return result_ptr;
}
void Anvil::PipelineLayoutManager::on_pipeline_layout_dereferenced(Anvil::PipelineLayout* in_layout_ptr)
{
    bool                                   has_found  = false;
    std::unique_lock<std::recursive_mutex> mutex_lock;
    auto                                   mutex_ptr  = get_mutex();

    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::unique_lock<std::recursive_mutex>(*mutex_ptr);
    }

    for (auto layout_iterator  = m_pipeline_layouts.begin();
              layout_iterator != m_pipeline_layouts.end()    && !has_found;
            ++layout_iterator)
    {
        auto& current_pipeline_layout_container_ptr = *layout_iterator;
        auto& current_pipeline_layout_ptr           = current_pipeline_layout_container_ptr->pipeline_layout_ptr;

        if (current_pipeline_layout_ptr.get() == in_layout_ptr)
        {
            has_found = true;

            if (current_pipeline_layout_container_ptr->n_references.fetch_sub(1) == 1)
            {
                m_pipeline_layouts.erase(layout_iterator);
            }

            break;
        }
    }

    anvil_assert(has_found);
}
/** Please see header for specification */
void Anvil::Instance::destroy()
{
    if (m_debug_callback_data != VK_NULL_HANDLE)
    {
        lock();
        {
            m_ext_debug_report_entrypoints.vkDestroyDebugReportCallbackEXT(m_instance,
                                                                           m_debug_callback_data,
                                                                           nullptr /* pAllocator */);
        }
        unlock();

        m_debug_callback_data = VK_NULL_HANDLE;
    }

    m_physical_devices.clear();

    #ifdef _DEBUG
    {
        /* Make sure no physical devices are still registered with Object Tracker at this point */
        auto object_manager_ptr = Anvil::ObjectTracker::get();

        anvil_assert(object_manager_ptr->get_object_at_index(Anvil::OBJECT_TYPE_PHYSICAL_DEVICE,
                                                             0) == nullptr);
    }
    #endif
}
/** Converts an Anvil::QueueFamilyBits bitfield value to an array of queue family indices.
 *
 *  @param queue_families                     Input value to convert from.
 *  @param out_opt_queue_family_indices_ptr   If not NULL, deref will be updated with the queue family indices
 *                                            corresponding to the families specified under queue_families. The
 *                                            number of items written is reported via *out_opt_n_queue_family_indices_ptr.
 *  @param out_opt_n_queue_family_indices_ptr If not NULL, deref will be set to the number of items that would be,
 *                                            or were, written under out_opt_queue_family_indices_ptr.
 *
 **/
void Anvil::Buffer::convert_queue_family_bits_to_family_indices(Anvil::QueueFamilyBits queue_families,
                                                                uint32_t*              out_opt_queue_family_indices_ptr,
                                                                uint32_t*              out_opt_n_queue_family_indices_ptr) const
{
    uint32_t n_queue_family_indices = 0;

    if ((queue_families & QUEUE_FAMILY_COMPUTE_BIT) != 0)
    {
        anvil_assert(m_device_ptr->get_n_compute_queues() > 0);

        if (out_opt_queue_family_indices_ptr != nullptr)
        {
            out_opt_queue_family_indices_ptr[n_queue_family_indices] = m_device_ptr->get_queue_family_index(Anvil::QUEUE_FAMILY_TYPE_COMPUTE);
        }

        ++n_queue_family_indices;
    }

    if ((queue_families & QUEUE_FAMILY_DMA_BIT) != 0)
    {
        anvil_assert(m_device_ptr->get_n_transfer_queues() > 0);

        if (out_opt_queue_family_indices_ptr != nullptr)
        {
            out_opt_queue_family_indices_ptr[n_queue_family_indices] = m_device_ptr->get_queue_family_index(Anvil::QUEUE_FAMILY_TYPE_TRANSFER);
        }

        ++n_queue_family_indices;
    }

    if ((queue_families & QUEUE_FAMILY_GRAPHICS_BIT) != 0)
    {
        anvil_assert(m_device_ptr->get_n_universal_queues() > 0);

        if (out_opt_queue_family_indices_ptr != nullptr)
        {
            out_opt_queue_family_indices_ptr[n_queue_family_indices] = m_device_ptr->get_queue_family_index(Anvil::QUEUE_FAMILY_TYPE_UNIVERSAL);
        }

        ++n_queue_family_indices;
    }

    if (out_opt_n_queue_family_indices_ptr != nullptr)
    {
        *out_opt_n_queue_family_indices_ptr = n_queue_family_indices;
    }
}
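/* Usage sketch for the contract documented above - illustrative only, and assumes the
 * helper is accessible to the caller. Demonstrates the two-call idiom: query the index
 * count first, then fill a right-sized array. */
static void example_enumerate_family_indices(const Anvil::Buffer*   buffer_ptr,
                                             Anvil::QueueFamilyBits queue_families)
{
    uint32_t n_indices = 0;

    /* Pass 1: retrieve the number of indices only */
    buffer_ptr->convert_queue_family_bits_to_family_indices(queue_families,
                                                            nullptr, /* out_opt_queue_family_indices_ptr */
                                                           &n_indices);

    /* Pass 2: fill a right-sized array */
    std::vector<uint32_t> indices(n_indices);

    buffer_ptr->convert_queue_family_bits_to_family_indices(queue_families,
                                                            indices.data(),
                                                            nullptr /* out_opt_n_queue_family_indices_ptr */);
}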
bool Anvil::BasePipelineCreateInfo::add_specialization_constant(Anvil::ShaderStage in_shader_stage,
                                                                uint32_t           in_constant_id,
                                                                uint32_t           in_n_data_bytes,
                                                                const void*        in_data_ptr)
{
    uint32_t data_buffer_size = 0;
    bool     result           = false;

    if (in_n_data_bytes == 0)
    {
        anvil_assert(!(in_n_data_bytes == 0) );

        goto end;
    }

    if (in_data_ptr == nullptr)
    {
        anvil_assert(!(in_data_ptr == nullptr) );

        goto end;
    }

    /* Append specialization constant data and add a new descriptor. */
    data_buffer_size = static_cast<uint32_t>(m_specialization_constants_data_buffer.size() );


    anvil_assert(m_specialization_constants_map.find(in_shader_stage) != m_specialization_constants_map.end() );

    m_specialization_constants_map[in_shader_stage].push_back(
        SpecializationConstant(
            in_constant_id,
            in_n_data_bytes,
            data_buffer_size)
    );

    m_specialization_constants_data_buffer.resize(data_buffer_size + in_n_data_bytes);


    memcpy(&m_specialization_constants_data_buffer.at(data_buffer_size),
           in_data_ptr,
           in_n_data_bytes);

    /* All done */
    result = true;
end:
    return result;
}
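/* Usage sketch for add_specialization_constant() above - illustrative only; assumes the
 * create info already tracks the specified shader stage. The data is memcpy()'d into the
 * create info's internal buffer, so the local may go out of scope right after the call. */
static bool example_add_uint32_constant(Anvil::BasePipelineCreateInfo* create_info_ptr,
                                        Anvil::ShaderStage             shader_stage,
                                        uint32_t                       constant_id,
                                        uint32_t                       value)
{
    return create_info_ptr->add_specialization_constant(shader_stage,
                                                        constant_id,
                                                        sizeof(value),
                                                       &value);
}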
/* Please see header for specification */
bool Anvil::Fence::reset_fences(const uint32_t in_n_fences,
                                Fence*         in_fences)
{
    const Anvil::BaseDevice* device_ptr           = nullptr;
    VkFence                  fence_cache[32];
    static const uint32_t    fence_cache_capacity = sizeof(fence_cache) / sizeof(fence_cache[0]);
    bool                     result               = true;
    VkResult                 result_vk;

    if (in_n_fences == 0)
    {
        goto end;
    }

    for (uint32_t n_fence_batch = 0;
                  n_fence_batch < 1 + in_n_fences / fence_cache_capacity;
                ++n_fence_batch)
    {
        const uint32_t n_fences_pending   = in_n_fences - n_fence_batch * fence_cache_capacity;
        const uint32_t n_fences_remaining = (n_fences_pending < fence_cache_capacity) ? n_fences_pending
                                                                                      : fence_cache_capacity;

        for (uint32_t n_fence = 0;
                      n_fence < n_fences_remaining;
                    ++n_fence)
        {
            Anvil::Fence& current_fence = in_fences[n_fence_batch * fence_cache_capacity + n_fence];

            anvil_assert(device_ptr == nullptr                   ||
                         device_ptr == current_fence.m_device_ptr); /* all fences must come from one device */

            device_ptr           = current_fence.m_device_ptr;
            fence_cache[n_fence] = current_fence.m_fence;

            current_fence.lock();
        }
        {
            result_vk = vkResetFences(device_ptr->get_device_vk(),
                                      n_fences_remaining,
                                      (n_fences_remaining > 0) ? fence_cache : nullptr);
        }
        for (uint32_t n_fence = 0;
                      n_fence < n_fences_remaining;
                    ++n_fence)
        {
            Anvil::Fence& current_fence = in_fences[n_fence_batch * fence_cache_capacity + n_fence];

            current_fence.unlock();
        }

        anvil_assert_vk_call_succeeded(result_vk);

        if (!is_vk_call_successful(result_vk) )
        {
            result = false;
        }
    }

end:
    return result;
}
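/* Usage sketch for reset_fences() above - illustrative only. Assumes an array of fences
 * which were all created against the same device; resets them in batched calls. */
static void example_reset_frame_fences(Anvil::Fence* fences_ptr,
                                       uint32_t      n_fences)
{
    if (!Anvil::Fence::reset_fences(n_fences,
                                    fences_ptr) )
    {
        /* At least one underlying vkResetFences() call did not return VK_SUCCESS */
    }
}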
/** Destructor */
Anvil::DescriptorSetLayoutManager::~DescriptorSetLayoutManager()
{
    anvil_assert(m_descriptor_set_layouts.size() == 0);

    /* Unregister the object */
    Anvil::ObjectTracker::get()->unregister_object(Anvil::OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_MANAGER,
                                                    this);
}
/* Please see header for specification */
Anvil::DescriptorSetGroup::DescriptorSetGroup(DescriptorSetGroup* parent_dsg_ptr,
                                              bool                releaseable_sets)
    :m_descriptor_pool_dirty       (true),
     m_descriptor_pool_ptr         (nullptr),
     m_device_ptr                  (parent_dsg_ptr->m_device_ptr),
     m_layout_modifications_blocked(true),
     m_n_sets                      (UINT32_MAX),
     m_parent_dsg_ptr              (parent_dsg_ptr),
     m_releaseable_sets            (releaseable_sets)
{
    anvil_assert(parent_dsg_ptr                                                != nullptr);
    anvil_assert(parent_dsg_ptr->m_parent_dsg_ptr                              == nullptr);
    anvil_assert(parent_dsg_ptr->m_descriptor_pool_ptr->are_sets_releaseable() == releaseable_sets);

    memset(m_overhead_allocations,
           0,
           sizeof(m_overhead_allocations) );

    /* Initialize descriptor pool */
    m_descriptor_pool_ptr = new Anvil::DescriptorPool(parent_dsg_ptr->m_device_ptr,
                                                      parent_dsg_ptr->m_n_sets,
                                                      releaseable_sets);

    /* Preallocate memory for descriptor containers */
    m_cached_immutable_samplers.resize(N_PREALLOCATED_IMMUTABLE_SAMPLERS);

    /* Configure the new DSG instance to use the specified parent DSG */
    m_descriptor_sets = parent_dsg_ptr->m_descriptor_sets;

    for (auto& ds : m_descriptor_sets)
    {
        ds.second.descriptor_set_ptr = nullptr;
    }

    /* The parent descriptor set group should be locked, so that it is no longer possible to modify
     * its descriptor set layout. Introducing support for such behavior would require significant
     * work and testing. */
    m_parent_dsg_ptr->m_layout_modifications_blocked = true;

    /* Register the object */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectTracker::OBJECT_TYPE_DESCRIPTOR_SET_GROUP,
                                                 this);
}
/** Destructor */
Anvil::PipelineLayoutManager::~PipelineLayoutManager()
{
    /* If this assertion check explodes, your app has not released all pipelines it has created. */
    anvil_assert(m_pipeline_layouts.size() == 0);

    /* Unregister the object */
    Anvil::ObjectTracker::get()->unregister_object(Anvil::ObjectType::ANVIL_PIPELINE_LAYOUT_MANAGER,
                                                    this);
}
/* Please see header for specification */
VkPipeline Anvil::BasePipelineManager::get_pipeline(PipelineID in_pipeline_id)
{
    std::unique_lock<std::recursive_mutex> mutex_lock;
    auto                                   mutex_ptr         = get_mutex();
    Pipelines::const_iterator              pipeline_iterator;
    Pipeline*                              pipeline_ptr      = nullptr;
    VkPipeline                             result            = VK_NULL_HANDLE;

    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::unique_lock<std::recursive_mutex>(*mutex_ptr);
    }

    if (m_outstanding_pipelines.size() > 0)
    {
        bake();
    }

    pipeline_iterator = m_baked_pipelines.find(in_pipeline_id);

    if (pipeline_iterator == m_baked_pipelines.end() )
    {
        anvil_assert(!(pipeline_iterator == m_baked_pipelines.end()) );

        goto end;
    }

    pipeline_ptr = pipeline_iterator->second.get();

    if (pipeline_ptr->pipeline_create_info_ptr->is_proxy() )
    {
        anvil_assert(!pipeline_ptr->pipeline_create_info_ptr->is_proxy() );

        goto end;
    }

    result = pipeline_ptr->baked_pipeline;
    anvil_assert(result != VK_NULL_HANDLE);
end:
    return result;
}
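/* Usage sketch for get_pipeline() above - illustrative only. Resolves a PipelineID to its
 * baked VkPipeline (baking outstanding pipelines first, if needed) and binds it. */
static void example_bind_pipeline(Anvil::BasePipelineManager* manager_ptr,
                                  Anvil::PipelineID           pipeline_id,
                                  VkCommandBuffer             cmd_buffer_vk)
{
    const VkPipeline pipeline_vk = manager_ptr->get_pipeline(pipeline_id);

    if (pipeline_vk != VK_NULL_HANDLE)
    {
        vkCmdBindPipeline(cmd_buffer_vk,
                          VK_PIPELINE_BIND_POINT_GRAPHICS,
                          pipeline_vk);
    }
}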
/* Please see header for specification */
void Anvil::DescriptorSetGroup::set_descriptor_pool_overhead_allocations(VkDescriptorType descriptor_type,
                                                                          uint32_t         n_overhead_allocations)
{
    anvil_assert(descriptor_type < VK_DESCRIPTOR_TYPE_RANGE_SIZE);

    if (m_overhead_allocations[descriptor_type] != n_overhead_allocations)
    {
        m_overhead_allocations[descriptor_type] = n_overhead_allocations;
        m_descriptor_pool_dirty                 = true;
    }
}
/* Please see header for specification */
bool Anvil::Event::is_set() const
{
    VkResult result;

    result = vkGetEventStatus(m_device_ptr->get_device_vk(),
                              m_event);

    anvil_assert(result == VK_EVENT_RESET ||
                 result == VK_EVENT_SET);

    return (result == VK_EVENT_SET);
}
/* Please see header for specification */
bool Anvil::Fence::is_set() const
{
    VkResult result;

    result = vkGetFenceStatus(m_device_ptr->get_device_vk(),
                              m_fence);

    anvil_assert(result == VK_SUCCESS  ||
                 result == VK_NOT_READY);

    return (result == VK_SUCCESS);
}
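/* Usage sketch for is_set() above - illustrative only. Busy-polls a fence; real code
 * would normally block in vkWaitForFences() instead, this merely demonstrates the
 * VK_SUCCESS / VK_NOT_READY mapping. */
static void example_spin_until_signaled(const Anvil::Fence* fence_ptr)
{
    while (!fence_ptr->is_set() )
    {
        /* Fence still unsignaled (VK_NOT_READY) - keep polling */
    }
}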
/* Please see header for specification */
bool Anvil::DescriptorSetGroup::add_binding(uint32_t           n_set,
                                            uint32_t           binding,
                                            VkDescriptorType   type,
                                            uint32_t           n_elements,
                                            VkShaderStageFlags shader_stages)
{
    bool result = false;

    /* Sanity check: The DSG must not be locked. If you run into this assertion failure, you are trying
     *               to add a new binding to a descriptor set group which shares its layout with other
     *               DSGs. This modification would have invalidated layouts used by children DSGs, and
     *               currently there's no mechanism implemented to inform them about such event. */
    anvil_assert(!m_layout_modifications_blocked);

    /* Sanity check: make sure no more than the number of descriptor sets specified at creation time is
     *               used
     */
    if (m_descriptor_sets.find(n_set) == m_descriptor_sets.end() )
    {
        if (m_descriptor_sets.size() >= m_n_sets)
        {
            anvil_assert(!(m_descriptor_sets.size() >= m_n_sets) );

            goto end;
        }

        m_descriptor_sets[n_set].descriptor_set_ptr = nullptr;
        m_descriptor_sets[n_set].layout_ptr         = new Anvil::DescriptorSetLayout(m_device_ptr);
    }

    /* Pass the call down to DescriptorSet instance */
    result = m_descriptor_sets[n_set].layout_ptr->add_binding(binding,
                                                              type,
                                                              n_elements,
                                                              shader_stages);

    m_descriptor_pool_dirty = true;
end:
    return result;
}
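/* Usage sketch for add_binding() above - illustrative only. Declares a single uniform
 * buffer binding, visible to the vertex stage, in set 0, then fetches the set;
 * get_descriptor_set() re-bakes the dirty pool and descriptor sets on demand. */
static Anvil::DescriptorSet* example_setup_ubo_binding(Anvil::DescriptorSetGroup* dsg_ptr)
{
    dsg_ptr->add_binding(0, /* n_set      */
                         0, /* binding    */
                         VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                         1, /* n_elements */
                         VK_SHADER_STAGE_VERTEX_BIT);

    return dsg_ptr->get_descriptor_set(0);
}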
/* Creates a new Vulkan buffer object and caches memory requirements for the created buffer.
 *
 * @param queue_families Queue families the buffer needs to support.
 * @param sharing_mode   Sharing mode the buffer needs to support.
 * @param size           Size of the buffer.
 **/
void Anvil::Buffer::create_buffer(Anvil::QueueFamilyBits queue_families,
                                  VkSharingMode          sharing_mode,
                                  VkDeviceSize           size)
{
    VkBufferCreateInfo buffer_create_info;
    uint32_t           n_queue_family_indices;
    uint32_t           queue_family_indices[8];
    VkResult           result;

    /* Determine which queues the buffer should be available to. */
    convert_queue_family_bits_to_family_indices(queue_families,
                                                queue_family_indices,
                                               &n_queue_family_indices);

    anvil_assert(n_queue_family_indices > 0);
    anvil_assert(n_queue_family_indices <= sizeof(queue_family_indices) / sizeof(queue_family_indices[0]) );

    /* Prepare the create info structure */
    buffer_create_info.flags                 = 0;
    buffer_create_info.pNext                 = nullptr;
    buffer_create_info.pQueueFamilyIndices   = queue_family_indices;
    buffer_create_info.queueFamilyIndexCount = n_queue_family_indices;
    buffer_create_info.sharingMode           = sharing_mode;
    buffer_create_info.size                  = size;
    buffer_create_info.sType                 = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_create_info.usage                 = m_usage_flags;

    /* Create the buffer object */
    result = vkCreateBuffer(m_device_ptr->get_device_vk(),
                           &buffer_create_info,
                            nullptr, /* pAllocator */
                           &m_buffer);
    anvil_assert_vk_call_succeeded(result);

    /* Cache buffer data memory requirements */
    vkGetBufferMemoryRequirements(m_device_ptr->get_device_vk(),
                                  m_buffer,
                                 &m_buffer_memory_reqs);
}
/* Please see header for specification */
VkDeviceSize Anvil::Buffer::get_start_offset() const
{
    if (m_memory_block_ptr != nullptr)
    {
        return m_start_offset;
    }
    else
    {
        anvil_assert(false);

        return static_cast<VkDeviceSize>(-1); /* VkDeviceSize is unsigned - this yields the max value as an error marker */
    }
}
/** TODO */
void Anvil::Swapchain::on_present_request_issued(Anvil::CallbackArgument* in_callback_raw_ptr)
{
    auto* callback_arg_ptr = dynamic_cast<Anvil::OnPresentRequestIssuedCallbackArgument*>(in_callback_raw_ptr);

    anvil_assert(callback_arg_ptr != nullptr);
    if (callback_arg_ptr != nullptr)
    {
        if (callback_arg_ptr->swapchain_ptr == this)
        {
            m_n_present_counter++;
        }
    }
}
void Anvil::SwapchainCreateInfo::set_view_format_list(const Anvil::Format* in_compatible_formats_ptr,
                                                      const uint32_t&      in_n_compatible_formats)
{
    anvil_assert(in_n_compatible_formats > 0);

    m_flags |= Anvil::SwapchainCreateFlagBits::CREATE_MUTABLE_FORMAT_BIT;

    m_compatible_formats.resize(in_n_compatible_formats);

    memcpy(&m_compatible_formats.at(0),
           in_compatible_formats_ptr,
           sizeof(Anvil::Format) * in_n_compatible_formats);
}
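/* Usage sketch for set_view_format_list() above - illustrative only; the Anvil::Format
 * enumerator spellings are assumed. Opts the swapchain into the mutable-format path so
 * image views may reinterpret the images as either UNORM or SRGB. */
static void example_enable_mutable_format(Anvil::SwapchainCreateInfo* create_info_ptr)
{
    const Anvil::Format compatible_formats[] =
    {
        Anvil::Format::B8G8R8A8_UNORM,
        Anvil::Format::B8G8R8A8_SRGB
    };

    create_info_ptr->set_view_format_list(compatible_formats,
                                          sizeof(compatible_formats) / sizeof(compatible_formats[0]) );
}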