Example #1
/* Please see header for specification */
bool Anvil::DescriptorPool::reset()
{
    VkResult result_vk;

    if (m_pool != VK_NULL_HANDLE)
    {
        result_vk = vkResetDescriptorPool(m_device_ptr->get_device_vk(),
                                          m_pool,
                                          0 /* flags */);
        anvil_assert_vk_call_succeeded(result_vk);

        if (is_vk_call_successful(result_vk) )
        {
            /* Allocated descriptor sets have gone out of scope. Send out a call-back, so that descriptor set
             * wrapper instances can mark themselves as unusable */
            callback(DESCRIPTOR_POOL_CALLBACK_ID_POOL_RESET,
                     this);
        }
    }
    else
    {
        result_vk = VK_SUCCESS;
    }

    return is_vk_call_successful(result_vk); 
}
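A possible call pattern, shown as a hedged sketch: pool_ptr and layout_ptr are hypothetical wrapper instances, and alloc_descriptor_sets() is the Anvil entry point shown in Example #5 below.

/* Hypothetical usage sketch: recycling a pool invalidates all previously allocated sets. */
VkDescriptorSet raw_set_vk = VK_NULL_HANDLE;

if (pool_ptr->reset() )
{
    /* Safe to re-allocate from scratch at this point */
    pool_ptr->alloc_descriptor_sets(1, /* n_sets */
                                   &layout_ptr,
                                   &raw_set_vk);
}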
Example #2
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info = {};
    VkResult               result        (VK_ERROR_DEVICE_LOST);

    switch (m_device_ptr->get_type() )
    {
        case Anvil::DEVICE_TYPE_SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);
    return is_vk_call_successful(result);
}
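Outside the wrapper, the same call boils down to filling VmaAllocatorCreateInfo by hand. A minimal sketch, assuming raw physical_device and device handles are available; note that newer VMA releases also expect fields such as instance and vulkanApiVersion to be set.

/* Standalone sketch (not Anvil code); assumes valid physical_device / device handles. */
#include "vk_mem_alloc.h"

VmaAllocatorCreateInfo allocator_create_info = {};
VmaAllocator           allocator             = VK_NULL_HANDLE;

allocator_create_info.physicalDevice = physical_device;
allocator_create_info.device         = device;

if (vmaCreateAllocator(&allocator_create_info, &allocator) == VK_SUCCESS)
{
    /* ... create buffers/images through the allocator ... */

    vmaDestroyAllocator(allocator);
}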
Example #3
/* Please see header for specification */
bool Anvil::Buffer::set_memory(Anvil::MemoryBlock* memory_block_ptr)
{
    bool     result = false;
    VkResult result_vk;

    if (memory_block_ptr == nullptr)
    {
        anvil_assert(memory_block_ptr != nullptr);

        goto end;
    }

    if (m_memory_block_ptr != nullptr)
    {
        anvil_assert(m_memory_block_ptr == nullptr);

        goto end;
    }

    /* Bind the memory object to the buffer object */
    m_memory_block_ptr = memory_block_ptr;
    m_memory_block_ptr->retain();

    result_vk = vkBindBufferMemory(m_device_ptr->get_device_vk(),
                                   m_buffer,
                                   m_memory_block_ptr->get_memory(),
                                   memory_block_ptr->get_start_offset() );
    anvil_assert_vk_call_succeeded(result_vk);

    result = is_vk_call_successful(result_vk);
end:
    return result;
}
Example #4
/** Please see header for specification */
bool Anvil::DescriptorSetLayout::init()
{
    bool     result    = false;
    VkResult result_vk;

    anvil_assert(m_layout == VK_NULL_HANDLE);

    /* Bake the Vulkan object */
    auto create_info_ptr = m_create_info_ptr->create_descriptor_set_layout_create_info(m_device_ptr);

    if (create_info_ptr == nullptr)
    {
        anvil_assert(create_info_ptr != nullptr);

        goto end;
    }

    result_vk = Anvil::Vulkan::vkCreateDescriptorSetLayout(m_device_ptr->get_device_vk(),
                                                           create_info_ptr->struct_chain_ptr->get_root_struct(),
                                                           nullptr, /* pAllocator */
                                                          &m_layout);

    anvil_assert_vk_call_succeeded(result_vk);
    if (is_vk_call_successful(result_vk) )
    {
        set_vk_handle(m_layout);
    }

    result = is_vk_call_successful(result_vk);

end:
    return result;
}
Example #5
/* Please see header for specification */
bool Anvil::DescriptorPool::alloc_descriptor_sets(uint32_t                     n_sets,
                                                  Anvil::DescriptorSetLayout** descriptor_set_layouts_ptr,
                                                  VkDescriptorSet*             out_descriptor_sets_vk_ptr)
{
    VkDescriptorSetAllocateInfo ds_alloc_info;
    VkResult                    result_vk;

    m_ds_layout_cache.resize(n_sets);

    for (uint32_t n_set = 0;
                  n_set < n_sets;
                ++n_set)
    {
        m_ds_layout_cache[n_set] = descriptor_set_layouts_ptr[n_set]->get_layout();
    }

    ds_alloc_info.descriptorPool     = m_pool;
    ds_alloc_info.descriptorSetCount = n_sets;
    ds_alloc_info.pNext              = nullptr;
    ds_alloc_info.pSetLayouts        = &m_ds_layout_cache[0];
    ds_alloc_info.sType              = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;

    result_vk = vkAllocateDescriptorSets(m_device_ptr->get_device_vk(),
                                        &ds_alloc_info,
                                         out_descriptor_sets_vk_ptr);

    anvil_assert_vk_call_succeeded(result_vk);
    return is_vk_call_successful(result_vk);
}
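A hedged usage sketch for the function above: two sets allocated against the same layout. pool_ptr and layout_ptr are hypothetical wrapper instances.

/* Hypothetical usage sketch for alloc_descriptor_sets(). */
Anvil::DescriptorSetLayout* layouts[2] = { layout_ptr, layout_ptr };
VkDescriptorSet             sets_vk[2] = { VK_NULL_HANDLE, VK_NULL_HANDLE };

if (pool_ptr->alloc_descriptor_sets(2, /* n_sets */
                                    layouts,
                                    sets_vk) )
{
    /* sets_vk[] may now be written via vkUpdateDescriptorSets() */
}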
Example #6
/* Please see header for specification */
void Anvil::DescriptorPool::bake()
{
    VkDescriptorPoolCreateInfo descriptor_pool_create_info;
    VkDescriptorPoolSize       descriptor_pool_sizes[VK_DESCRIPTOR_TYPE_RANGE_SIZE];
    uint32_t                   n_descriptor_types_used = 0;
    VkResult                   result_vk;

    if (m_pool != VK_NULL_HANDLE)
    {
        vkDestroyDescriptorPool(m_device_ptr->get_device_vk(),
                                m_pool,
                                nullptr /* pAllocator */);

        m_pool = VK_NULL_HANDLE;
    }

    /* Convert the counters to an arrayed, linear representation */
    for (uint32_t n_descriptor_type = 0;
                  n_descriptor_type < VK_DESCRIPTOR_TYPE_RANGE_SIZE;
                ++n_descriptor_type)
    {
        if (m_descriptor_count[n_descriptor_type] > 0)
        {
            uint32_t current_index = n_descriptor_types_used;

            descriptor_pool_sizes[current_index].descriptorCount = m_descriptor_count[n_descriptor_type];
            descriptor_pool_sizes[current_index].type            = (VkDescriptorType) n_descriptor_type;

            n_descriptor_types_used++;
        }
    }

    if (n_descriptor_types_used == 0)
    {
        /* If an empty pool is needed, request space for a single dummy descriptor. */
        descriptor_pool_sizes[0].descriptorCount = 1;
        descriptor_pool_sizes[0].type            = VK_DESCRIPTOR_TYPE_SAMPLER;

        n_descriptor_types_used = 1;
    }

    /* Set up the descriptor pool instance */
    descriptor_pool_create_info.flags         = (m_releaseable_sets) ? VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
                                                                     : 0;
    descriptor_pool_create_info.maxSets       = m_n_max_sets;
    descriptor_pool_create_info.pNext         = nullptr;
    descriptor_pool_create_info.poolSizeCount = n_descriptor_types_used;
    descriptor_pool_create_info.pPoolSizes    = descriptor_pool_sizes;
    descriptor_pool_create_info.sType         = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;

    result_vk = vkCreateDescriptorPool(m_device_ptr->get_device_vk(),
                                      &descriptor_pool_create_info,
                                       nullptr, /* pAllocator */
                                      &m_pool);

    anvil_assert_vk_call_succeeded(result_vk);

    m_baked = true;
}
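The m_releaseable_sets flag decides whether individual sets can later be freed. A sketch of what the two modes permit in raw Vulkan, assuming pool_create_flags, device, pool and set_vk handles are available:

/* Sketch (raw Vulkan, not Anvil code): consequences of the pool creation flag above. */
if ((pool_create_flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT) != 0)
{
    /* Per-set release is legal */
    vkFreeDescriptorSets(device, pool, 1, &set_vk);
}
else
{
    /* Only whole-pool recycling is legal */
    vkResetDescriptorPool(device, pool, 0 /* flags */);
}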
Example #7
/* Please see header for specification */
bool Anvil::Fence::reset_fences(const uint32_t in_n_fences,
                                Fence*         in_fences)
{
    const Anvil::BaseDevice* device_ptr           = nullptr;
    auto                     fence_cache          = std::vector<VkFence>(in_n_fences);
    const uint32_t           fence_cache_capacity = static_cast<uint32_t>(fence_cache.size() );
    bool                     result               = true;
    VkResult                 result_vk;

    if (in_n_fences == 0)
    {
        goto end;
    }

    for (uint32_t n_fence_batch = 0;
                  n_fence_batch < 1 + in_n_fences / fence_cache_capacity;
                ++n_fence_batch)
    {
        const uint32_t n_fences_remaining = std::min(in_n_fences - n_fence_batch * fence_cache_capacity,
                                                     fence_cache_capacity);

        if (n_fences_remaining == 0)
        {
            continue;
        }

        for (uint32_t n_fence = 0;
                      n_fence < n_fences_remaining;
                    ++n_fence)
        {
            Anvil::Fence& current_fence = in_fences[n_fence_batch * fence_cache_capacity + n_fence];

            anvil_assert(device_ptr == nullptr                     ||
                         device_ptr == current_fence.m_device_ptr);

            device_ptr           = current_fence.m_device_ptr;
            fence_cache[n_fence] = current_fence.m_fence;

            current_fence.lock();
        }
        {
            result_vk = vkResetFences(device_ptr->get_device_vk(),
                                      n_fences_remaining,
                                      (n_fences_remaining > 0) ? &fence_cache.at(0) : nullptr);
        }
        for (uint32_t n_fence = 0;
                      n_fence < n_fences_remaining;
                    ++n_fence)
        {
            Anvil::Fence& current_fence = in_fences[n_fence_batch * fence_cache_capacity + n_fence];

            current_fence.unlock();
        }

        anvil_assert_vk_call_succeeded(result_vk);

        if (!is_vk_call_successful(result_vk) )
        {
            result = false;
        }
    }

end:
    return result;
}
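In raw Vulkan, resetting is typically paired with a preceding wait; a minimal sketch assuming a device handle and a fences_vk array:

/* Sketch (raw Vulkan, not Anvil code): wait, then recycle the fences for reuse. */
VkResult res = vkWaitForFences(device, n_fences, fences_vk, VK_TRUE /* waitAll */, UINT64_MAX);

if (res == VK_SUCCESS)
{
    res = vkResetFences(device, n_fences, fences_vk);
}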
Example #8
/** Enumerates and caches all available physical devices. */
void Anvil::Instance::enumerate_physical_devices()
{
    std::vector<VkPhysicalDevice> devices;
    uint32_t                      n_physical_devices = 0;
    VkResult                      result             = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Retrieve physical device handles */
    result = vkEnumeratePhysicalDevices(m_instance,
                                       &n_physical_devices,
                                        nullptr); /* pPhysicalDevices */
    anvil_assert_vk_call_succeeded(result);

    if (n_physical_devices == 0)
    {
        fprintf(stderr, "No physical devices reported for the Vulkan instance\n");
        fflush (stderr);

        anvil_assert_fail();
    }

    devices.resize(n_physical_devices);

    result = vkEnumeratePhysicalDevices(m_instance,
                                       &n_physical_devices,
                                       &devices[0]);
    anvil_assert_vk_call_succeeded(result);

    /* Fill out internal physical device descriptors */
    for (unsigned int n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
    {
        std::unique_ptr<Anvil::PhysicalDevice> new_physical_device_ptr;

        new_physical_device_ptr = Anvil::PhysicalDevice::create(this,
                                      n_physical_device,
                                      devices[n_physical_device]);

        m_physical_devices.push_back(
            std::move(new_physical_device_ptr)
        );
    }
}
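Strictly speaking, the reported device count may change between the two calls; robust raw-Vulkan code loops until the fill call stops returning VK_INCOMPLETE. A hedged sketch:

/* Sketch (raw Vulkan): enumeration that tolerates the count changing mid-flight. */
uint32_t                      n_devices = 0;
std::vector<VkPhysicalDevice> devices_vk;
VkResult                      res;

do
{
    vkEnumeratePhysicalDevices(instance, &n_devices, nullptr);

    devices_vk.resize(n_devices);

    res = vkEnumeratePhysicalDevices(instance, &n_devices, devices_vk.data() );
}
while (res == VK_INCOMPLETE);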
Example #9
/** Enumerates all available extensions for the specified layer. The enumerated extensions
 *  will be stored in the specified Layer descriptor.
 *
 *  @param in_layer_ptr Layer to enumerate the extensions for. If nullptr, global
 *                      (instance-level) extensions will be retrieved instead.
 **/
void Anvil::Instance::enumerate_layer_extensions(Anvil::Layer* in_layer_ptr)
{
    uint32_t n_extensions = 0;
    VkResult result       = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Check if the layer supports any extensions at all */
    const char* layer_name = nullptr;

    if (in_layer_ptr == nullptr)
    {
        in_layer_ptr = &m_global_layer;
    }

    layer_name = in_layer_ptr->name.c_str();
    result     = vkEnumerateInstanceExtensionProperties(layer_name,
                                                       &n_extensions,
                                                        nullptr); /* pProperties */

    anvil_assert_vk_call_succeeded(result);

    if (n_extensions > 0)
    {
        std::vector<VkExtensionProperties> extension_props;

        extension_props.resize(n_extensions);

        result = vkEnumerateInstanceExtensionProperties(layer_name,
                                                       &n_extensions,
                                                       &extension_props[0]);

        anvil_assert_vk_call_succeeded(result);

        /* Convert raw extension props data to internal descriptors */
        for (uint32_t n_extension = 0;
                      n_extension < n_extensions;
                    ++n_extension)
        {
            in_layer_ptr->extensions.push_back(extension_props[n_extension].extensionName);
        }
    }
}
Example #10
/** Enumerates and caches all layers supported by the Vulkan Instance. */
void Anvil::Instance::enumerate_instance_layers()
{
    std::vector<VkLayerProperties> layer_props;
    uint32_t                       n_layers    = 0;
    VkResult                       result      = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Retrieve layer data */
    result = vkEnumerateInstanceLayerProperties(&n_layers,
                                                nullptr); /* pProperties */
    anvil_assert_vk_call_succeeded(result);

    layer_props.resize(n_layers + 1 /* global layer */);

    result = vkEnumerateInstanceLayerProperties(&n_layers,
                                               &layer_props[0]);

    anvil_assert_vk_call_succeeded(result);

    /* Convert raw layer props data to internal descriptors */
    for (uint32_t n_layer = 0;
                  n_layer < n_layers + 1;
                ++n_layer)
    {
        Anvil::Layer* layer_ptr = nullptr;

        if (n_layer < n_layers)
        {
            m_supported_layers.push_back(Anvil::Layer(layer_props[n_layer]) );

            layer_ptr = &m_supported_layers[n_layer];
        }

        enumerate_layer_extensions(layer_ptr);
    }
}
Example #11
/* Please see header for specification */
bool Anvil::Event::set()
{
    VkResult result;

    lock();
    {
        result = vkSetEvent(m_device_ptr->get_device_vk(),
                            m_event);
    }
    unlock();

    anvil_assert_vk_call_succeeded(result);

    return (result == VK_SUCCESS);
}
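The host-side counterpart for inspecting the event signalled above; a sketch assuming raw device/event handles:

/* Sketch (raw Vulkan, not Anvil code): polling & clearing the event state. */
if (vkGetEventStatus(device, event) == VK_EVENT_SET)
{
    vkResetEvent(device, event); /* back to the unsignaled state */
}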
Example #12
/** Initializes debug callback support. */
void Anvil::Instance::init_debug_callbacks()
{
    VkResult result = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Set up the debug call-backs, while we're at it */
    VkDebugReportCallbackCreateInfoEXT debug_report_callback_create_info;

    debug_report_callback_create_info.flags       = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
    debug_report_callback_create_info.pfnCallback = debug_callback_pfn_proc;
    debug_report_callback_create_info.pNext       = nullptr;
    debug_report_callback_create_info.pUserData   = this;
    debug_report_callback_create_info.sType       = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;

    result = m_ext_debug_report_entrypoints.vkCreateDebugReportCallbackEXT(m_instance,
                                                                          &debug_report_callback_create_info,
                                                                           nullptr, /* pAllocator */
                                                                          &m_debug_callback_data);
    anvil_assert_vk_call_succeeded(result);
}
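For reference, debug_callback_pfn_proc must match PFN_vkDebugReportCallbackEXT; a minimal compatible callback might look as follows (the body is illustrative only). Note also that VK_EXT_debug_report has since been deprecated in favor of VK_EXT_debug_utils.

/* Sketch: a PFN_vkDebugReportCallbackEXT-compatible callback (illustrative body). */
VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_pfn_proc(VkDebugReportFlagsEXT      flags,
                                                       VkDebugReportObjectTypeEXT object_type,
                                                       uint64_t                   object,
                                                       size_t                     location,
                                                       int32_t                    message_code,
                                                       const char*                layer_prefix_ptr,
                                                       const char*                message_ptr,
                                                       void*                      user_data_ptr)
{
    fprintf(stderr,
            "[%s] %s\n",
            layer_prefix_ptr,
            message_ptr);

    return VK_FALSE; /* do not abort the call that triggered the report */
}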
Example #13
/* Creates a new Vulkan buffer object and caches memory requirements for the created buffer.
 *
 * @param queue_families Queue families the buffer needs to support.
 * @param sharing_mode   Sharing mode the buffer needs to support.
 * @param size           Size of the buffer.
 **/
void Anvil::Buffer::create_buffer(Anvil::QueueFamilyBits queue_families,
                                  VkSharingMode          sharing_mode,
                                  VkDeviceSize           size)
{
    VkBufferCreateInfo buffer_create_info;
    uint32_t           n_queue_family_indices;
    uint32_t           queue_family_indices[8];
    VkResult           result;

    /* Determine which queues the buffer should be available to. */
    convert_queue_family_bits_to_family_indices(queue_families,
                                                queue_family_indices,
                                               &n_queue_family_indices);

    anvil_assert(n_queue_family_indices > 0);
    anvil_assert(n_queue_family_indices <= sizeof(queue_family_indices) / sizeof(queue_family_indices[0]) );

    /* Prepare the create info structure */
    buffer_create_info.flags                 = 0;
    buffer_create_info.pNext                 = nullptr;
    buffer_create_info.pQueueFamilyIndices   = queue_family_indices;
    buffer_create_info.queueFamilyIndexCount = n_queue_family_indices;
    buffer_create_info.sharingMode           = sharing_mode;
    buffer_create_info.size                  = size;
    buffer_create_info.sType                 = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_create_info.usage                 = m_usage_flags;

    /* Create the buffer object */
    result = vkCreateBuffer(m_device_ptr->get_device_vk(),
                           &buffer_create_info,
                            nullptr, /* pAllocator */
                           &m_buffer);
    anvil_assert_vk_call_succeeded(result);

    /* Cache buffer data memory requirements */
    vkGetBufferMemoryRequirements(m_device_ptr->get_device_vk(),
                                  m_buffer,
                                 &m_buffer_memory_reqs);
}
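The cached m_buffer_memory_reqs typically feeds memory-type selection next. A hedged sketch of that step in raw Vulkan, assuming memory_reqs holds the cached requirements and mem_props was filled via vkGetPhysicalDeviceMemoryProperties():

/* Sketch (raw Vulkan, not Anvil code): pick a device-local memory type for the buffer. */
uint32_t memory_type_index = UINT32_MAX;

for (uint32_t n_type = 0;
              n_type < mem_props.memoryTypeCount;
            ++n_type)
{
    if ((memory_reqs.memoryTypeBits & (1u << n_type)) != 0 &&
        (mem_props.memoryTypes[n_type].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    {
        memory_type_index = n_type;

        break;
    }
}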
Example #14
/** Please see header for specification */
Anvil::PipelineCache::PipelineCache(const Anvil::BaseDevice* in_device_ptr,
                                    bool                     in_mt_safe,
                                    size_t                   in_initial_data_size,
                                    const void*              in_initial_data)
    :DebugMarkerSupportProvider(in_device_ptr,
                                Anvil::ObjectType::PIPELINE_CACHE),
     MTSafetySupportProvider   (in_mt_safe),
     m_device_ptr              (in_device_ptr),
     m_pipeline_cache          (VK_NULL_HANDLE)
{
    VkPipelineCacheCreateInfo cache_create_info;
    VkResult                  result_vk        (VK_ERROR_INITIALIZATION_FAILED);

    ANVIL_REDUNDANT_VARIABLE(result_vk);

    cache_create_info.flags           = 0;
    cache_create_info.initialDataSize = in_initial_data_size;
    cache_create_info.pInitialData    = in_initial_data;
    cache_create_info.pNext           = nullptr;
    cache_create_info.sType           = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;

    result_vk = Anvil::Vulkan::vkCreatePipelineCache(m_device_ptr->get_device_vk(),
                                                    &cache_create_info,
                                                     nullptr, /* pAllocator */
                                                    &m_pipeline_cache);

    anvil_assert_vk_call_succeeded(result_vk);
    if (is_vk_call_successful(result_vk) )
    {
        set_vk_handle(m_pipeline_cache);
    }

    anvil_assert(m_pipeline_cache != VK_NULL_HANDLE);

    /* Register the instance */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectType::PIPELINE_CACHE,
                                                  this);
}
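The in_initial_data* arguments exist so a cache can round-trip across runs; retrieving the blob is a standard two-call query. A sketch in raw Vulkan, assuming device and pipeline_cache handles:

/* Sketch (raw Vulkan, not Anvil code): serialize the pipeline cache for the next run. */
size_t data_size = 0;

vkGetPipelineCacheData(device, pipeline_cache, &data_size, nullptr);

std::vector<uint8_t> data(data_size);

vkGetPipelineCacheData(device, pipeline_cache, &data_size, data.data() );

/* Persist 'data'; pass it back later as in_initial_data / in_initial_data_size. */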
Example #15
bool Anvil::Event::init()
{
    VkEventCreateInfo event_create_info;
    VkResult          result           (VK_ERROR_INITIALIZATION_FAILED);

    /* Spawn a new event */
    event_create_info.flags = 0;
    event_create_info.pNext = nullptr;
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;

    result = vkCreateEvent(m_device_ptr->get_device_vk(),
                          &event_create_info,
                           nullptr, /* pAllocator */
                          &m_event);

    anvil_assert_vk_call_succeeded(result);
    if (is_vk_call_successful(result) )
    {
        set_vk_handle(m_event);
    }

    return is_vk_call_successful(result);
}
Example #16
/** Initializes the wrapper. */
void Anvil::Instance::init(const std::vector<std::string>& in_disallowed_instance_level_extensions)
{
    VkApplicationInfo           app_info;
    VkInstanceCreateInfo        create_info;
    std::vector<const char*>    enabled_layers;
    std::map<std::string, bool> extension_enabled_status;
    size_t                      n_instance_layers        = 0;
    VkResult                    result                   = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Enumerate available layers */
    enumerate_instance_layers();

    /* Determine what extensions we need to request at instance creation time */
    static const char* desired_extensions_with_validation[] =
    {
        VK_KHR_SURFACE_EXTENSION_NAME,

        #ifdef _WIN32
            #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_WIN32_SURFACE_EXTENSION_NAME,
            #endif
        #else
            #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_XCB_SURFACE_EXTENSION_NAME,
            #endif
        #endif

        VK_EXT_DEBUG_REPORT_EXTENSION_NAME
    };
    static const char* desired_extensions_without_validation[] =
    {
        VK_KHR_SURFACE_EXTENSION_NAME,

        #ifdef _WIN32
            #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_WIN32_SURFACE_EXTENSION_NAME,
            #endif
        #else
            #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_XCB_SURFACE_EXTENSION_NAME,
            #endif
        #endif
    };

    /* Set up the app info descriptor */
    app_info.apiVersion         = VK_MAKE_VERSION(1, 0, 0);
    app_info.applicationVersion = 0;
    app_info.engineVersion      = 0;
    app_info.pApplicationName   = m_app_name.c_str();
    app_info.pEngineName        = m_engine_name.c_str();
    app_info.pNext              = nullptr;
    app_info.sType              = VK_STRUCTURE_TYPE_APPLICATION_INFO;

    /* Set up the create info descriptor */
    memset(&create_info,
           0,
           sizeof(create_info) );

    n_instance_layers = static_cast<uint32_t>(m_supported_layers.size() );

    for (size_t  n_instance_layer = 0;
                 n_instance_layer < n_instance_layers;
               ++n_instance_layer)
    {
        const std::string& layer_description = m_supported_layers[n_instance_layer].description;
        const std::string& layer_name        = m_supported_layers[n_instance_layer].name;

        /* If validation is enabled and this is a layer which issues debug call-backs, cache it, so that
         * we can request for it at vkCreateInstance() call time */
        if (m_validation_callback_function       != nullptr          &&
            layer_description.find("Validation") != std::string::npos)
        {
            enabled_layers.push_back(layer_name.c_str() );
        }
    }

    {
        if (m_validation_callback_function != nullptr)
        {
            for (uint32_t n_extension = 0;
                          n_extension < sizeof(desired_extensions_with_validation) / sizeof(desired_extensions_with_validation[0]);
                        ++n_extension)
            {
                if (is_instance_extension_supported(desired_extensions_with_validation[n_extension]))
                {
                    extension_enabled_status[desired_extensions_with_validation[n_extension] ] = true;
                }
            }
        }
        else
        {
            for (uint32_t n_extension = 0;
                          n_extension < sizeof(desired_extensions_without_validation) / sizeof(desired_extensions_without_validation[0]);
                        ++n_extension)
            {
                if (is_instance_extension_supported(desired_extensions_without_validation[n_extension]))
                {
                    extension_enabled_status[desired_extensions_without_validation[n_extension] ] = true;
                }
            }
        }

        /* Enable known instance-level extensions by default */
        if (is_instance_extension_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME] = true;
        }

        if (is_instance_extension_supported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME] = true;
        }

        if (is_instance_extension_supported(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME] = true;
        }

        if (is_instance_extension_supported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME] = true;
        }

        /* Filter out undesired extensions */
        for (const auto& current_extension_name : in_disallowed_instance_level_extensions)
        {
            auto ext_iterator = extension_enabled_status.find(current_extension_name);

            if (ext_iterator != extension_enabled_status.end() )
            {
                extension_enabled_status.erase(ext_iterator);
            }
        }

        m_enabled_extensions_info_ptr = Anvil::ExtensionInfo<bool>::create_instance_extension_info(extension_enabled_status,
                                                                                                   false); /* in_unspecified_extension_name_value */
    }

    /* We're ready to create a new Vulkan instance */
    std::vector<const char*> enabled_extensions_raw;

    for (auto& ext_name : extension_enabled_status)
    {
        enabled_extensions_raw.push_back(ext_name.first.c_str() );
    }

    create_info.enabledExtensionCount   = static_cast<uint32_t>(enabled_extensions_raw.size() );
    create_info.enabledLayerCount       = static_cast<uint32_t>(enabled_layers.size() );
    create_info.flags                   = 0;
    create_info.pApplicationInfo        = &app_info;
    create_info.pNext                   = nullptr;
    create_info.ppEnabledExtensionNames = (enabled_extensions_raw.size() > 0) ? &enabled_extensions_raw[0] : nullptr;
    create_info.ppEnabledLayerNames     = (enabled_layers.size()         > 0) ? &enabled_layers        [0] : nullptr;
    create_info.sType                   = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;

    result = vkCreateInstance(&create_info,
                              nullptr, /* pAllocator */
                              &m_instance);

    anvil_assert_vk_call_succeeded(result);

    /* Continue initializing */
    init_func_pointers();

    if (m_validation_callback_function != nullptr)
    {
        init_debug_callbacks();
    }

    enumerate_physical_devices();
}
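is_instance_extension_supported() is Anvil's own helper; conceptually it reduces to a name lookup over the enumerated extension properties. A hypothetical standalone equivalent:

/* Hypothetical helper sketch (not the Anvil implementation). */
#include <cstring>

bool is_extension_listed(const std::vector<VkExtensionProperties>& extension_props,
                         const char*                               name_ptr)
{
    for (const auto& current_props : extension_props)
    {
        if (strcmp(current_props.extensionName, name_ptr) == 0)
        {
            return true;
        }
    }

    return false;
}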
Example #17
/* Please see header for specification */
bool Anvil::Semaphore::reset()
{
    VkResult                                           result           (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkSemaphoreCreateInfo>        struct_chainer;
    Anvil::StructChainUniquePtr<VkSemaphoreCreateInfo> struct_chain_ptr;

    release_semaphore();

    /* Sanity checks */
    if (m_create_info_ptr->get_exportable_external_semaphore_handle_types() != Anvil::ExternalSemaphoreHandleTypeFlagBits::NONE)
    {
        if (!m_device_ptr->get_extension_info()->khr_external_semaphore() )
        {
            anvil_assert(m_device_ptr->get_extension_info()->khr_external_semaphore() );

            goto end;
        }
    }

    /* Spawn a new semaphore */
    {
        VkSemaphoreCreateInfo semaphore_create_info;

        semaphore_create_info.flags = 0;
        semaphore_create_info.pNext = nullptr;
        semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;

        struct_chainer.append_struct(semaphore_create_info);
    }

    if (m_create_info_ptr->get_exportable_external_semaphore_handle_types() != Anvil::ExternalSemaphoreHandleTypeFlagBits::NONE)
    {
        VkExportSemaphoreCreateInfo create_info;

        create_info.handleTypes = m_create_info_ptr->get_exportable_external_semaphore_handle_types().get_vk();
        create_info.pNext       = nullptr;
        create_info.sType       = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;

        struct_chainer.append_struct(create_info);
    }

    #if defined(_WIN32)
    {
        const Anvil::ExternalNTHandleInfo* nt_handle_info_ptr = nullptr;

        if (m_create_info_ptr->get_exportable_nt_handle_info(&nt_handle_info_ptr) )
        {
            VkExportSemaphoreWin32HandleInfoKHR handle_info;

            anvil_assert( nt_handle_info_ptr                                                                                                                   != nullptr);
            anvil_assert(((m_create_info_ptr->get_exportable_external_semaphore_handle_types() & Anvil::ExternalSemaphoreHandleTypeFlagBits::OPAQUE_WIN32_BIT) != 0)       ||
                         ((m_create_info_ptr->get_exportable_external_semaphore_handle_types() & Anvil::ExternalSemaphoreHandleTypeFlagBits::D3D12_FENCE_BIT)  != 0));

            handle_info.dwAccess    = nt_handle_info_ptr->access;
            handle_info.name        = (nt_handle_info_ptr->name.size() > 0) ? &nt_handle_info_ptr->name.at(0)
                                                                            : nullptr;
            handle_info.pAttributes = nt_handle_info_ptr->attributes_ptr;
            handle_info.pNext       = nullptr;
            handle_info.sType       = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;

            struct_chainer.append_struct(handle_info);
        }
    }
    #endif

    struct_chain_ptr = struct_chainer.create_chain();
    if (struct_chain_ptr == nullptr)
    {
        anvil_assert(struct_chain_ptr != nullptr);

        goto end;
    }

    result = Anvil::Vulkan::vkCreateSemaphore(m_device_ptr->get_device_vk(),
                                              struct_chain_ptr->get_root_struct(),
                                              nullptr, /* pAllocator */
                                             &m_semaphore);

    anvil_assert_vk_call_succeeded(result);
    if (is_vk_call_successful(result) )
    {
        set_vk_handle(m_semaphore);
    }

end:
    return is_vk_call_successful(result);
}
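For readers unfamiliar with StructChainer: it assembles an ordinary Vulkan pNext chain. The same chain built by hand, as a sketch using an opaque-FD export and a hypothetical device handle:

/* Sketch (raw Vulkan, not Anvil code): the pNext chain assembled manually. */
VkExportSemaphoreCreateInfo export_info;
VkSemaphoreCreateInfo       semaphore_create_info;
VkSemaphore                 semaphore = VK_NULL_HANDLE;

export_info.sType       = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
export_info.pNext       = nullptr;
export_info.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;

semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &export_info; /* chained export descriptor */
semaphore_create_info.flags = 0;

vkCreateSemaphore(device, &semaphore_create_info, nullptr /* pAllocator */, &semaphore);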
Example #18
bool Anvil::Fence::init()
{
    VkFenceCreateInfo                              fence_create_info;
    VkResult                                       result           (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkFenceCreateInfo>        struct_chainer;
    Anvil::StructChainUniquePtr<VkFenceCreateInfo> struct_chain_ptr;

    /* Sanity checks */
    if (m_create_info_ptr->get_exportable_external_fence_handle_types() != Anvil::EXTERNAL_FENCE_HANDLE_TYPE_NONE)
    {
        if (!m_device_ptr->get_extension_info()->khr_external_fence() )
        {
            anvil_assert(m_device_ptr->get_extension_info()->khr_external_fence() );

            goto end;
        }
    }

    /* Spawn a new fence */
    {
        fence_create_info.flags = (m_create_info_ptr->should_create_signalled() ) ? VK_FENCE_CREATE_SIGNALED_BIT
                                                                                  : 0u;
        fence_create_info.pNext = nullptr;
        fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

        struct_chainer.append_struct(fence_create_info);
    }

    if (m_create_info_ptr->get_exportable_external_fence_handle_types() != Anvil::EXTERNAL_FENCE_HANDLE_TYPE_NONE)
    {
        VkExportFenceCreateInfo create_info;

        create_info.handleTypes = Anvil::Utils::convert_external_fence_handle_type_bits_to_vk_external_fence_handle_type_flags(m_create_info_ptr->get_exportable_external_fence_handle_types() );
        create_info.pNext       = nullptr;
        create_info.sType       = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR;

        struct_chainer.append_struct(create_info);
    }

    #if defined(_WIN32)
    {
        const Anvil::ExternalNTHandleInfo* nt_handle_info_ptr = nullptr;

        if (m_create_info_ptr->get_exportable_nt_handle_info(&nt_handle_info_ptr) )
        {
            VkExportFenceWin32HandleInfoKHR handle_info;

            anvil_assert(nt_handle_info_ptr                                                                                                   != nullptr);
            anvil_assert(m_create_info_ptr->get_exportable_external_fence_handle_types() & Anvil::EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT);

            handle_info.dwAccess    = nt_handle_info_ptr->access;
            handle_info.name        = (nt_handle_info_ptr->name.size() > 0) ? &nt_handle_info_ptr->name.at(0)
                                                                            : nullptr;
            handle_info.pAttributes = nt_handle_info_ptr->attributes_ptr;
            handle_info.pNext       = nullptr;
            handle_info.sType       = VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR;

            struct_chainer.append_struct(handle_info);
        }
    }
    #endif

    struct_chain_ptr = struct_chainer.create_chain();
    if (struct_chain_ptr == nullptr)
    {
        anvil_assert(struct_chain_ptr != nullptr);

        goto end;
    }

    result = vkCreateFence(m_device_ptr->get_device_vk(),
                           struct_chain_ptr->get_root_struct(),
                           nullptr, /* pAllocator */
                          &m_fence);

    anvil_assert_vk_call_succeeded(result);
    if (is_vk_call_successful(result) )
    {
        set_vk_handle(m_fence);
    }

end:
    return is_vk_call_successful(result);
}
Example #19
/* Please see header for specification */
bool Anvil::RenderPass::init()
{
    std::vector<VkAttachmentDescription> renderpass_attachments_vk;
    VkRenderPassCreateInfo               render_pass_create_info;
    bool                                 result                           (false);
    VkResult                             result_vk;
    std::vector<VkSubpassDependency>     subpass_dependencies_vk;
    std::vector<VkSubpassDescription>    subpass_descriptions_vk;

    /* NOTE: We need to reserve storage in advance for each of the vectors below,
     *       so that it is guaranteed the push_back() calls do not cause a realloc()
     *       and invalidate already cached pointers to filled Vulkan descriptors.
     *       To achieve this, we could encapsulate the code below in a two-iteration loop,
     *       whose first iteration would count how many elements we need for each vector,
     *       and the second one would reserve that space and proceed with inserting the elements.
     *
     *       That would look ugly though.
     *
     *       In order to keep things clean & simple, we instantiate the following structure on heap
     *       for each subpass. On subpass level, we can easily predict how many elements in the worst
     *       case scenario we're going to insert, so that will do the trick. Slight performance cost,
     *       but baking is an offline task, so we should be OK.
     **/
    typedef struct SubPassAttachmentSet
    {
        /** Constructor.
         *
         *  @param in_n_max_color_attachments    Maximum number of color attachments the subpass will define.
         *  @param in_n_max_input_attachments    Maximum number of input attachments the subpass will define.
         *  @param in_n_max_preserve_attachments Maximum number of preserve attachments the subpass will define.
         **/
        explicit SubPassAttachmentSet(uint32_t in_n_max_color_attachments,
                                      uint32_t in_n_max_input_attachments,
                                      uint32_t in_n_max_preserve_attachments)
            :n_max_color_attachments   (in_n_max_color_attachments),
             n_max_input_attachments   (in_n_max_input_attachments),
             n_max_preserve_attachments(in_n_max_preserve_attachments)
        {
            color_attachments_vk.reserve        (n_max_color_attachments);
            input_attachments_vk.reserve        (n_max_input_attachments);
            preserve_attachments_vk.reserve     (n_max_preserve_attachments);
            resolve_color_attachments_vk.reserve(n_max_color_attachments);
        }

        /** Helper function which verifies the maximum number of attachments specified at
         *  creation time is not exceeded.
         **/
        void do_sanity_checks()
        {
            anvil_assert(color_attachments_vk.size()         <= n_max_color_attachments);
            anvil_assert(input_attachments_vk.size()         <= n_max_input_attachments);
            anvil_assert(preserve_attachments_vk.size()      <= n_max_preserve_attachments);
            anvil_assert(resolve_color_attachments_vk.size() <= n_max_color_attachments);
        }

        std::vector<VkAttachmentReference> color_attachments_vk;
        VkAttachmentReference              depth_attachment_vk;
        std::vector<VkAttachmentReference> input_attachments_vk;
        std::vector<uint32_t>              preserve_attachments_vk;
        std::vector<VkAttachmentReference> resolve_color_attachments_vk;
    private:
        uint32_t n_max_color_attachments;
        uint32_t n_max_input_attachments;
        uint32_t n_max_preserve_attachments;
    } SubPassAttachmentSet;

    std::vector<std::unique_ptr<SubPassAttachmentSet> > subpass_attachment_sets;

    anvil_assert(m_render_pass == VK_NULL_HANDLE);

    /* Set up helper descriptor storage space */
    subpass_dependencies_vk.reserve(m_render_pass_create_info_ptr->m_subpass_dependencies.size() );
    subpass_descriptions_vk.reserve(m_render_pass_create_info_ptr->m_subpasses.size() );

    for (auto renderpass_attachment_iterator  = m_render_pass_create_info_ptr->m_attachments.cbegin();
              renderpass_attachment_iterator != m_render_pass_create_info_ptr->m_attachments.cend();
            ++renderpass_attachment_iterator)
    {
        VkAttachmentDescription attachment_vk;

        attachment_vk.finalLayout    = renderpass_attachment_iterator->final_layout;
        attachment_vk.flags          = (renderpass_attachment_iterator->may_alias) ? VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT
                                                                                   : 0u;
        attachment_vk.format         = renderpass_attachment_iterator->format;
        attachment_vk.initialLayout  = renderpass_attachment_iterator->initial_layout;
        attachment_vk.loadOp         = renderpass_attachment_iterator->color_depth_load_op;
        attachment_vk.samples        = static_cast<VkSampleCountFlagBits>(renderpass_attachment_iterator->sample_count);
        attachment_vk.stencilLoadOp  = renderpass_attachment_iterator->stencil_load_op;
        attachment_vk.stencilStoreOp = renderpass_attachment_iterator->stencil_store_op;
        attachment_vk.storeOp        = renderpass_attachment_iterator->color_depth_store_op;

        renderpass_attachments_vk.push_back(attachment_vk);
    }

    for (auto subpass_dependency_iterator  = m_render_pass_create_info_ptr->m_subpass_dependencies.cbegin();
              subpass_dependency_iterator != m_render_pass_create_info_ptr->m_subpass_dependencies.cend();
            ++subpass_dependency_iterator)
    {
        VkSubpassDependency dependency_vk;

        dependency_vk.dependencyFlags = ((subpass_dependency_iterator->by_region) ? VK_DEPENDENCY_BY_REGION_BIT : 0u);
        dependency_vk.dstAccessMask   = subpass_dependency_iterator->destination_access_mask;
        dependency_vk.dstStageMask    = subpass_dependency_iterator->destination_stage_mask;
        dependency_vk.dstSubpass      = (subpass_dependency_iterator->destination_subpass_ptr != nullptr) ? subpass_dependency_iterator->destination_subpass_ptr->index
                                                                                                         : VK_SUBPASS_EXTERNAL;
        dependency_vk.srcAccessMask   = subpass_dependency_iterator->source_access_mask;
        dependency_vk.srcStageMask    = subpass_dependency_iterator->source_stage_mask;
        dependency_vk.srcSubpass      = (subpass_dependency_iterator->source_subpass_ptr != nullptr) ? subpass_dependency_iterator->source_subpass_ptr->index
                                                                                                    : VK_SUBPASS_EXTERNAL;

        subpass_dependencies_vk.push_back(dependency_vk);
    }

    /* We now have all the data needed to create Vulkan subpass instances. */
    for (auto subpass_iterator  = m_render_pass_create_info_ptr->m_subpasses.cbegin();
              subpass_iterator != m_render_pass_create_info_ptr->m_subpasses.cend();
            ++subpass_iterator)
    {
        std::unique_ptr<SubPassAttachmentSet> current_subpass_attachment_set_ptr;
        uint32_t                              highest_subpass_color_attachment_location = UINT32_MAX;
        uint32_t                              highest_subpass_input_attachment_index    = UINT32_MAX;
        bool                                  need_color_resolve_attachments            = false;
        VkSubpassDescription                  subpass_vk;
        VkAttachmentReference                 unused_reference;

        unused_reference.attachment = VK_ATTACHMENT_UNUSED;
        unused_reference.layout     = VK_IMAGE_LAYOUT_UNDEFINED;

        /* Determine whether any of the color attachments are going to be resolved. */
        for (auto subpass_color_attachment_iterator  = (*subpass_iterator)->color_attachments_map.cbegin();
                  subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend();
                ++subpass_color_attachment_iterator)
        {
            if (subpass_color_attachment_iterator->second.resolve_attachment_index != UINT32_MAX)
            {
                need_color_resolve_attachments = true;

                break;
            }
        }

        /* Determine the highest color attachment location & input attachment index. */
        for (auto subpass_color_attachment_iterator  = (*subpass_iterator)->color_attachments_map.cbegin();
                  subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend();
                ++subpass_color_attachment_iterator)
        {
            if (highest_subpass_color_attachment_location == UINT32_MAX                                ||
                subpass_color_attachment_iterator->first  > highest_subpass_color_attachment_location)
            {
                highest_subpass_color_attachment_location = subpass_color_attachment_iterator->first;
            }
        }

        for (auto subpass_input_attachment_iterator  = (*subpass_iterator)->input_attachments_map.cbegin();
                  subpass_input_attachment_iterator != (*subpass_iterator)->input_attachments_map.cend();
                ++subpass_input_attachment_iterator)
        {
            if (highest_subpass_input_attachment_index   == UINT32_MAX                               ||
                subpass_input_attachment_iterator->first >  highest_subpass_input_attachment_index)
            {
                highest_subpass_input_attachment_index = subpass_input_attachment_iterator->first;
            }
        }

        /* Instantiate a new subpass attachment set for current subpass */
        current_subpass_attachment_set_ptr.reset(
            new SubPassAttachmentSet(highest_subpass_color_attachment_location + 1,                             /* n_max_color_attachments     */
                                     static_cast<uint32_t>((*subpass_iterator)->input_attachments_map.size() ), /* n_max_input_attachments     */
                                     static_cast<uint32_t>((*subpass_iterator)->preserved_attachments.size() )  /* n_max_preserved_attachments */)
        );

        /* Prepare unused VK color, depth, input & resolve attachment descriptors */
        for (uint32_t n_color_attachment = 0;
                      n_color_attachment < static_cast<uint32_t>(highest_subpass_color_attachment_location + 1);
                    ++n_color_attachment)
        {
            current_subpass_attachment_set_ptr->color_attachments_vk.push_back(unused_reference);

            if (need_color_resolve_attachments)
            {
                current_subpass_attachment_set_ptr->resolve_color_attachments_vk.push_back(unused_reference);
            }
        }

        for (uint32_t n_input_attachment = 0;
                      n_input_attachment < static_cast<uint32_t>(highest_subpass_input_attachment_index + 1);
                    ++n_input_attachment)
        {
            current_subpass_attachment_set_ptr->input_attachments_vk.push_back(unused_reference);
        }

        /* Update those of the color/depth/input references, for which we have been provided actual descriptors */
        for (auto subpass_color_attachment_iterator  = (*subpass_iterator)->color_attachments_map.cbegin();
                  subpass_color_attachment_iterator != (*subpass_iterator)->color_attachments_map.cend();
                ++subpass_color_attachment_iterator)
        {
            current_subpass_attachment_set_ptr->color_attachments_vk[subpass_color_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment(subpass_color_attachment_iterator->second);

            if (need_color_resolve_attachments)
            {
                if (subpass_color_attachment_iterator->second.resolve_attachment_index != UINT32_MAX)
                {
                    current_subpass_attachment_set_ptr->resolve_color_attachments_vk[subpass_color_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_for_resolve_attachment(subpass_iterator,
                                                                                                                                                                                                                subpass_color_attachment_iterator);
                }
            }
        }

        if ((*subpass_iterator)->depth_stencil_attachment.attachment_index != UINT32_MAX)
        {
            current_subpass_attachment_set_ptr->depth_attachment_vk = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment((*subpass_iterator)->depth_stencil_attachment);
        }
        else
        {
            current_subpass_attachment_set_ptr->depth_attachment_vk = unused_reference;
        }

        for (auto subpass_input_attachment_iterator  = (*subpass_iterator)->input_attachments_map.cbegin();
                  subpass_input_attachment_iterator != (*subpass_iterator)->input_attachments_map.cend();
                ++subpass_input_attachment_iterator)
        {
            current_subpass_attachment_set_ptr->input_attachments_vk[subpass_input_attachment_iterator->first] = m_render_pass_create_info_ptr->get_attachment_reference_from_subpass_attachment(subpass_input_attachment_iterator->second);
        }

        /* Fill the preserved attachments vector. These do not use indices or locations, so the process is much simpler */
        for (auto subpass_preserve_attachment_iterator  = (*subpass_iterator)->preserved_attachments.cbegin();
                  subpass_preserve_attachment_iterator != (*subpass_iterator)->preserved_attachments.cend();
                ++subpass_preserve_attachment_iterator)
        {
            current_subpass_attachment_set_ptr->preserve_attachments_vk.push_back(
                m_render_pass_create_info_ptr->m_attachments.at(subpass_preserve_attachment_iterator->attachment_index).index
            );
        }

        /* Prepare the VK subpass descriptor */
        const uint32_t n_color_attachments     = highest_subpass_color_attachment_location + 1;
        const uint32_t n_input_attachments     = highest_subpass_input_attachment_index    + 1;
        const uint32_t n_preserved_attachments = static_cast<uint32_t>((*subpass_iterator)->preserved_attachments.size() );
        const uint32_t n_resolved_attachments  = ((*subpass_iterator)->resolved_attachments_map.size() == 0) ? 0
                                                                                                             : n_color_attachments;

        subpass_vk.colorAttachmentCount              = n_color_attachments;
        subpass_vk.flags                             = 0;
        subpass_vk.inputAttachmentCount              = n_input_attachments;
        subpass_vk.pColorAttachments                 = (n_color_attachments > 0)                                                      ? &current_subpass_attachment_set_ptr->color_attachments_vk.at(0)
                                                                                                                                      : nullptr;
        subpass_vk.pDepthStencilAttachment           = ((*subpass_iterator)->depth_stencil_attachment.attachment_index != UINT32_MAX) ? &current_subpass_attachment_set_ptr->depth_attachment_vk
                                                                                                                                      : nullptr;
        subpass_vk.pInputAttachments                 = (n_input_attachments > 0)                                                      ? &current_subpass_attachment_set_ptr->input_attachments_vk.at(0)
                                                                                                                                      : nullptr;
        subpass_vk.pipelineBindPoint                 = VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass_vk.pPreserveAttachments              = (n_preserved_attachments > 0) ? &current_subpass_attachment_set_ptr->preserve_attachments_vk.at(0)
                                                                                     : nullptr;
        subpass_vk.preserveAttachmentCount           = n_preserved_attachments;
        subpass_vk.pResolveAttachments               = (n_resolved_attachments > 0) ? &current_subpass_attachment_set_ptr->resolve_color_attachments_vk.at(0)
                                                                                    : nullptr;

        current_subpass_attachment_set_ptr->do_sanity_checks();

        subpass_attachment_sets.push_back(
            std::move(current_subpass_attachment_set_ptr)
        );

        subpass_descriptions_vk.push_back(subpass_vk);
    }

    /* Set up a create info descriptor and spawn a new Vulkan RenderPass object. */
    render_pass_create_info.attachmentCount = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_attachments.size         () );
    render_pass_create_info.dependencyCount = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_subpass_dependencies.size() );
    render_pass_create_info.subpassCount    = static_cast<uint32_t>(m_render_pass_create_info_ptr->m_subpasses.size           () );
    render_pass_create_info.flags           = 0;
    render_pass_create_info.pAttachments    = (render_pass_create_info.attachmentCount > 0) ? &renderpass_attachments_vk.at(0)
                                                                                            : nullptr;
    render_pass_create_info.pDependencies   = (render_pass_create_info.dependencyCount > 0) ? &subpass_dependencies_vk.at(0)
                                                                                            : nullptr;
    render_pass_create_info.pNext           = nullptr;
    render_pass_create_info.pSubpasses      = (render_pass_create_info.subpassCount > 0) ? &subpass_descriptions_vk.at(0)
                                                                                         : nullptr;
    render_pass_create_info.sType           = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;

    result_vk = vkCreateRenderPass(m_device_ptr->get_device_vk(),
                                  &render_pass_create_info,
                                   nullptr, /* pAllocator */
                                  &m_render_pass);

    if (!is_vk_call_successful(result_vk) )
    {
        anvil_assert_vk_call_succeeded(result_vk);

        goto end;
    }

    set_vk_handle(m_render_pass);

    result  = true;

end:
    return result;
}
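The reserve() strategy the NOTE above describes guards against a classic pitfall, illustrated by this minimal hypothetical sketch:

/* Hypothetical sketch: why reserving worst-case storage up front matters. */
std::vector<VkAttachmentReference> references;

references.push_back(VkAttachmentReference{0, VK_IMAGE_LAYOUT_GENERAL} );

const VkAttachmentReference* cached_ptr = &references[0]; /* cached into a VK struct */

references.push_back(VkAttachmentReference{1, VK_IMAGE_LAYOUT_GENERAL} ); /* may realloc */

/* 'cached_ptr' may now dangle; reserve()ing the maximum count first prevents this. */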
Example #20
/* Please see header for specification */
void Anvil::RenderingSurface::cache_surface_properties()
{
    const Anvil::DeviceType&             device_type                   (m_device_ptr->get_type() );
    bool                                 is_offscreen_rendering_enabled(true);
    auto                                 khr_surface_entrypoints       (m_create_info_ptr->get_instance_ptr()->get_extension_khr_surface_entrypoints() );
    const Anvil::MGPUDevice*             mgpu_device_ptr               (dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr));
    uint32_t                             n_physical_devices            (0);
    const Anvil::SGPUDevice*             sgpu_device_ptr               (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr));
    std::vector<Anvil::SurfaceFormatKHR> supported_formats;
    auto                                 window_ptr                    (m_create_info_ptr->get_window_ptr() );

    if (window_ptr != nullptr)
    {
        const WindowPlatform window_platform(window_ptr->get_platform() );

        is_offscreen_rendering_enabled = (window_platform == WINDOW_PLATFORM_DUMMY                     ||
                                          window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

        if (is_offscreen_rendering_enabled)
        {
            m_height = window_ptr->get_height_at_creation_time();
            m_width  = window_ptr->get_width_at_creation_time ();
        }
        else
        {
            /* In this case, width & height may change at run-time */
        }
    }
    else
    {
        /* In this case, width & height may change at run-time */
    }

    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:  n_physical_devices = mgpu_device_ptr->get_n_physical_devices(); break;
        case Anvil::DeviceType::SINGLE_GPU: n_physical_devices = 1;                                         break;

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Retrieve general properties */
    uint32_t n_supported_formats           (0);
    uint32_t n_supported_presentation_modes(0);
    VkResult result                        (VK_ERROR_INITIALIZATION_FAILED);

    ANVIL_REDUNDANT_VARIABLE(result);

    for (uint32_t n_physical_device = 0;
                  n_physical_device < n_physical_devices;
                ++n_physical_device)
    {
        const Anvil::PhysicalDevice* physical_device_ptr = nullptr;

        switch (device_type)
        {
            case Anvil::DeviceType::MULTI_GPU:  physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device); break;
            case Anvil::DeviceType::SINGLE_GPU: physical_device_ptr = sgpu_device_ptr->get_physical_device();                  break;

            default:
            {
                anvil_assert_fail();
            }
        }

        auto& result_caps = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

        if (m_surface == VK_NULL_HANDLE)
        {
            result_caps.supported_composite_alpha_flags = Anvil::CompositeAlphaFlagBits::INHERIT_BIT_KHR;
            result_caps.supported_transformations       = Anvil::SurfaceTransformFlagBits::INHERIT_BIT_KHR;
            result_caps.supported_usages                = static_cast<Anvil::ImageUsageFlags> (Anvil::ImageUsageFlagBits::COLOR_ATTACHMENT_BIT |
                                                                                               Anvil::ImageUsageFlagBits::TRANSFER_SRC_BIT     |
                                                                                               Anvil::ImageUsageFlagBits::TRANSFER_DST_BIT     |
                                                                                               Anvil::ImageUsageFlagBits::STORAGE_BIT);

            result_caps.supported_presentation_modes.push_back(Anvil::PresentModeKHR::IMMEDIATE_KHR);

            continue;
        }

        const VkPhysicalDevice physical_device_vk = physical_device_ptr->get_physical_device();

        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_vk,
                                                                                   m_surface,
                                                                                   reinterpret_cast<VkSurfaceCapabilitiesKHR*>(&result_caps.capabilities) );

        anvil_assert_vk_call_succeeded(result);

        if (n_physical_device == 0)
        {
            m_height = result_caps.capabilities.current_extent.height;
            m_width  = result_caps.capabilities.current_extent.width;
        }
        else
        {
            anvil_assert(m_height == result_caps.capabilities.current_extent.height);
            anvil_assert(m_width  == result_caps.capabilities.current_extent.width);
        }

        result_caps.supported_composite_alpha_flags = result_caps.capabilities.supported_composite_alpha;
        result_caps.supported_transformations       = result_caps.capabilities.supported_transforms;
        result_caps.supported_usages                = result_caps.capabilities.supported_usage_flags;

        /* Retrieve a list of formats supported by the surface */
        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device_vk,
                                                                              m_surface,
                                                                             &n_supported_formats,
                                                                              nullptr /* pSurfaceFormats */);

        anvil_assert                  (n_supported_formats >  0);
        anvil_assert_vk_call_succeeded(result);

        supported_formats.resize(n_supported_formats);

        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device_vk,
                                                                              m_surface,
                                                                             &n_supported_formats,
                                                                              reinterpret_cast<VkSurfaceFormatKHR*>(&supported_formats.at(0) ));
        anvil_assert_vk_call_succeeded(result);

        for (unsigned int n_format = 0;
                          n_format < n_supported_formats;
                        ++n_format)
        {
            result_caps.supported_formats.push_back(RenderingSurfaceFormat(supported_formats[n_format]) );
        }

        /* Retrieve a list of supported presentation modes
         *
         * NOTE: In case of mGPU devices, n_supported_presentation_modes may actually be 0 here for slave devices.
         */
        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device_vk,
                                                                                   m_surface,
                                                                                  &n_supported_presentation_modes,
                                                                                   nullptr /* pPresentModes */);

        anvil_assert_vk_call_succeeded(result);

        if (n_supported_presentation_modes > 0)
        {
            std::vector<VkPresentModeKHR> temp_storage(n_supported_presentation_modes);

            result_caps.supported_presentation_modes.resize(n_supported_presentation_modes);

            result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device_vk,
                                                                                       m_surface,
                                                                                      &n_supported_presentation_modes,
                                                                                      &temp_storage.at(0) );
            anvil_assert_vk_call_succeeded(result);

            for (uint32_t n_presentation_mode = 0;
                          n_presentation_mode < static_cast<uint32_t>(temp_storage.size() );
                        ++n_presentation_mode)
            {
                result_caps.supported_presentation_modes.at(n_presentation_mode) = static_cast<Anvil::PresentModeKHR>(temp_storage.at(n_presentation_mode) );
            }
        }
    }
}
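
Both the format and present-mode queries above follow Vulkan's standard two-call enumeration pattern: call once with a null output pointer to obtain the count, size the storage, then call again to fetch the data. A minimal stand-alone sketch of that pattern in plain Vulkan, without the Anvil wrappers (the helper name is illustrative):

#include <vector>
#include <vulkan/vulkan.h>

std::vector<VkSurfaceFormatKHR> enumerate_surface_formats(VkPhysicalDevice in_physical_device,
                                                          VkSurfaceKHR     in_surface)
{
    uint32_t n_formats = 0;

    /* First call: only the count is filled out. */
    vkGetPhysicalDeviceSurfaceFormatsKHR(in_physical_device,
                                         in_surface,
                                        &n_formats,
                                         nullptr); /* pSurfaceFormats */

    std::vector<VkSurfaceFormatKHR> formats(n_formats);

    /* Second call: the actual format descriptions are written out. */
    vkGetPhysicalDeviceSurfaceFormatsKHR(in_physical_device,
                                         in_surface,
                                        &n_formats,
                                         formats.data() );

    return formats;
}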
void Anvil::RenderingSurface::update_surface_extents() const
{
    const Anvil::DeviceType& device_type                   (m_device_ptr->get_type                             () );
    auto                     instance_ptr                  (m_create_info_ptr->get_instance_ptr                () );
    auto                     khr_surface_entrypoints       (instance_ptr->get_extension_khr_surface_entrypoints() );
    const Anvil::MGPUDevice* mgpu_device_ptr               (dynamic_cast<const Anvil::MGPUDevice*>             (m_device_ptr));
    uint32_t                 n_physical_devices            (0);
    const Anvil::SGPUDevice* sgpu_device_ptr               (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr));
    auto                     window_ptr                    (m_create_info_ptr->get_window_ptr     () );

    if (window_ptr != nullptr)
    {
        const WindowPlatform window_platform(window_ptr->get_platform() );

        if (window_platform == WINDOW_PLATFORM_DUMMY                     ||
            window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS)
        {
            /* Nothing to update - off-screen rendering is active. */
            goto end;
        }
        else
        {
            /* In this case, width & height may change at run-time */
        }
    }
    else
    {
        /* In this case, width & height may change at run-time */
    }

    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:  n_physical_devices = mgpu_device_ptr->get_n_physical_devices(); break;
        case Anvil::DeviceType::SINGLE_GPU: n_physical_devices = 1;                                         break;

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Retrieve general properties */
    for (uint32_t n_physical_device = 0;
                  n_physical_device < n_physical_devices;
                ++n_physical_device)
    {
        const Anvil::PhysicalDevice* physical_device_ptr = nullptr;
        VkResult                     result_vk;
        Anvil::SurfaceCapabilities   surface_caps;

        ANVIL_REDUNDANT_VARIABLE_CONST(result_vk);

        switch (device_type)
        {
            case Anvil::DeviceType::MULTI_GPU:  physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device); break;
            case Anvil::DeviceType::SINGLE_GPU: physical_device_ptr = sgpu_device_ptr->get_physical_device();                  break;

            default:
            {
                anvil_assert_fail();
            }
        }

        if (m_surface == VK_NULL_HANDLE)
        {
            /* Nothing to update */
            goto end;
        }

        const VkPhysicalDevice physical_device_vk = physical_device_ptr->get_physical_device();

        result_vk = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_vk,
                                                                                      m_surface,
                                                                                      reinterpret_cast<VkSurfaceCapabilitiesKHR*>(&surface_caps) );

        anvil_assert_vk_call_succeeded(result_vk);

        if (n_physical_device == 0)
        {
            m_height = surface_caps.current_extent.height;
            m_width  = surface_caps.current_extent.width;
        }
        else
        {
            anvil_assert(m_height == surface_caps.current_extent.height);
            anvil_assert(m_width  == surface_caps.current_extent.width);
        }
    }

end:
    ;
}
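
update_surface_extents() trusts VkSurfaceCapabilitiesKHR::currentExtent. Per the VK_KHR_surface specification, a currentExtent of {0xFFFFFFFF, 0xFFFFFFFF} is a special value meaning the extent is determined by the swapchain rather than the surface, so a robust caller must be prepared to fall back to the window dimensions. A minimal sketch of the raw query (plain Vulkan; the helper name is illustrative):

#include <vulkan/vulkan.h>

bool query_surface_extent(VkPhysicalDevice in_physical_device,
                          VkSurfaceKHR     in_surface,
                          VkExtent2D*      out_extent_ptr)
{
    VkSurfaceCapabilitiesKHR surface_caps;

    if (vkGetPhysicalDeviceSurfaceCapabilitiesKHR(in_physical_device,
                                                  in_surface,
                                                 &surface_caps) != VK_SUCCESS)
    {
        return false;
    }

    /* 0xFFFFFFFF x 0xFFFFFFFF => the swapchain decides the extent. */
    if (surface_caps.currentExtent.width == 0xFFFFFFFFu)
    {
        return false;
    }

    *out_extent_ptr = surface_caps.currentExtent;
    return true;
}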
/* Please see header for specification */
bool Anvil::RenderingSurface::init()
{
    const Anvil::DeviceType& device_type       (m_device_ptr->get_type() );
    bool                     init_successful   (false);
    auto                     instance_ptr      (m_create_info_ptr->get_instance_ptr() );
    uint32_t                 n_physical_devices(0);
    VkResult                 result            (VK_SUCCESS);
    const WindowPlatform     window_platform   (m_create_info_ptr->get_window_ptr()->get_platform());

    const bool               is_dummy_window_platform(window_platform == WINDOW_PLATFORM_DUMMY                     ||
                                                      window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);


    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            n_physical_devices = mgpu_device_ptr->get_n_physical_devices();

            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            n_physical_devices = 1;

            break;
        }

        default:
        {
            anvil_assert_fail();

            goto end;
        }
    }


    if (!is_dummy_window_platform)
    {
        auto window_ptr = m_create_info_ptr->get_window_ptr();

        #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT) && defined(_WIN32)
        {
            VkWin32SurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags     = 0;
            surface_create_info.hinstance = GetModuleHandle(nullptr);
            surface_create_info.hwnd      = window_ptr->get_handle();
            surface_create_info.pNext     = nullptr;
            surface_create_info.sType     = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_win32_surface_entrypoints().vkCreateWin32SurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                        &surface_create_info,
                                                                                                         nullptr, /* pAllocator */
                                                                                                        &m_surface);
        }
        #endif
        #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT) && !defined(_WIN32)
        {
            VkXcbSurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags       = 0;
            surface_create_info.window      = window_ptr->get_handle();
            surface_create_info.connection  = static_cast<xcb_connection_t*>(window_ptr->get_connection());
            surface_create_info.pNext       = nullptr;
            surface_create_info.sType       = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_xcb_surface_entrypoints().vkCreateXcbSurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                    &surface_create_info,
                                                                                                     nullptr, /* pAllocator */
                                                                                                    &m_surface);
        }
        #endif

        anvil_assert_vk_call_succeeded(result);
        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_surface);
        }
    }
    else
    {
        anvil_assert(window_platform != WINDOW_PLATFORM_UNKNOWN);
    }

    if (!is_dummy_window_platform)
    {
        /* Is there at least one queue family that, together with at least one physical device associated
         * with the logical device, can be used to present to the surface we have just created? */
        const auto& queue_families(m_device_ptr->get_physical_device_queue_families() );

        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            Anvil::RenderingSurface::PhysicalDeviceCapabilities* physical_device_caps_ptr = nullptr;
            const Anvil::PhysicalDevice*                         physical_device_ptr      = nullptr;

            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = mgpu_device_ptr->get_physical_device(n_physical_device);
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = sgpu_device_ptr->get_physical_device();
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }

            for (uint32_t n_queue_family = 0;
                          n_queue_family < static_cast<uint32_t>(queue_families.size() );
                        ++n_queue_family)
            {
                VkBool32 is_presentation_supported = VK_FALSE;

                {
                    const auto& khr_surface_entrypoints = instance_ptr->get_extension_khr_surface_entrypoints();

                    result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceSupportKHR(physical_device_ptr->get_physical_device(),
                                                                                          n_queue_family,
                                                                                          m_surface,
                                                                                         &is_presentation_supported);
                }

                if (is_vk_call_successful(result)         &&
                    is_presentation_supported == VK_TRUE)
                {
                    physical_device_caps_ptr->present_capable_queue_fams.push_back(n_queue_family);
                }
            }
        }
    }
    else
    {
        /* Off-screen rendering: any physical device that offers a universal queue can be used to "present". */
        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    if (mgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device);
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        result_caps.present_capable_queue_fams.push_back(mgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    if (sgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = sgpu_device_ptr->get_physical_device();
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        result_caps.present_capable_queue_fams.push_back(sgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }
        }

        result = VK_SUCCESS;
    }

    if (!is_vk_call_successful(result) )
    {
        anvil_assert_vk_call_succeeded(result);

        init_successful = false;
    }
    else
    {
        /* Retrieve Vulkan object capabilities and cache them */
        cache_surface_properties();

        init_successful = true;
    }

end:
    return init_successful;
}
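
The present-support loop in init() reduces to one vkGetPhysicalDeviceSurfaceSupportKHR call per queue family. A minimal sketch without the Anvil entrypoint plumbing (the helper name is illustrative):

#include <vulkan/vulkan.h>

bool find_present_capable_queue_family(VkPhysicalDevice in_physical_device,
                                       VkSurfaceKHR     in_surface,
                                       uint32_t         in_n_queue_families,
                                       uint32_t*        out_queue_family_index_ptr)
{
    for (uint32_t n_queue_family = 0;
                  n_queue_family < in_n_queue_families;
                ++n_queue_family)
    {
        VkBool32 is_presentation_supported = VK_FALSE;

        if (vkGetPhysicalDeviceSurfaceSupportKHR(in_physical_device,
                                                 n_queue_family,
                                                 in_surface,
                                                &is_presentation_supported) == VK_SUCCESS &&
            is_presentation_supported == VK_TRUE)
        {
            *out_queue_family_index_ptr = n_queue_family;

            return true;
        }
    }

    return false;
}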
Example No. 23
/** Initializes the swapchain object. */
bool Anvil::Swapchain::init()
{
    uint32_t                                              n_swapchain_images             = 0;
    auto                                                  parent_surface_ptr             = m_create_info_ptr->get_rendering_surface();
    VkResult                                              result                         = VK_ERROR_INITIALIZATION_FAILED;
    Anvil::StructChainUniquePtr<VkSwapchainCreateInfoKHR> struct_chain_ptr;
    std::vector<VkImage>                                  swapchain_images;
    const VkSurfaceTransformFlagBitsKHR                   swapchain_transformation       = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    const WindowPlatform                                  window_platform                = m_create_info_ptr->get_window()->get_platform();
    const bool                                            is_offscreen_rendering_enabled = (window_platform   == WINDOW_PLATFORM_DUMMY                     ||
                                                                                            window_platform   == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

    m_size.width  = parent_surface_ptr->get_width ();
    m_size.height = parent_surface_ptr->get_height();

    /* not doing offscreen rendering */
    if (!is_offscreen_rendering_enabled)
    {
        const auto&                                    khr_swapchain_entrypoints = m_device_ptr->get_extension_khr_swapchain_entrypoints();
        Anvil::StructChainer<VkSwapchainCreateInfoKHR> struct_chainer;

        #ifdef _DEBUG
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            const Anvil::DeviceType    device_type                     = m_device_ptr->get_type();
            uint32_t                   n_physical_devices              = 0;
            bool                       result_bool                     = false;
            const char*                required_surface_extension_name = nullptr;
            VkSurfaceCapabilitiesKHR   surface_caps;
            VkCompositeAlphaFlagsKHR   supported_composite_alpha_flags = static_cast<VkCompositeAlphaFlagsKHR>(0);
            VkSurfaceTransformFlagsKHR supported_surface_transform_flags;

            #ifdef _WIN32
                #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_WIN32_SURFACE_EXTENSION_NAME;
                #endif
            #else
                #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_XCB_SURFACE_EXTENSION_NAME;
                #endif
            #endif

            anvil_assert(required_surface_extension_name == nullptr                                                            ||
                         m_device_ptr->get_parent_instance()->is_instance_extension_supported(required_surface_extension_name) );

            switch (device_type)
            {
                case Anvil::DEVICE_TYPE_SINGLE_GPU: n_physical_devices = 1; break;

                default:
                {
                    anvil_assert_fail();
                }
            }

            for (uint32_t n_physical_device = 0;
                          n_physical_device < n_physical_devices;
                        ++n_physical_device)
            {
                const Anvil::PhysicalDevice* current_physical_device_ptr = nullptr;

                switch (device_type)
                {
                    case Anvil::DEVICE_TYPE_SINGLE_GPU: current_physical_device_ptr = sgpu_device_ptr->get_physical_device(); break;

                    default:
                    {
                        anvil_assert_fail();
                    }
                }

                /* Ensure opaque composite alpha mode is supported */
                anvil_assert(parent_surface_ptr->get_supported_composite_alpha_flags(&supported_composite_alpha_flags) );

                anvil_assert(supported_composite_alpha_flags & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR);

                /* Ensure we can use the swapchain image format  */
                anvil_assert(parent_surface_ptr->is_compatible_with_image_format(m_create_info_ptr->get_format(),
                                                                                &result_bool) );
                anvil_assert(result_bool);

                /* Ensure the transformation we're about to request is supported by the rendering surface */
                anvil_assert(parent_surface_ptr->get_supported_transformations(&supported_surface_transform_flags) );

                anvil_assert(supported_surface_transform_flags & swapchain_transformation);

                /* Ensure the requested number of swapchain images is reasonable */
                anvil_assert(parent_surface_ptr->get_capabilities(&surface_caps) );

                anvil_assert(surface_caps.maxImageCount == 0                                 ||
                             surface_caps.maxImageCount >= m_create_info_ptr->get_n_images() );
            }
        }
        #endif

        {
            VkSwapchainCreateInfoKHR create_info;

            create_info.clipped               = VK_TRUE; /* we won't be reading from the presentable images */
            create_info.compositeAlpha        = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
            create_info.flags                 = m_create_info_ptr->get_flags();
            create_info.imageArrayLayers      = 1;
            create_info.imageColorSpace       = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
            create_info.imageExtent.height    = parent_surface_ptr->get_height();
            create_info.imageExtent.width     = parent_surface_ptr->get_width ();
            create_info.imageFormat           = m_create_info_ptr->get_format ();
            create_info.imageSharingMode      = VK_SHARING_MODE_EXCLUSIVE;
            create_info.imageUsage            = m_create_info_ptr->get_usage_flags();
            create_info.minImageCount         = m_create_info_ptr->get_n_images   ();
            create_info.oldSwapchain          = VK_NULL_HANDLE;
            create_info.pNext                 = nullptr;
            create_info.pQueueFamilyIndices   = nullptr;
            create_info.presentMode           = m_create_info_ptr->get_present_mode();
            create_info.preTransform          = swapchain_transformation;
            create_info.queueFamilyIndexCount = 0;
            create_info.sType                 = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
            create_info.surface               = parent_surface_ptr->get_surface();

            struct_chainer.append_struct(create_info);
        }

        struct_chain_ptr = struct_chainer.create_chain();

        parent_surface_ptr->lock();
        {
            result = khr_swapchain_entrypoints.vkCreateSwapchainKHR(m_device_ptr->get_device_vk(),
                                                                    struct_chain_ptr->get_root_struct(),
                                                                    nullptr, /* pAllocator */
                                                                   &m_swapchain);
        }
        parent_surface_ptr->unlock();

        anvil_assert_vk_call_succeeded(result);
        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_swapchain);
        }

        /* Retrieve swap-chain images */
        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                   nullptr); /* pSwapchainImages */

        anvil_assert_vk_call_succeeded(result);
        anvil_assert                  (n_swapchain_images >  0);

        swapchain_images.resize(n_swapchain_images);

        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                  &swapchain_images[0]);

        anvil_assert_vk_call_succeeded(result);
    }
    else /* offscreen rendering */
    {
        m_create_info_ptr->set_usage_flags(m_create_info_ptr->get_usage_flags() | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

        n_swapchain_images = m_create_info_ptr->get_n_images();
    }

    for (uint32_t n_result_image = 0;
                  n_result_image < n_swapchain_images;
                ++n_result_image)
    {
        /* Spawn an Image wrapper class for the swap-chain image. */
        if (!is_offscreen_rendering_enabled)
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_swapchain_wrapper(m_device_ptr,
                                                                                    this,
                                                                                    swapchain_images[n_result_image],
                                                                                    n_result_image);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }
        else
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_nonsparse_alloc(m_device_ptr,
                                                                                  VK_IMAGE_TYPE_2D,
                                                                                  m_create_info_ptr->get_format(),
                                                                                  VK_IMAGE_TILING_OPTIMAL,
                                                                                  m_create_info_ptr->get_usage_flags(),
                                                                                  m_size.width,
                                                                                  m_size.height,
                                                                                  1, /* base_mipmap_depth */
                                                                                  1,
                                                                                  VK_SAMPLE_COUNT_1_BIT,
                                                                                  QUEUE_FAMILY_GRAPHICS_BIT,
                                                                                  VK_SHARING_MODE_EXCLUSIVE,
                                                                                  false, /* in_use_full_mipmap_chain */
                                                                                  0,     /* in_memory_features       */
                                                                                  0,     /* in_create_flags          */
                                                                                  VK_IMAGE_LAYOUT_GENERAL,
                                                                                  nullptr);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }

        /* For each swap-chain image, create a relevant view */
        {
            auto create_info_ptr = Anvil::ImageViewCreateInfo::create_2D(m_device_ptr,
                                                                         m_image_ptrs[n_result_image].get(),
                                                                         0, /* n_base_layer */
                                                                         0, /* n_base_mipmap_level */
                                                                         1, /* n_mipmaps           */
                                                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                                                         m_create_info_ptr->get_format(),
                                                                         VK_COMPONENT_SWIZZLE_R,
                                                                         VK_COMPONENT_SWIZZLE_G,
                                                                         VK_COMPONENT_SWIZZLE_B,
                                                                         VK_COMPONENT_SWIZZLE_A);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_view_ptrs[n_result_image] = Anvil::ImageView::create(std::move(create_info_ptr) );
        }

        result = VK_SUCCESS;
    }

    /* Sign up for present submission notifications. This is needed to ensure that number of presented frames ==
     * number of acquired frames at destruction time.
     */
    {
        std::vector<Anvil::Queue*> queues;

        switch (m_device_ptr->get_type() )
        {
            case Anvil::DEVICE_TYPE_SINGLE_GPU:
            {
                const std::vector<uint32_t>* queue_fams_with_present_support_ptr(nullptr);
                const auto                   rendering_surface_ptr              (m_create_info_ptr->get_rendering_surface() );
                const Anvil::SGPUDevice*     sgpu_device_ptr                    (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                if (!rendering_surface_ptr->get_queue_families_with_present_support(&queue_fams_with_present_support_ptr) )
                {
                    break;
                }

                if (queue_fams_with_present_support_ptr == nullptr)
                {
                    anvil_assert(queue_fams_with_present_support_ptr != nullptr);
                }
                else
                {
                    for (const auto queue_fam : *queue_fams_with_present_support_ptr)
                    {
                        const uint32_t n_queues = sgpu_device_ptr->get_n_queues(queue_fam);

                        for (uint32_t n_queue = 0;
                                      n_queue < n_queues;
                                    ++n_queue)
                        {
                            auto queue_ptr = sgpu_device_ptr->get_queue_for_queue_family_index(queue_fam,
                                                                                               n_queue);

                            anvil_assert(queue_ptr != nullptr);

                            if (std::find(queues.begin(),
                                          queues.end(),
                                          queue_ptr) == queues.end() )
                            {
                                queues.push_back(queue_ptr);
                            }
                        }
                    }
                }

                break;
            }
        }

        for (auto queue_ptr : queues)
        {
            queue_ptr->register_for_callbacks(
                QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                std::bind(&Swapchain::on_present_request_issued,
                          this,
                          std::placeholders::_1),
                this
            );

            m_observed_queues.push_back(queue_ptr);
        }
    }

    /* Sign up for "about to close the parent window" notifications. The swapchain instance SHOULD be
     * deinitialized before the window is destroyed, so we act as good citizens here.
     */
    m_create_info_ptr->get_window()->register_for_callbacks(
        WINDOW_CALLBACK_ID_ABOUT_TO_CLOSE,
        std::bind(&Swapchain::on_parent_window_about_to_close,
                  this),
        this
    );

    return is_vk_call_successful(result);
}
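
Stripped of the struct chaining, locking and debug assertions, the on-screen path of Swapchain::init() boils down to filling VkSwapchainCreateInfoKHR, calling vkCreateSwapchainKHR, and retrieving the images with the usual two-call pattern. A minimal sketch, assuming a FIFO present mode (always available per the spec) and a B8G8R8A8 format; in real code both the format and minImageCount must be validated against the surface capabilities first:

#include <vector>
#include <vulkan/vulkan.h>

std::vector<VkImage> create_swapchain_with_images(VkDevice        in_device,
                                                  VkSurfaceKHR    in_surface,
                                                  VkExtent2D      in_extent,
                                                  VkSwapchainKHR* out_swapchain_ptr)
{
    VkSwapchainCreateInfoKHR create_info = {};

    create_info.sType            = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    create_info.surface          = in_surface;
    create_info.minImageCount    = 3;
    create_info.imageFormat      = VK_FORMAT_B8G8R8A8_UNORM;
    create_info.imageColorSpace  = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
    create_info.imageExtent      = in_extent;
    create_info.imageArrayLayers = 1;
    create_info.imageUsage       = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    create_info.preTransform     = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    create_info.compositeAlpha   = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
    create_info.presentMode      = VK_PRESENT_MODE_FIFO_KHR;
    create_info.clipped          = VK_TRUE;
    create_info.oldSwapchain     = VK_NULL_HANDLE;

    if (vkCreateSwapchainKHR(in_device, &create_info, nullptr, out_swapchain_ptr) != VK_SUCCESS)
    {
        return {};
    }

    /* Two-call pattern again: query the count, then the handles. */
    uint32_t n_images = 0;

    vkGetSwapchainImagesKHR(in_device, *out_swapchain_ptr, &n_images, nullptr);

    std::vector<VkImage> images(n_images);

    vkGetSwapchainImagesKHR(in_device, *out_swapchain_ptr, &n_images, images.data() );

    return images;
}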
Example No. 24
/** Please see header for specification */
uint32_t Anvil::Swapchain::acquire_image(Anvil::Semaphore* in_opt_semaphore_ptr,
                                         bool              in_should_block)
{
    uint32_t             result                        (UINT32_MAX);
    VkResult             result_vk                     (VK_ERROR_INITIALIZATION_FAILED);
    const WindowPlatform window_platform               (m_create_info_ptr->get_window()->get_platform() );
    const bool           is_offscreen_rendering_enabled( (window_platform   == WINDOW_PLATFORM_DUMMY                     ||
                                                          window_platform   == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS) );

    ANVIL_REDUNDANT_VARIABLE(result_vk);

    if (!is_offscreen_rendering_enabled)
    {
        VkFence fence_handle = VK_NULL_HANDLE;

        if (in_opt_semaphore_ptr != nullptr)
        {
            in_opt_semaphore_ptr->lock();
        }

        m_image_available_fence_ptr->lock();
        lock();
        {
            const auto& khr_swapchain_entrypoints = m_device_ptr->get_extension_khr_swapchain_entrypoints();

            if (in_should_block)
            {
                m_image_available_fence_ptr->reset();

                fence_handle = m_image_available_fence_ptr->get_fence();
            }

            result_vk = khr_swapchain_entrypoints.vkAcquireNextImageKHR(m_device_ptr->get_device_vk(),
                                                                        m_swapchain,
                                                                        UINT64_MAX,
                                                                        (in_opt_semaphore_ptr != nullptr) ? in_opt_semaphore_ptr->get_semaphore() : VK_NULL_HANDLE,
                                                                        fence_handle,
                                                                       &result);

            if (fence_handle != VK_NULL_HANDLE)
            {
                result_vk = vkWaitForFences(m_device_ptr->get_device_vk(),
                                            1, /* fenceCount */
                                           &fence_handle,
                                            VK_TRUE, /* waitAll */
                                            UINT64_MAX);

                anvil_assert_vk_call_succeeded(result_vk);
            }
        }
        unlock();
        m_image_available_fence_ptr->unlock();

        if (in_opt_semaphore_ptr != nullptr)
        {
            in_opt_semaphore_ptr->unlock();
        }

        anvil_assert_vk_call_succeeded(result_vk);
    }
    else
    {
        if (in_should_block)
        {
            m_device_ptr->wait_idle();
        }

        if (in_opt_semaphore_ptr != nullptr)
        {
            /* We need to set the semaphore manually in this scenario */
            m_device_ptr->get_universal_queue(0)->submit(
                Anvil::SubmitInfo::create_signal(1,       /* n_semaphores_to_signal */
                                                &in_opt_semaphore_ptr)
            );
        }

        result = m_n_acquire_counter_rounded;
    }

    m_n_acquire_counter++;
    m_n_acquire_counter_rounded = (m_n_acquire_counter_rounded + 1) % m_create_info_ptr->get_n_images();

    m_last_acquired_image_index = result;

    return result;
}
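
The blocking path of acquire_image() hands vkAcquireNextImageKHR a fence and then waits on it, which guarantees the image is actually ready when the call returns. A minimal sketch of that idiom (plain Vulkan; the helper name is illustrative and error handling is elided):

#include <vulkan/vulkan.h>

uint32_t acquire_image_blocking(VkDevice       in_device,
                                VkSwapchainKHR in_swapchain,
                                VkFence        in_fence)
{
    uint32_t image_index = UINT32_MAX;

    vkResetFences(in_device,
                  1, /* fenceCount */
                 &in_fence);

    vkAcquireNextImageKHR(in_device,
                          in_swapchain,
                          UINT64_MAX,     /* timeout   */
                          VK_NULL_HANDLE, /* semaphore */
                          in_fence,
                         &image_index);

    vkWaitForFences(in_device,
                    1, /* fenceCount */
                   &in_fence,
                    VK_TRUE, /* waitAll */
                    UINT64_MAX);

    return image_index;
}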
Example No. 25
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info                        = {};
    const bool             khr_dedicated_allocation_supported = m_device_ptr->get_extension_info()->khr_dedicated_allocation();
    VkResult               result                             = VK_ERROR_DEVICE_LOST;

    /* Prepare VK func ptr array */
    m_vma_func_ptrs.reset(
        new VmaVulkanFunctions()
    );

    if (m_vma_func_ptrs == nullptr)
    {
        anvil_assert(m_vma_func_ptrs != nullptr);

        goto end;
    }

    m_vma_func_ptrs->vkAllocateMemory                    = Vulkan::vkAllocateMemory;
    m_vma_func_ptrs->vkBindBufferMemory                  = Vulkan::vkBindBufferMemory;
    m_vma_func_ptrs->vkBindImageMemory                   = Vulkan::vkBindImageMemory;
    m_vma_func_ptrs->vkCreateBuffer                      = Vulkan::vkCreateBuffer;
    m_vma_func_ptrs->vkCreateImage                       = Vulkan::vkCreateImage;
    m_vma_func_ptrs->vkDestroyBuffer                     = Vulkan::vkDestroyBuffer;
    m_vma_func_ptrs->vkDestroyImage                      = Vulkan::vkDestroyImage;
    m_vma_func_ptrs->vkFreeMemory                        = Vulkan::vkFreeMemory;
    m_vma_func_ptrs->vkGetBufferMemoryRequirements       = Vulkan::vkGetBufferMemoryRequirements;
    m_vma_func_ptrs->vkGetImageMemoryRequirements        = Vulkan::vkGetImageMemoryRequirements;
    m_vma_func_ptrs->vkGetPhysicalDeviceMemoryProperties = Vulkan::vkGetPhysicalDeviceMemoryProperties;
    m_vma_func_ptrs->vkGetPhysicalDeviceProperties       = Vulkan::vkGetPhysicalDeviceProperties;
    m_vma_func_ptrs->vkMapMemory                         = Vulkan::vkMapMemory;
    m_vma_func_ptrs->vkUnmapMemory                       = Vulkan::vkUnmapMemory;

    if (m_device_ptr->get_extension_info()->khr_get_memory_requirements2() )
    {
        m_vma_func_ptrs->vkGetBufferMemoryRequirements2KHR = m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetBufferMemoryRequirements2KHR;
        m_vma_func_ptrs->vkGetImageMemoryRequirements2KHR  = m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetImageMemoryRequirements2KHR;
    }
    else
    {
        m_vma_func_ptrs->vkGetBufferMemoryRequirements2KHR = nullptr;
        m_vma_func_ptrs->vkGetImageMemoryRequirements2KHR  = nullptr;
    }

    /* Prepare VMA create info struct */
    switch (m_device_ptr->get_type() )
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            /* The VMA library takes a physical device handle to extract info regarding supported
             * memory types and the like. As VK_KHR_device_group provides explicit mGPU support,
             * it is guaranteed that all physical devices within a logical device offer exactly the
             * same capabilities. This means we are safe to pass the zeroth physical device to the
             * library, and everything will still be OK.
             */
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = mgpu_device_ptr->get_physical_device(0)->get_physical_device();
            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    create_info.flags                       = (khr_dedicated_allocation_supported) ? VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT : 0;
    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;
    create_info.pVulkanFunctions            = m_vma_func_ptrs.get();

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);
end:
    return is_vk_call_successful(result);
}
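
Once the allocator is up, a single VMA call creates a buffer and binds freshly allocated device memory to it in one go. A minimal usage sketch against the VMA 2.x-era API this code base targets (the helper name and the usage flags are illustrative):

#include <vk_mem_alloc.h>

bool create_gpu_only_buffer(VmaAllocator   in_allocator,
                            VkDeviceSize   in_size,
                            VkBuffer*      out_buffer_ptr,
                            VmaAllocation* out_allocation_ptr)
{
    VkBufferCreateInfo buffer_create_info = {};

    buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_create_info.size  = in_size;
    buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VmaAllocationCreateInfo alloc_create_info = {};

    alloc_create_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    /* Creates the buffer, allocates device memory and binds the two together. */
    return vmaCreateBuffer(in_allocator,
                          &buffer_create_info,
                          &alloc_create_info,
                           out_buffer_ptr,
                           out_allocation_ptr,
                           nullptr /* pAllocationInfo */) == VK_SUCCESS;
}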
Example No. 26
/** Please see header for specification */
VkResult Anvil::Queue::present(Anvil::Swapchain*        in_swapchain_ptr,
                               uint32_t                 in_swapchain_image_index,
                               uint32_t                 in_n_wait_semaphores,
                               Anvil::Semaphore* const* in_wait_semaphore_ptrs)
{
    VkResult                                presentation_results   [MAX_SWAPCHAINS];
    VkResult                                result;
    Anvil::StructChainer<VkPresentInfoKHR>  struct_chainer;
    const ExtensionKHRSwapchainEntrypoints* swapchain_entrypoints_ptr(nullptr);
    VkSwapchainKHR                          swapchains_vk          [MAX_SWAPCHAINS];
    std::vector<VkSemaphore>                wait_semaphores_vk     (in_n_wait_semaphores);

    /* If the application is only interested in off-screen rendering, do *not* post the present request,
     * since the fake swapchain image is not presentable. We still have to wait on the user-specified
     * semaphores though. */
    if (in_swapchain_ptr != nullptr)
    {
        Anvil::Window* window_ptr = nullptr;

        window_ptr = in_swapchain_ptr->get_create_info_ptr()->get_window();

        if (window_ptr != nullptr)
        {
            const WindowPlatform window_platform = window_ptr->get_platform();

            if (window_platform == WINDOW_PLATFORM_DUMMY                    ||
                window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS)
            {
                static const VkPipelineStageFlags dst_stage_mask(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

                m_device_ptr->get_universal_queue(0)->submit(
                    SubmitInfo::create(nullptr,
                                       0,       /* in_n_semaphores_to_signal           */
                                       nullptr, /* in_opt_semaphore_to_signal_ptrs_ptr */
                                       in_n_wait_semaphores,
                                       in_wait_semaphore_ptrs,
                                      &dst_stage_mask,
                                       true) /* in_should_block */
                );

                for (uint32_t n_presentation = 0;
                              n_presentation < 1;
                            ++n_presentation)
                {
                    OnPresentRequestIssuedCallbackArgument callback_argument(in_swapchain_ptr);

                    CallbacksSupportProvider::callback(QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                                                      &callback_argument);
                }

                result = VK_SUCCESS;
                goto end;
            }
        }
    }

    /* Convert arrays of Anvil objects to raw Vulkan handle arrays */
    for (uint32_t n_swapchain = 0;
                  n_swapchain < 1;
                ++n_swapchain)
    {
        swapchains_vk[n_swapchain] = in_swapchain_ptr->get_swapchain_vk();
    }

    for (uint32_t n_wait_semaphore = 0;
                  n_wait_semaphore < in_n_wait_semaphores;
                ++n_wait_semaphore)
    {
        wait_semaphores_vk[n_wait_semaphore] = in_wait_semaphore_ptrs[n_wait_semaphore]->get_semaphore();
    }

    {
        VkPresentInfoKHR image_presentation_info;

        image_presentation_info.pImageIndices      = &in_swapchain_image_index;
        image_presentation_info.pNext              = nullptr;
        image_presentation_info.pResults           = presentation_results;
        image_presentation_info.pSwapchains        = swapchains_vk;
        image_presentation_info.pWaitSemaphores    = (in_n_wait_semaphores != 0) ? &wait_semaphores_vk.at(0) : nullptr;
        image_presentation_info.sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
        image_presentation_info.swapchainCount     = 1;
        image_presentation_info.waitSemaphoreCount = in_n_wait_semaphores;

        struct_chainer.append_struct(image_presentation_info);
    }

    swapchain_entrypoints_ptr = &m_device_ptr->get_extension_khr_swapchain_entrypoints();

    present_lock_unlock(1,
                       &in_swapchain_ptr,
                        in_n_wait_semaphores,
                        in_wait_semaphore_ptrs,
                        true);
    {
        auto chain_ptr = struct_chainer.create_chain();

        result = swapchain_entrypoints_ptr->vkQueuePresentKHR(m_queue,
                                                              chain_ptr->get_root_struct() );
    }
    present_lock_unlock(1,
                       &in_swapchain_ptr,
                        in_n_wait_semaphores,
                        in_wait_semaphore_ptrs,
                        false);

    anvil_assert_vk_call_succeeded(result);

    if (is_vk_call_successful(result) )
    {
        for (uint32_t n_presentation = 0;
                      n_presentation < 1;
                    ++n_presentation)
        {
            anvil_assert(is_vk_call_successful(presentation_results[n_presentation]));

            /* Return the most important error code reported */
            if (result != VK_ERROR_DEVICE_LOST)
            {
                switch (presentation_results[n_presentation])
                {
                    case VK_ERROR_DEVICE_LOST:
                    {
                        result = VK_ERROR_DEVICE_LOST;

                        break;
                    }

                    case VK_ERROR_SURFACE_LOST_KHR:
                    {
                        if (result != VK_ERROR_DEVICE_LOST)
                        {
                            result = VK_ERROR_SURFACE_LOST_KHR;
                        }

                        break;
                    }

                    case VK_ERROR_OUT_OF_DATE_KHR:
                    {
                        if (result != VK_ERROR_DEVICE_LOST      &&
                            result != VK_ERROR_SURFACE_LOST_KHR)
                        {
                            result = VK_ERROR_OUT_OF_DATE_KHR;
                        }

                        break;
                    }

                    case VK_SUBOPTIMAL_KHR:
                    {
                        if (result != VK_ERROR_DEVICE_LOST      &&
                            result != VK_ERROR_SURFACE_LOST_KHR &&
                            result != VK_ERROR_OUT_OF_DATE_KHR)
                        {
                            result = VK_SUBOPTIMAL_KHR;
                        }

                        break;
                    }

                    default:
                    {
                        anvil_assert(presentation_results[n_presentation] == VK_SUCCESS);
                    }
                }
            }

            {
                OnPresentRequestIssuedCallbackArgument callback_argument(in_swapchain_ptr);

                CallbacksSupportProvider::callback(QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                                                  &callback_argument);
            }
        }
    }

end:
    return result;
}
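
With the dummy-window detour and the per-swapchain error folding removed, the core of Queue::present() is a VkPresentInfoKHR fill followed by vkQueuePresentKHR. A minimal single-swapchain sketch (the helper name is illustrative); callers should treat VK_ERROR_OUT_OF_DATE_KHR and VK_SUBOPTIMAL_KHR as a cue to recreate the swapchain:

#include <vulkan/vulkan.h>

VkResult present_single_swapchain(VkQueue        in_queue,
                                  VkSwapchainKHR in_swapchain,
                                  uint32_t       in_image_index,
                                  VkSemaphore    in_wait_semaphore)
{
    VkPresentInfoKHR present_info = {};

    present_info.sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present_info.waitSemaphoreCount = 1;
    present_info.pWaitSemaphores    = &in_wait_semaphore;
    present_info.swapchainCount     = 1;
    present_info.pSwapchains        = &in_swapchain;
    present_info.pImageIndices      = &in_image_index;

    return vkQueuePresentKHR(in_queue,
                            &present_info);
}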
Example No. 27
/** Please see header for specification */
void Anvil::Queue::submit(const Anvil::SubmitInfo& in_submit_info)
{
    Anvil::Fence*                      fence_ptr        (in_submit_info.get_fence() );
    bool                               needs_fence_reset(false);
    VkResult                           result           (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkSubmitInfo> struct_chainer;

    std::vector<VkCommandBuffer> cmd_buffers_vk      (in_submit_info.get_n_command_buffers  () );
    std::vector<VkSemaphore>     signal_semaphores_vk(in_submit_info.get_n_signal_semaphores() );
    std::vector<VkSemaphore>     wait_semaphores_vk  (in_submit_info.get_n_wait_semaphores  () );

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Prepare for the submission */
    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            VkSubmitInfo submit_info;

            for (uint32_t n_command_buffer = 0;
                          n_command_buffer < in_submit_info.get_n_command_buffers();
                        ++n_command_buffer)
            {
                cmd_buffers_vk.at(n_command_buffer) = in_submit_info.get_command_buffers_sgpu()[n_command_buffer]->get_command_buffer();
            }

            for (uint32_t n_signal_semaphore = 0;
                          n_signal_semaphore < in_submit_info.get_n_signal_semaphores();
                        ++n_signal_semaphore)
            {
                auto sem_ptr = in_submit_info.get_signal_semaphores_sgpu()[n_signal_semaphore];

                signal_semaphores_vk.at(n_signal_semaphore) = sem_ptr->get_semaphore();
            }

            for (uint32_t n_wait_semaphore = 0;
                          n_wait_semaphore < in_submit_info.get_n_wait_semaphores();
                        ++n_wait_semaphore)
            {
                wait_semaphores_vk.at(n_wait_semaphore) = in_submit_info.get_wait_semaphores_sgpu()[n_wait_semaphore]->get_semaphore();
            }

            submit_info.commandBufferCount   = in_submit_info.get_n_command_buffers ();
            submit_info.pCommandBuffers      = (in_submit_info.get_n_command_buffers()   != 0) ? &cmd_buffers_vk.at(0)       : nullptr;
            submit_info.pNext                = nullptr;
            submit_info.pSignalSemaphores    = (in_submit_info.get_n_signal_semaphores() != 0) ? &signal_semaphores_vk.at(0) : nullptr;
            submit_info.pWaitDstStageMask    = in_submit_info.get_destination_stage_wait_masks();
            submit_info.pWaitSemaphores      = (in_submit_info.get_n_wait_semaphores()   != 0) ? &wait_semaphores_vk.at(0)   : nullptr;
            submit_info.signalSemaphoreCount = in_submit_info.get_n_signal_semaphores();
            submit_info.sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
            submit_info.waitSemaphoreCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(submit_info);

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Any additional structs to chain? */
    #if defined(_WIN32)
    {
        const uint64_t* d3d12_fence_signal_semaphore_values_ptr = nullptr;
        const uint64_t* d3d12_fence_wait_semaphore_values_ptr   = nullptr;

        if (in_submit_info.get_d3d12_fence_semaphore_values(&d3d12_fence_signal_semaphore_values_ptr,
                                                            &d3d12_fence_wait_semaphore_values_ptr) )
        {
            VkD3D12FenceSubmitInfoKHR fence_info;

            fence_info.pNext                      = nullptr;
            fence_info.pSignalSemaphoreValues     = d3d12_fence_signal_semaphore_values_ptr;
            fence_info.pWaitSemaphoreValues       = d3d12_fence_wait_semaphore_values_ptr;
            fence_info.signalSemaphoreValuesCount = in_submit_info.get_n_signal_semaphores();
            fence_info.sType                      = VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR;
            fence_info.waitSemaphoreValuesCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(fence_info);
        }
    }
    #endif

    /* Go for it */
    if (fence_ptr                         == nullptr &&
        in_submit_info.get_should_block() )
    {
        fence_ptr         = m_submit_fence_ptr.get();
        needs_fence_reset = true;
    }

    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers     (),
                                               in_submit_info.get_command_buffers_sgpu  (),
                                               in_submit_info.get_n_signal_semaphores   (),
                                               in_submit_info.get_signal_semaphores_sgpu(),
                                               in_submit_info.get_n_wait_semaphores     (),
                                               in_submit_info.get_wait_semaphores_sgpu  (),
                                               fence_ptr,
                                               true); /* in_should_lock */

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    {
        auto chain_ptr = struct_chainer.create_chain();

        if (needs_fence_reset)
        {
            m_submit_fence_ptr->reset();
        }

        result = vkQueueSubmit(m_queue,
                               1, /* submitCount */
                               chain_ptr->get_root_struct(),
                               (fence_ptr != nullptr) ? fence_ptr->get_fence()
                                                      : VK_NULL_HANDLE);

        if (in_submit_info.get_should_block() )
        {
            /* Wait until the submitted work finishes GPU-side */
            result = vkWaitForFences(m_device_ptr->get_device_vk(),
                                     1, /* fenceCount */
                                     fence_ptr->get_fence_ptr(),
                                     VK_TRUE,     /* waitAll */
                                     UINT64_MAX); /* timeout */

            anvil_assert_vk_call_succeeded(result);
        }
    }

    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers     (),
                                               in_submit_info.get_command_buffers_sgpu  (),
                                               in_submit_info.get_n_signal_semaphores   (),
                                               in_submit_info.get_signal_semaphores_sgpu(),
                                               in_submit_info.get_n_wait_semaphores     (),
                                               in_submit_info.get_wait_semaphores_sgpu  (),
                                               fence_ptr,
                                               false); /* in_should_lock */

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    anvil_assert_vk_call_succeeded(result);
}
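
The blocking branch of Queue::submit() is the classic submit-with-fence-then-wait idiom. A minimal sketch without the Anvil locking and struct chaining (the helper name is illustrative):

#include <vulkan/vulkan.h>

bool submit_and_wait(VkDevice        in_device,
                     VkQueue         in_queue,
                     VkCommandBuffer in_command_buffer,
                     VkFence         in_fence)
{
    VkSubmitInfo submit_info = {};

    submit_info.sType              = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers    = &in_command_buffer;

    vkResetFences(in_device,
                  1, /* fenceCount */
                 &in_fence);

    if (vkQueueSubmit(in_queue,
                      1, /* submitCount */
                     &submit_info,
                      in_fence) != VK_SUCCESS)
    {
        return false;
    }

    return vkWaitForFences(in_device,
                           1, /* fenceCount */
                          &in_fence,
                           VK_TRUE, /* waitAll */
                           UINT64_MAX) == VK_SUCCESS;
}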
Example No. 28
/** Performs a number of image view type-specific sanity checks and creates the requested
 *  Vulkan image view instance.
 *
 *  For argument discussion, please see documentation for the constructors above.
 *
 *  @return true if the function executed successfully, false otherwise.
 **/
bool Anvil::ImageView::init()
{
    const auto                                  aspect_mask               = m_create_info_ptr->get_aspect           ();
    const auto                                  format                    = m_create_info_ptr->get_format           ();
    const auto                                  image_view_type           = m_create_info_ptr->get_type             ();
    const auto                                  n_base_layer              = m_create_info_ptr->get_base_layer       ();
    const auto                                  n_base_mip                = m_create_info_ptr->get_base_mipmap_level();
    const auto                                  n_layers                  = m_create_info_ptr->get_n_layers         ();
    const auto                                  n_mips                    = m_create_info_ptr->get_n_mipmaps        ();
    VkFormat                                    parent_image_format       = VK_FORMAT_UNDEFINED;
    uint32_t                                    parent_image_n_layers     = 0;
    uint32_t                                    parent_image_n_mipmaps    = 0;
    auto                                        parent_image_ptr          = m_create_info_ptr->get_parent_image();
    bool                                        result                    = false;
    VkResult                                    result_vk;
    Anvil::StructChainer<VkImageViewCreateInfo> struct_chainer;
    const auto&                                 swizzle_array             = m_create_info_ptr->get_swizzle_array();

    parent_image_format    = parent_image_ptr->get_create_info_ptr()->get_format();
    parent_image_n_mipmaps = parent_image_ptr->get_n_mipmaps                    ();

    if (parent_image_ptr->get_create_info_ptr()->get_type_vk() != VK_IMAGE_TYPE_3D)
    {
        parent_image_n_layers = parent_image_ptr->get_create_info_ptr()->get_n_layers();
    }
    else
    {
        parent_image_ptr->get_image_mipmap_size(0,       /* in_n_mipmap        */
                                                nullptr, /* out_opt_width_ptr  */
                                                nullptr, /* out_opt_height_ptr */
                                               &parent_image_n_layers);
    }

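    /* The requested layer and mipmap ranges must fall within the parent image's bounds */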
    if (!(parent_image_n_layers >= n_base_layer + n_layers))
    {
        anvil_assert(parent_image_n_layers >= n_base_layer + n_layers);

        goto end;
    }

    if (!(parent_image_n_mipmaps >= n_base_mip + n_mips))
    {
        anvil_assert(parent_image_n_mipmaps >= n_base_mip + n_mips);

        goto end;
    }

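    /* Unless the parent image was created with the mutable-format flag, the view's format must match the image's */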
    if (((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_MUTABLE_FORMAT_BIT) == 0)      &&
         (parent_image_format                                                                                        != format))
    {
        anvil_assert(parent_image_format == format);

        goto end;
    }

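    /* 2D and 2D-array views of a 3D image require VK_KHR_maintenance1 and a 2D-array-compatible parent image */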
    if (parent_image_ptr->get_create_info_ptr()->get_type_vk() == VK_IMAGE_TYPE_3D)
    {
        if (image_view_type == VK_IMAGE_VIEW_TYPE_2D       ||
            image_view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)
        {
            if (!m_device_ptr->get_extension_info()->khr_maintenance1() )
            {
                anvil_assert(m_device_ptr->get_extension_info()->khr_maintenance1());

                goto end;
            }

            if ((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_2D_ARRAY_COMPATIBLE_BIT) == 0)
            {
                anvil_assert((parent_image_ptr->get_create_info_ptr()->get_create_flags() & Anvil::IMAGE_CREATE_FLAG_2D_ARRAY_COMPATIBLE_BIT) != 0);

                goto end;
            }
        }
    }

    /* Create the image view instance */
    {
        VkImageViewCreateInfo image_view_create_info;

        image_view_create_info.components.a                    = swizzle_array[3];
        image_view_create_info.components.b                    = swizzle_array[2];
        image_view_create_info.components.g                    = swizzle_array[1];
        image_view_create_info.components.r                    = swizzle_array[0];
        image_view_create_info.flags                           = 0;
        image_view_create_info.format                          = format;
        image_view_create_info.image                           = parent_image_ptr->get_image();
        image_view_create_info.pNext                           = nullptr;
        image_view_create_info.sType                           = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        image_view_create_info.subresourceRange.aspectMask     = aspect_mask;
        image_view_create_info.subresourceRange.baseArrayLayer = n_base_layer;
        image_view_create_info.subresourceRange.baseMipLevel   = n_base_mip;
        image_view_create_info.subresourceRange.layerCount     = n_layers;
        image_view_create_info.subresourceRange.levelCount     = n_mips;
        image_view_create_info.viewType                        = image_view_type;

        struct_chainer.append_struct(image_view_create_info);
    }

    {
        auto chain_ptr = struct_chainer.create_chain();

        result_vk = vkCreateImageView(m_device_ptr->get_device_vk(),
                                      chain_ptr->get_root_struct(),
                                      nullptr, /* pAllocator */
                                     &m_image_view);
    }

    if (!is_vk_call_successful(result_vk) )
    {
        anvil_assert_vk_call_succeeded(result_vk);

        goto end;
    }

    /* Cache the properties */
    set_vk_handle(m_image_view);

    /* All done */
    result = true;

end:
    return result;
}
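
As a companion to the wrapper above, here is a minimal sketch of the VkImageViewCreateInfo this function ultimately bakes, for the common case of a single-mip, single-layer 2D color view. It uses the raw Vulkan API; the device and image handles and the chosen format are assumptions for illustration.

/* Hypothetical illustration: create a plain 2D color view over mip 0 / layer 0.
 * The format must match the parent image's unless the image was created with
 * the mutable-format flag. */
VkImageViewCreateInfo image_view_create_info = {};
VkImageView           image_view             = VK_NULL_HANDLE;
VkResult              result_vk;

image_view_create_info.sType                           = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image                           = image;
image_view_create_info.viewType                        = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format                          = VK_FORMAT_R8G8B8A8_UNORM;
image_view_create_info.components.r                    = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_create_info.components.g                    = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_create_info.components.b                    = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_create_info.components.a                    = VK_COMPONENT_SWIZZLE_IDENTITY;
image_view_create_info.subresourceRange.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
image_view_create_info.subresourceRange.baseMipLevel   = 0;
image_view_create_info.subresourceRange.levelCount     = 1;
image_view_create_info.subresourceRange.baseArrayLayer = 0;
image_view_create_info.subresourceRange.layerCount     = 1;

result_vk = vkCreateImageView(device,
                             &image_view_create_info,
                              nullptr, /* pAllocator */
                             &image_view);
anvil_assert_vk_call_succeeded(result_vk);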