Example #1
/** Enumerates and caches all available physical devices. */
void Anvil::Instance::enumerate_physical_devices()
{
    std::vector<VkPhysicalDevice> devices;
    uint32_t                      n_physical_devices = 0;
    VkResult                      result             = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Retrieve physical device handles */
    result = vkEnumeratePhysicalDevices(m_instance,
                                       &n_physical_devices,
                                        nullptr); /* pPhysicalDevices */
    anvil_assert_vk_call_succeeded(result);

    if (n_physical_devices == 0)
    {
        fprintf(stderr, "No physical devices reported for the Vulkan instance\n");
        fflush (stderr);

        anvil_assert_fail();
    }

    devices.resize(n_physical_devices);

    result = vkEnumeratePhysicalDevices(m_instance,
                                       &n_physical_devices,
                                       &devices[0]);
    anvil_assert_vk_call_succeeded(result);

    /* Fill out internal physical device descriptors */
    for (unsigned int n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
    {
        std::unique_ptr<Anvil::PhysicalDevice> new_physical_device_ptr;

        new_physical_device_ptr = Anvil::PhysicalDevice::create(this,
                                      n_physical_device,
                                      devices[n_physical_device]);

        m_physical_devices.push_back(
            std::move(new_physical_device_ptr)
        );
    }
}
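
The function above uses the standard Vulkan two-call enumeration idiom: the first call with a null output pointer only writes the element count, the second fills the caller-sized array. A minimal stand-alone sketch of the same idiom, assuming nothing but a valid VkInstance, could look as follows:

#include <vulkan/vulkan.h>

#include <cassert>
#include <vector>

/* Hypothetical helper illustrating the count-then-fetch idiom used above. */
std::vector<VkPhysicalDevice> enumerate_devices(VkInstance in_instance)
{
    uint32_t n_devices = 0;

    /* First call: pPhysicalDevices == nullptr, so only the count is written. */
    VkResult result = vkEnumeratePhysicalDevices(in_instance,
                                                &n_devices,
                                                 nullptr);
    assert(result == VK_SUCCESS && n_devices > 0);

    /* Second call: the array is now large enough to receive all handles. */
    std::vector<VkPhysicalDevice> devices(n_devices);

    result = vkEnumeratePhysicalDevices(in_instance,
                                       &n_devices,
                                        devices.data() );
    assert(result == VK_SUCCESS);

    return devices;
}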
Example #2
/** Enumerates all available layer extensions. The enumerated extensions will be stored
 *  in the layer descriptor pointed to by in_layer_ptr.
 *
 *  @param in_layer_ptr Layer to enumerate the extensions for. If nullptr, extensions of
 *                      the global layer (instance-level extensions not provided by any
 *                      named layer) will be enumerated instead.
 **/
void Anvil::Instance::enumerate_layer_extensions(Anvil::Layer* in_layer_ptr)
{
    uint32_t n_extensions = 0;
    VkResult result       = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Check if the layer supports any extensions at all */
    const char* layer_name = nullptr;

    if (in_layer_ptr == nullptr)
    {
        in_layer_ptr = &m_global_layer;
    }

    layer_name = in_layer_ptr->name.c_str();
    result     = vkEnumerateInstanceExtensionProperties(layer_name,
                                                       &n_extensions,
                                                        nullptr); /* pProperties */

    anvil_assert_vk_call_succeeded(result);

    if (n_extensions > 0)
    {
        std::vector<VkExtensionProperties> extension_props;

        extension_props.resize(n_extensions);

        result = vkEnumerateInstanceExtensionProperties(layer_name,
                                                       &n_extensions,
                                                       &extension_props[0]);

        anvil_assert_vk_call_succeeded(result);

        /* Convert raw extension props data to internal descriptors */
        for (uint32_t n_extension = 0;
                      n_extension < n_extensions;
                    ++n_extension)
        {
            in_layer_ptr->extensions.push_back(extension_props[n_extension].extensionName);
        }
    }
}
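
Note that vkEnumerateInstanceExtensionProperties follows the same two-call idiom, and passing nullptr as its pLayerName argument reports instance-level extensions exposed by the implementation and implicit layers rather than those of a named layer. A stand-alone sketch (helper name hypothetical):

#include <vulkan/vulkan.h>

#include <cassert>
#include <string>
#include <vector>

/* Hypothetical helper: returns the names of all extensions exposed by the
 * layer called in_layer_name, or by the implementation itself if nullptr. */
std::vector<std::string> enumerate_instance_extensions(const char* in_layer_name)
{
    uint32_t                 n_extensions = 0;
    std::vector<std::string> result_names;

    VkResult result = vkEnumerateInstanceExtensionProperties(in_layer_name,
                                                            &n_extensions,
                                                             nullptr);
    assert(result == VK_SUCCESS);

    if (n_extensions > 0)
    {
        std::vector<VkExtensionProperties> props(n_extensions);

        result = vkEnumerateInstanceExtensionProperties(in_layer_name,
                                                       &n_extensions,
                                                        props.data() );
        assert(result == VK_SUCCESS);

        for (const auto& current_props : props)
        {
            result_names.push_back(current_props.extensionName);
        }
    }

    return result_names;
}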
Example #3
/** Initializes debug callback support. */
void Anvil::Instance::init_debug_callbacks()
{
    VkResult result = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Set up the debug call-backs, while we're at it */
    VkDebugReportCallbackCreateInfoEXT debug_report_callback_create_info;

    debug_report_callback_create_info.flags       = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
    debug_report_callback_create_info.pfnCallback = debug_callback_pfn_proc;
    debug_report_callback_create_info.pNext       = nullptr;
    debug_report_callback_create_info.pUserData   = this;
    debug_report_callback_create_info.sType       = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;

    result = m_ext_debug_report_entrypoints.vkCreateDebugReportCallbackEXT(m_instance,
                                                                          &debug_report_callback_create_info,
                                                                           nullptr, /* pAllocator */
                                                                          &m_debug_callback_data);
    anvil_assert_vk_call_succeeded(result);
}
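
The debug_callback_pfn_proc entry-point registered above is defined elsewhere. A minimal callback matching the PFN_vkDebugReportCallbackEXT signature could look as follows; this is a sketch, not Anvil's actual implementation, which presumably forwards the message to the user-provided validation callback via the Instance pointer stored in pUserData:

#include <vulkan/vulkan.h>

#include <cstdint>
#include <cstdio>

/* Sketch of a VK_EXT_debug_report callback: log the message and let the
 * triggering API call proceed. */
VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_pfn_proc(VkDebugReportFlagsEXT      in_flags,
                                                       VkDebugReportObjectTypeEXT in_object_type,
                                                       uint64_t                   in_object,
                                                       size_t                     in_location,
                                                       int32_t                    in_message_code,
                                                       const char*                in_layer_prefix,
                                                       const char*                in_message,
                                                       void*                      in_user_data)
{
    /* in_user_data holds the pointer passed via pUserData at registration time. */
    fprintf(stderr,
            "[%s] %s\n",
            in_layer_prefix,
            in_message);

    /* Returning VK_FALSE asks the layer not to abort the API call that
     * triggered the report. */
    return VK_FALSE;
}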
Example #4
/** Please see header for specification */
Anvil::PipelineCache::PipelineCache(const Anvil::BaseDevice* in_device_ptr,
                                    bool                     in_mt_safe,
                                    size_t                   in_initial_data_size,
                                    const void*              in_initial_data)
    :DebugMarkerSupportProvider(in_device_ptr,
                                Anvil::ObjectType::PIPELINE_CACHE),
     MTSafetySupportProvider   (in_mt_safe),
     m_device_ptr              (in_device_ptr),
     m_pipeline_cache          (VK_NULL_HANDLE)
{
    VkPipelineCacheCreateInfo cache_create_info;
    VkResult                  result_vk        (VK_ERROR_INITIALIZATION_FAILED);

    ANVIL_REDUNDANT_VARIABLE(result_vk);

    cache_create_info.flags           = 0;
    cache_create_info.initialDataSize = in_initial_data_size;
    cache_create_info.pInitialData    = in_initial_data;
    cache_create_info.pNext           = nullptr;
    cache_create_info.sType           = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;

    result_vk = Anvil::Vulkan::vkCreatePipelineCache(m_device_ptr->get_device_vk(),
                                                    &cache_create_info,
                                                     nullptr, /* pAllocator */
                                                    &m_pipeline_cache);

    anvil_assert_vk_call_succeeded(result_vk);
    if (is_vk_call_successful(result_vk) )
    {
        set_vk_handle(m_pipeline_cache);
    }

    anvil_assert(m_pipeline_cache != VK_NULL_HANDLE);

    /* Register the instance */
    Anvil::ObjectTracker::get()->register_object(Anvil::ObjectType::PIPELINE_CACHE,
                                                  this);
}
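
A pipeline cache only pays off across runs if its payload is persisted. A sketch of the retrieval side, assuming plain Vulkan entry-points; the resulting bytes could be written to disk and handed back through in_initial_data / in_initial_data_size on the next start-up:

#include <vulkan/vulkan.h>

#include <cassert>
#include <cstdint>
#include <vector>

/* Hypothetical helper: retrieves the cache payload using the usual
 * count-then-fetch idiom. */
std::vector<uint8_t> serialize_pipeline_cache(VkDevice        in_device,
                                              VkPipelineCache in_cache)
{
    size_t   n_data_bytes = 0;
    VkResult result       = vkGetPipelineCacheData(in_device,
                                                   in_cache,
                                                  &n_data_bytes,
                                                   nullptr);

    assert(result == VK_SUCCESS);

    std::vector<uint8_t> data(n_data_bytes);

    result = vkGetPipelineCacheData(in_device,
                                    in_cache,
                                   &n_data_bytes,
                                    data.data() );
    assert(result == VK_SUCCESS);

    return data;
}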
Example #5
/* Please see header for specification */
void Anvil::ImageView::get_base_mipmap_size(uint32_t* out_opt_base_mipmap_width_ptr,
                                            uint32_t* out_opt_base_mipmap_height_ptr,
                                            uint32_t* out_opt_base_mipmap_depth_ptr) const
{
    const auto n_base_mip_level(m_create_info_ptr->get_base_mipmap_level() );
    const auto parent_image_ptr(m_create_info_ptr->get_parent_image     () );
    bool       result          (false);
    uint32_t   result_depth    (0);

    ANVIL_REDUNDANT_VARIABLE(result);

    result = parent_image_ptr->get_image_mipmap_size(n_base_mip_level,
                                                     out_opt_base_mipmap_width_ptr,
                                                     out_opt_base_mipmap_height_ptr,
                                                     nullptr);
    anvil_assert(result);

    switch (m_create_info_ptr->get_type() )
    {
        case VK_IMAGE_VIEW_TYPE_1D:         result_depth = 1;                                 break;
        case VK_IMAGE_VIEW_TYPE_1D_ARRAY:   result_depth = m_create_info_ptr->get_n_layers(); break;
        case VK_IMAGE_VIEW_TYPE_2D:         result_depth = 1;                                 break;
        case VK_IMAGE_VIEW_TYPE_2D_ARRAY:   result_depth = m_create_info_ptr->get_n_layers(); break;
        case VK_IMAGE_VIEW_TYPE_3D:         result_depth = m_create_info_ptr->get_n_slices(); break;
        case VK_IMAGE_VIEW_TYPE_CUBE:       result_depth = m_create_info_ptr->get_n_layers(); break;
        case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: result_depth = m_create_info_ptr->get_n_layers(); break;

        default:
        {
            anvil_assert_fail();
        }
    }

    if (out_opt_base_mipmap_depth_ptr != nullptr)
    {
        *out_opt_base_mipmap_depth_ptr = result_depth;
    }
}
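
The width and height reported above come from the parent image's per-mip query; only the depth is derived locally. For a conventional mip chain, each successive level halves the base extent and clamps at 1, which is presumably what get_image_mipmap_size() computes internally:

#include <algorithm>
#include <cstdint>

/* Sketch of per-level extent math for a standard mip chain: level N halves
 * the base extent N times, never dropping below 1. */
uint32_t get_mip_extent(uint32_t in_base_extent,
                        uint32_t in_n_mip_level)
{
    return std::max(1u, in_base_extent >> in_n_mip_level);
}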
Example #6
/** Enumerates and caches all layers supported by the Vulkan Instance. */
void Anvil::Instance::enumerate_instance_layers()
{
    std::vector<VkLayerProperties> layer_props;
    uint32_t                       n_layers    = 0;
    VkResult                       result      = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Retrieve layer data */
    result = vkEnumerateInstanceLayerProperties(&n_layers,
                                                nullptr); /* pProperties */
    anvil_assert_vk_call_succeeded(result);

    layer_props.resize(n_layers + 1 /* global layer */);

    result = vkEnumerateInstanceLayerProperties(&n_layers,
                                               &layer_props[0]);

    anvil_assert_vk_call_succeeded(result);

    /* Convert raw layer props data to internal descriptors */
    for (uint32_t n_layer = 0;
                  n_layer < n_layers + 1;
                ++n_layer)
    {
        Anvil::Layer* layer_ptr = nullptr;

        if (n_layer < n_layers)
        {
            m_supported_layers.push_back(Anvil::Layer(layer_props[n_layer]) );

            layer_ptr = &m_supported_layers[n_layer];
        }

        enumerate_layer_extensions(layer_ptr);
    }
}
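
The Anvil::Layer constructor invoked above is not shown here. A plausible descriptor in the same spirit (a sketch; the field names are assumptions) lifts the raw VkLayerProperties into owned strings and leaves the extension list to be filled by enumerate_layer_extensions():

#include <vulkan/vulkan.h>

#include <cstdint>
#include <string>
#include <vector>

/* Hypothetical layer descriptor: mirrors VkLayerProperties and reserves room
 * for the extension names enumerated later. */
struct Layer
{
    std::string              name;
    std::string              description;
    uint32_t                 spec_version;
    uint32_t                 implementation_version;
    std::vector<std::string> extensions;

    explicit Layer(const VkLayerProperties& in_props)
        :name                  (in_props.layerName),
         description           (in_props.description),
         spec_version          (in_props.specVersion),
         implementation_version(in_props.implementationVersion)
    {
        /* Extensions are deliberately left empty at this point. */
    }
};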
Example #7
/* Please see header for specification */
void Anvil::RenderingSurface::cache_surface_properties()
{
    const Anvil::DeviceType&             device_type                   (m_device_ptr->get_type() );
    bool                                 is_offscreen_rendering_enabled(true);
    auto                                 khr_surface_entrypoints       (m_create_info_ptr->get_instance_ptr()->get_extension_khr_surface_entrypoints() );
    const Anvil::MGPUDevice*             mgpu_device_ptr               (dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr));
    uint32_t                             n_physical_devices            (0);
    const Anvil::SGPUDevice*             sgpu_device_ptr               (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr));
    std::vector<Anvil::SurfaceFormatKHR> supported_formats;
    auto                                 window_ptr                    (m_create_info_ptr->get_window_ptr() );

    if (window_ptr != nullptr)
    {
        const WindowPlatform window_platform(window_ptr->get_platform() );

        is_offscreen_rendering_enabled = (window_platform == WINDOW_PLATFORM_DUMMY                     ||
                                          window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

        if (is_offscreen_rendering_enabled)
        {
            m_height = window_ptr->get_height_at_creation_time();
            m_width  = window_ptr->get_width_at_creation_time ();
        }
        else
        {
            /* In this case, width & height may change at run-time */
        }
    }
    else
    {
        /* In this case, width & height may change at run-time */
    }

    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:  n_physical_devices = mgpu_device_ptr->get_n_physical_devices(); break;
        case Anvil::DeviceType::SINGLE_GPU: n_physical_devices = 1;                                         break;

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Retrieve general properties */
    uint32_t n_supported_formats           (0);
    uint32_t n_supported_presentation_modes(0);
    VkResult result                        (VK_ERROR_INITIALIZATION_FAILED);

    ANVIL_REDUNDANT_VARIABLE(result);

    for (uint32_t n_physical_device = 0;
                  n_physical_device < n_physical_devices;
                ++n_physical_device)
    {
        const Anvil::PhysicalDevice* physical_device_ptr = nullptr;

        switch (device_type)
        {
            case Anvil::DeviceType::MULTI_GPU:  physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device); break;
            case Anvil::DeviceType::SINGLE_GPU: physical_device_ptr = sgpu_device_ptr->get_physical_device();                  break;

            default:
            {
                anvil_assert_fail();
            }
        }

        auto& result_caps = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

        if (m_surface == VK_NULL_HANDLE)
        {
            result_caps.supported_composite_alpha_flags = Anvil::CompositeAlphaFlagBits::INHERIT_BIT_KHR;
            result_caps.supported_transformations       = Anvil::SurfaceTransformFlagBits::INHERIT_BIT_KHR;
            result_caps.supported_usages                = static_cast<Anvil::ImageUsageFlags> (Anvil::ImageUsageFlagBits::COLOR_ATTACHMENT_BIT |
                                                                                               Anvil::ImageUsageFlagBits::TRANSFER_SRC_BIT     |
                                                                                               Anvil::ImageUsageFlagBits::TRANSFER_DST_BIT     |
                                                                                               Anvil::ImageUsageFlagBits::STORAGE_BIT);

            result_caps.supported_presentation_modes.push_back(Anvil::PresentModeKHR::IMMEDIATE_KHR);

            continue;
        }

        const VkPhysicalDevice physical_device_vk = physical_device_ptr->get_physical_device();

        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_vk,
                                                                                   m_surface,
                                                                                   reinterpret_cast<VkSurfaceCapabilitiesKHR*>(&result_caps.capabilities) );

        anvil_assert_vk_call_succeeded(result);

        if (n_physical_device == 0)
        {
            m_height = result_caps.capabilities.current_extent.height;
            m_width  = result_caps.capabilities.current_extent.width;
        }
        else
        {
            anvil_assert(m_height == result_caps.capabilities.current_extent.height);
            anvil_assert(m_width  == result_caps.capabilities.current_extent.width);
        }

        result_caps.supported_composite_alpha_flags = result_caps.capabilities.supported_composite_alpha;
        result_caps.supported_transformations       = result_caps.capabilities.supported_transforms;
        result_caps.supported_usages                = result_caps.capabilities.supported_usage_flags;

        /* Retrieve a list of formats supported by the surface */
        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device_vk,
                                                                              m_surface,
                                                                             &n_supported_formats,
                                                                              nullptr /* pSurfaceFormats */);

        anvil_assert                  (n_supported_formats >  0);
        anvil_assert_vk_call_succeeded(result);

        supported_formats.resize(n_supported_formats);

        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device_vk,
                                                                              m_surface,
                                                                             &n_supported_formats,
                                                                              reinterpret_cast<VkSurfaceFormatKHR*>(&supported_formats.at(0) ));
        anvil_assert_vk_call_succeeded(result);

        for (unsigned int n_format = 0;
                          n_format < n_supported_formats;
                        ++n_format)
        {
            result_caps.supported_formats.push_back(RenderingSurfaceFormat(supported_formats[n_format]) );
        }

        /* Retrieve a list of supported presentation modes
         *
         * NOTE: In case of mGPU devices, n_supported_presentation_modes may actually be 0 here for slave devices.
         */
        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device_vk,
                                                                                   m_surface,
                                                                                  &n_supported_presentation_modes,
                                                                                   nullptr /* pPresentModes */);

        anvil_assert_vk_call_succeeded(result);

        if (n_supported_presentation_modes > 0)
        {
            std::vector<VkPresentModeKHR> temp_storage(n_supported_presentation_modes);

            result_caps.supported_presentation_modes.resize(n_supported_presentation_modes);

            result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device_vk,
                                                                                       m_surface,
                                                                                      &n_supported_presentation_modes,
                                                                                      &temp_storage.at(0) );
            anvil_assert_vk_call_succeeded(result);

            for (uint32_t n_presentation_mode = 0;
                          n_presentation_mode < static_cast<uint32_t>(temp_storage.size() );
                        ++n_presentation_mode)
            {
                result_caps.supported_presentation_modes.at(n_presentation_mode) = static_cast<Anvil::PresentModeKHR>(temp_storage.at(n_presentation_mode) );
            }
        }
    }
}
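
Once the per-device capabilities are cached, a swapchain-creating consumer typically picks a presentation mode from result_caps.supported_presentation_modes. A sketch in plain Vulkan terms: prefer MAILBOX when present, otherwise fall back to FIFO, which the specification guarantees for every presentable surface:

#include <vulkan/vulkan.h>

#include <algorithm>
#include <vector>

/* Hypothetical consumer of the cached presentation-mode list. */
VkPresentModeKHR pick_present_mode(const std::vector<VkPresentModeKHR>& in_supported_modes)
{
    const bool has_mailbox = std::find(in_supported_modes.begin(),
                                       in_supported_modes.end  (),
                                       VK_PRESENT_MODE_MAILBOX_KHR) != in_supported_modes.end();

    return (has_mailbox) ? VK_PRESENT_MODE_MAILBOX_KHR
                         : VK_PRESENT_MODE_FIFO_KHR;
}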
Example #8
/** Please see header for specification */
uint32_t Anvil::Swapchain::acquire_image(Anvil::Semaphore* in_opt_semaphore_ptr,
                                         bool              in_should_block)
{
    uint32_t             result                        (UINT32_MAX);
    VkResult             result_vk                     (VK_ERROR_INITIALIZATION_FAILED);
    const WindowPlatform window_platform               (m_create_info_ptr->get_window()->get_platform() );
    const bool           is_offscreen_rendering_enabled( (window_platform   == WINDOW_PLATFORM_DUMMY                     ||
                                                          window_platform   == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS) );

    ANVIL_REDUNDANT_VARIABLE(result_vk);

    if (!is_offscreen_rendering_enabled)
    {
        VkFence fence_handle = VK_NULL_HANDLE;

        if (in_opt_semaphore_ptr != nullptr)
        {
            in_opt_semaphore_ptr->lock();
        }

        m_image_available_fence_ptr->lock();
        lock();
        {
            const auto& khr_swapchain_entrypoints = m_device_ptr->get_extension_khr_swapchain_entrypoints();

            if (in_should_block)
            {
                m_image_available_fence_ptr->reset();

                fence_handle = m_image_available_fence_ptr->get_fence();
            }

            result_vk = khr_swapchain_entrypoints.vkAcquireNextImageKHR(m_device_ptr->get_device_vk(),
                                                                        m_swapchain,
                                                                        UINT64_MAX,
                                                                        (in_opt_semaphore_ptr != nullptr) ? in_opt_semaphore_ptr->get_semaphore() : VK_NULL_HANDLE,
                                                                        fence_handle,
                                                                       &result);

            if (fence_handle != VK_NULL_HANDLE)
            {
                result_vk = vkWaitForFences(m_device_ptr->get_device_vk(),
                                            1, /* fenceCount */
                                           &fence_handle,
                                            VK_TRUE, /* waitAll */
                                            UINT64_MAX);

                anvil_assert_vk_call_succeeded(result_vk);
            }
        }
        unlock();
        m_image_available_fence_ptr->unlock();

        if (in_opt_semaphore_ptr != nullptr)
        {
            in_opt_semaphore_ptr->unlock();
        }

        anvil_assert_vk_call_succeeded(result_vk);
    }
    else
    {
        if (in_should_block)
        {
            m_device_ptr->wait_idle();
        }

        if (in_opt_semaphore_ptr != nullptr)
        {
            /* We need to set the semaphore manually in this scenario */
            m_device_ptr->get_universal_queue(0)->submit(
                Anvil::SubmitInfo::create_signal(1,       /* n_semaphores_to_signal */
                                                &in_opt_semaphore_ptr)
            );
        }

        result = m_n_acquire_counter_rounded;
    }

    m_n_acquire_counter++;
    m_n_acquire_counter_rounded = (m_n_acquire_counter_rounded + 1) % m_create_info_ptr->get_n_images();

    m_last_acquired_image_index = result;

    return result;
}
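
Stripped of Anvil's locking and the offscreen special case, the blocking path above reduces to a fence-synchronized vkAcquireNextImageKHR call. A stand-alone sketch, assuming raw Vulkan handles:

#include <vulkan/vulkan.h>

#include <cassert>
#include <cstdint>

/* Hypothetical equivalent of the blocking acquire path: obtain the next
 * swapchain image index and, if a fence was supplied, wait until the
 * acquisition has actually completed. */
uint32_t acquire_next_image(VkDevice       in_device,
                            VkSwapchainKHR in_swapchain,
                            VkSemaphore    in_opt_semaphore,
                            VkFence        in_opt_fence)
{
    uint32_t n_image = UINT32_MAX;

    VkResult result = vkAcquireNextImageKHR(in_device,
                                            in_swapchain,
                                            UINT64_MAX, /* timeout */
                                            in_opt_semaphore,
                                            in_opt_fence,
                                           &n_image);
    assert(result == VK_SUCCESS);

    if (in_opt_fence != VK_NULL_HANDLE)
    {
        result = vkWaitForFences(in_device,
                                 1, /* fenceCount */
                                &in_opt_fence,
                                 VK_TRUE, /* waitAll */
                                 UINT64_MAX);
        assert(result == VK_SUCCESS);
    }

    return n_image;
}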
Example #9
/** Please see header for specification */
void Anvil::Queue::submit(const Anvil::SubmitInfo& in_submit_info)
{
    Anvil::Fence*                      fence_ptr        (in_submit_info.get_fence() );
    bool                               needs_fence_reset(false);
    VkResult                           result           (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkSubmitInfo> struct_chainer;

    std::vector<VkCommandBuffer> cmd_buffers_vk      (in_submit_info.get_n_command_buffers  () );
    std::vector<VkSemaphore>     signal_semaphores_vk(in_submit_info.get_n_signal_semaphores() );
    std::vector<VkSemaphore>     wait_semaphores_vk  (in_submit_info.get_n_wait_semaphores  () );

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Prepare for the submission */
    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            VkSubmitInfo submit_info;

            for (uint32_t n_command_buffer = 0;
                          n_command_buffer < in_submit_info.get_n_command_buffers();
                        ++n_command_buffer)
            {
                cmd_buffers_vk.at(n_command_buffer) = in_submit_info.get_command_buffers_sgpu()[n_command_buffer]->get_command_buffer();
            }

            for (uint32_t n_signal_semaphore = 0;
                          n_signal_semaphore < in_submit_info.get_n_signal_semaphores();
                        ++n_signal_semaphore)
            {
                auto sem_ptr = in_submit_info.get_signal_semaphores_sgpu()[n_signal_semaphore];

                signal_semaphores_vk.at(n_signal_semaphore) = sem_ptr->get_semaphore();
            }

            for (uint32_t n_wait_semaphore = 0;
                          n_wait_semaphore < in_submit_info.get_n_wait_semaphores();
                        ++n_wait_semaphore)
            {
                wait_semaphores_vk.at(n_wait_semaphore) = in_submit_info.get_wait_semaphores_sgpu()[n_wait_semaphore]->get_semaphore();
            }

            submit_info.commandBufferCount   = in_submit_info.get_n_command_buffers ();
            submit_info.pCommandBuffers      = (in_submit_info.get_n_command_buffers()   != 0) ? &cmd_buffers_vk.at(0)       : nullptr;
            submit_info.pNext                = nullptr;
            submit_info.pSignalSemaphores    = (in_submit_info.get_n_signal_semaphores() != 0) ? &signal_semaphores_vk.at(0) : nullptr;
            submit_info.pWaitDstStageMask    = in_submit_info.get_destination_stage_wait_masks();
            submit_info.pWaitSemaphores      = (in_submit_info.get_n_wait_semaphores()   != 0) ? &wait_semaphores_vk.at(0)   : nullptr;
            submit_info.signalSemaphoreCount = in_submit_info.get_n_signal_semaphores();
            submit_info.sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
            submit_info.waitSemaphoreCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(submit_info);

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Any additional structs to chain? */
    #if defined(_WIN32)
    {
        const uint64_t* d3d12_fence_signal_semaphore_values_ptr = nullptr;
        const uint64_t* d3d12_fence_wait_semaphore_values_ptr   = nullptr;

        if (in_submit_info.get_d3d12_fence_semaphore_values(&d3d12_fence_signal_semaphore_values_ptr,
                                                            &d3d12_fence_wait_semaphore_values_ptr) )
        {
            VkD3D12FenceSubmitInfoKHR fence_info;

            fence_info.pNext                      = nullptr;
            fence_info.pSignalSemaphoreValues     = d3d12_fence_signal_semaphore_values_ptr;
            fence_info.pWaitSemaphoreValues       = d3d12_fence_wait_semaphore_values_ptr;
            fence_info.signalSemaphoreValuesCount = in_submit_info.get_n_signal_semaphores();
            fence_info.sType                      = VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR;
            fence_info.waitSemaphoreValuesCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(fence_info);
        }
    }
    #endif

    /* Go for it */
    if (fence_ptr                         == nullptr &&
        in_submit_info.get_should_block() )
    {
        fence_ptr         = m_submit_fence_ptr.get();
        needs_fence_reset = true;
    }

    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers     (),
                                               in_submit_info.get_command_buffers_sgpu  (),
                                               in_submit_info.get_n_signal_semaphores   (),
                                               in_submit_info.get_signal_semaphores_sgpu(),
                                               in_submit_info.get_n_wait_semaphores     (),
                                               in_submit_info.get_wait_semaphores_sgpu  (),
                                               fence_ptr,
                                               true); /* in_should_lock */

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    {
        auto chain_ptr = struct_chainer.create_chain();

        if (needs_fence_reset)
        {
            m_submit_fence_ptr->reset();
        }

        result = vkQueueSubmit(m_queue,
                               1, /* submitCount */
                               chain_ptr->get_root_struct(),
                               (fence_ptr != nullptr) ? fence_ptr->get_fence()
                                                      : VK_NULL_HANDLE);

        if (in_submit_info.get_should_block() )
        {
            /* Wait until the submission finishes GPU-side */
            result = vkWaitForFences(m_device_ptr->get_device_vk(),
                                     1, /* fenceCount */
                                     fence_ptr->get_fence_ptr(),
                                     VK_TRUE,     /* waitAll */
                                     UINT64_MAX); /* timeout */

            anvil_assert_vk_call_succeeded(result);
        }
    }

    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers     (),
                                               in_submit_info.get_command_buffers_sgpu  (),
                                               in_submit_info.get_n_signal_semaphores   (),
                                               in_submit_info.get_signal_semaphores_sgpu(),
                                               in_submit_info.get_n_wait_semaphores     (),
                                               in_submit_info.get_wait_semaphores_sgpu  (),
                                               fence_ptr,
                                               false); /* in_should_lock */

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    anvil_assert_vk_call_succeeded(result);
}
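
Anvil::StructChainer automates the pNext chaining that Vulkan uses to attach extension structs to a base struct such as VkSubmitInfo. A hand-rolled sketch of the same mechanism, using VkTimelineSemaphoreSubmitInfo (core as of Vulkan 1.2) purely as an example extension struct:

#include <vulkan/vulkan.h>

/* Prepend an extension struct to a submit info's pNext chain. The same
 * pattern generalizes to any Vulkan sType/pNext pair. */
void chain_timeline_info(VkSubmitInfo*                  inout_submit_info_ptr,
                         VkTimelineSemaphoreSubmitInfo* in_timeline_info_ptr)
{
    /* Preserve whatever chain is already attached to the base struct. */
    in_timeline_info_ptr->pNext  = inout_submit_info_ptr->pNext;
    inout_submit_info_ptr->pNext = in_timeline_info_ptr;
}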
Example #10
/* Please see header for specification */
bool Anvil::GLSLShaderToSPIRVGenerator::bake_spirv_blob() const
{
    bool           glsl_filename_is_temporary = false;
    std::string    glsl_filename_with_path;
    bool           result                     = false;

    ANVIL_REDUNDANT_VARIABLE(glsl_filename_is_temporary);

    if (m_glsl_source_code_dirty)
    {
        bake_glsl_source_code();

        anvil_assert(!m_glsl_source_code_dirty);
    }

    if (m_mode == MODE_LOAD_SOURCE_FROM_FILE)
    {
        glsl_filename_is_temporary = false;
        glsl_filename_with_path    = m_data;
    }

    /* Form a temporary file name we will use to write the modified GLSL shader to. */
    #ifndef ANVIL_LINK_WITH_GLSLANG
    {
        switch (m_shader_stage)
        {
            case ShaderStage::COMPUTE:                 glsl_filename_with_path = "temp.comp"; break;
            case ShaderStage::FRAGMENT:                glsl_filename_with_path = "temp.frag"; break;
            case ShaderStage::GEOMETRY:                glsl_filename_with_path = "temp.geom"; break;
            case ShaderStage::TESSELLATION_CONTROL:    glsl_filename_with_path = "temp.tesc"; break;
            case ShaderStage::TESSELLATION_EVALUATION: glsl_filename_with_path = "temp.tese"; break;
            case ShaderStage::VERTEX:                  glsl_filename_with_path = "temp.vert"; break;

            default:
            {
                anvil_assert_fail();

                goto end;
            }
        }

        /* Write down the file to a temporary location */
        Anvil::IO::write_text_file(glsl_filename_with_path,
                                   m_glsl_source_code);

        glsl_filename_is_temporary = true;
    }
    #endif


    #ifdef ANVIL_LINK_WITH_GLSLANG
    {
        /* Shader modules are cached throughout Instance's lifetime in Anvil. It might just happen that
         * the shader we're about to convert to SPIR-V representation has already been converted in the past.
         *
         * Given that the conversion process can be time-consuming, let's try to see if any of the living
         * shader module instances already use exactly the same source code.
         */
        uint32_t n_current_shader_module = 0;
        auto     object_tracker_ptr      = Anvil::ObjectTracker::get();

        do
        {
            auto                       shader_module_raw_ptr = object_tracker_ptr->get_object_at_index     (Anvil::ObjectType::SHADER_MODULE,
                                                                                                            n_current_shader_module);
            const Anvil::ShaderModule* shader_module_ptr     = reinterpret_cast<const Anvil::ShaderModule*>(shader_module_raw_ptr);

            if (shader_module_raw_ptr == nullptr)
            {
                /* Out of shader module instances. */
                break;
            }

            if (shader_module_ptr->get_glsl_source_code() == m_glsl_source_code)
            {
                const auto reference_spirv_blob               = shader_module_ptr->get_spirv_blob();
                const auto reference_spirv_blob_size_in_bytes = reference_spirv_blob.size() * sizeof(reference_spirv_blob.at(0) );

                anvil_assert(reference_spirv_blob_size_in_bytes != 0);

                m_spirv_blob.resize(reference_spirv_blob_size_in_bytes);

                memcpy(&m_spirv_blob.at        (0),
                       &reference_spirv_blob.at(0),
                       reference_spirv_blob_size_in_bytes);

                result = true;
                break;
            }

            /* Move to the next shader module instance */
            ++n_current_shader_module;
        }
        while (n_current_shader_module != 0); /* work around "conditional expression is constant" warnings issued by some compilers */

        if (m_spirv_blob.size() == 0)
        {
            /* Need to bake a brand new SPIR-V blob */
            result = bake_spirv_blob_by_calling_glslang(m_glsl_source_code.c_str() );
        }
    }

    #else
    {
        /* We need to point glslangvalidator at a location where it can stash the SPIR-V blob. */
        result = bake_spirv_blob_by_spawning_glslang_process(glsl_filename_with_path,
                                                             "temp.spv");
    }

end:
    #endif



    return result;
}
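
bake_spirv_blob_by_spawning_glslang_process() is not shown above. Its core is presumably launching the standalone glslangValidator tool; a minimal sketch, assuming the binary is reachable through PATH:

#include <cstdlib>
#include <string>

/* Hypothetical process-spawning core: ask glslangValidator (-V) to compile
 * the GLSL file into a Vulkan SPIR-V blob at the requested location. */
bool spawn_glslang(const std::string& in_glsl_filename,
                   const std::string& in_spirv_filename)
{
    const std::string command_line = "glslangValidator -V " + in_glsl_filename +
                                     " -o "                 + in_spirv_filename;

    /* glslangValidator exits with 0 on success. */
    return std::system(command_line.c_str() ) == 0;
}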
Example #11
/** Initializes the wrapper. */
void Anvil::Instance::init(const std::vector<std::string>& in_disallowed_instance_level_extensions)
{
    VkApplicationInfo           app_info;
    VkInstanceCreateInfo        create_info;
    std::vector<const char*>    enabled_layers;
    std::map<std::string, bool> extension_enabled_status;
    size_t                      n_instance_layers        = 0;
    VkResult                    result                   = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Enumerate available layers */
    enumerate_instance_layers();

    /* Determine what extensions we need to request at instance creation time */
    static const char* desired_extensions_with_validation[] =
    {
        VK_KHR_SURFACE_EXTENSION_NAME,

        #ifdef _WIN32
            #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_WIN32_SURFACE_EXTENSION_NAME,
            #endif
        #else
            #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_XCB_SURFACE_EXTENSION_NAME,
            #endif
        #endif

        VK_EXT_DEBUG_REPORT_EXTENSION_NAME
    };
    static const char* desired_extensions_without_validation[] =
    {
        VK_KHR_SURFACE_EXTENSION_NAME,

        #ifdef _WIN32
            #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_WIN32_SURFACE_EXTENSION_NAME,
            #endif
        #else
            #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                VK_KHR_XCB_SURFACE_EXTENSION_NAME,
            #endif
        #endif
    };

    /* Set up the app info descriptor */
    app_info.apiVersion         = VK_MAKE_VERSION(1, 0, 0);
    app_info.applicationVersion = 0;
    app_info.engineVersion      = 0;
    app_info.pApplicationName   = m_app_name.c_str();
    app_info.pEngineName        = m_engine_name.c_str();
    app_info.pNext              = nullptr;
    app_info.sType              = VK_STRUCTURE_TYPE_APPLICATION_INFO;

    /* Set up the create info descriptor */
    memset(&create_info,
           0,
           sizeof(create_info) );

    n_instance_layers = m_supported_layers.size();

    for (size_t  n_instance_layer = 0;
                 n_instance_layer < n_instance_layers;
               ++n_instance_layer)
    {
        const std::string& layer_description = m_supported_layers[n_instance_layer].description;
        const std::string& layer_name        = m_supported_layers[n_instance_layer].name;

        /* If validation is enabled and this is a layer which issues debug call-backs, cache it, so that
         * we can request for it at vkCreateInstance() call time */
        if (m_validation_callback_function       != nullptr          &&
            layer_description.find("Validation") != std::string::npos)
        {
            enabled_layers.push_back(layer_name.c_str() );
        }
    }

    {
        if (m_validation_callback_function != nullptr)
        {
            for (uint32_t n_extension = 0;
                          n_extension < sizeof(desired_extensions_with_validation) / sizeof(desired_extensions_with_validation[0]);
                        ++n_extension)
            {
                if (is_instance_extension_supported(desired_extensions_with_validation[n_extension]))
                {
                    extension_enabled_status[desired_extensions_with_validation[n_extension] ] = true;
                }
            }
        }
        else
        {
            for (uint32_t n_extension = 0;
                          n_extension < sizeof(desired_extensions_without_validation) / sizeof(desired_extensions_without_validation[0]);
                        ++n_extension)
            {
                if (is_instance_extension_supported(desired_extensions_without_validation[n_extension]))
                {
                    extension_enabled_status[desired_extensions_without_validation[n_extension] ] = true;
                }
            }
        }

        /* Enable known instance-level extensions by default */
        if (is_instance_extension_supported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME] = true;
        }

        if (is_instance_extension_supported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME] = true;
        }

        if (is_instance_extension_supported(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME] = true;
        }

        if (is_instance_extension_supported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME) )
        {
            extension_enabled_status[VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME] = true;
        }

        /* Filter out undesired extensions */
        for (const auto& current_extension_name : in_disallowed_instance_level_extensions)
        {
            auto ext_iterator = extension_enabled_status.find(current_extension_name);

            if (ext_iterator != extension_enabled_status.end() )
            {
                extension_enabled_status.erase(ext_iterator);
            }
        }

        m_enabled_extensions_info_ptr = Anvil::ExtensionInfo<bool>::create_instance_extension_info(extension_enabled_status,
                                                                                                   false); /* in_unspecified_extension_name_value */
    }

    /* We're ready to create a new Vulkan instance */
    std::vector<const char*> enabled_extensions_raw;

    for (auto& ext_name : extension_enabled_status)
    {
        enabled_extensions_raw.push_back(ext_name.first.c_str() );
    }

    create_info.enabledExtensionCount   = static_cast<uint32_t>(enabled_extensions_raw.size() );
    create_info.enabledLayerCount       = static_cast<uint32_t>(enabled_layers.size() );
    create_info.flags                   = 0;
    create_info.pApplicationInfo        = &app_info;
    create_info.pNext                   = nullptr;
    create_info.ppEnabledExtensionNames = (enabled_extensions_raw.size() > 0) ? &enabled_extensions_raw[0] : nullptr;
    create_info.ppEnabledLayerNames     = (enabled_layers.size()         > 0) ? &enabled_layers        [0] : nullptr;
    create_info.sType                   = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;

    result = vkCreateInstance(&create_info,
                              nullptr, /* pAllocator */
                              &m_instance);

    anvil_assert_vk_call_succeeded(result);

    /* Continue initializing */
    init_func_pointers();

    if (m_validation_callback_function != nullptr)
    {
        init_debug_callbacks();
    }

    enumerate_physical_devices();
}
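
The is_instance_extension_supported() calls above consult data gathered during enumerate_instance_layers() / enumerate_layer_extensions(). A stand-alone analogue that re-queries the loader directly would look roughly like this (a sketch, not Anvil's cached implementation):

#include <vulkan/vulkan.h>

#include <cstring>
#include <vector>

/* Hypothetical support check: scans the implementation-level instance
 * extensions for the given name. */
bool is_instance_extension_supported(const char* in_extension_name)
{
    uint32_t n_extensions = 0;

    vkEnumerateInstanceExtensionProperties(nullptr, /* pLayerName */
                                          &n_extensions,
                                           nullptr);

    std::vector<VkExtensionProperties> props(n_extensions);

    vkEnumerateInstanceExtensionProperties(nullptr, /* pLayerName */
                                          &n_extensions,
                                           props.data() );

    for (const auto& current_props : props)
    {
        if (strcmp(current_props.extensionName,
                   in_extension_name) == 0)
        {
            return true;
        }
    }

    return false;
}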