Code example #1
0
/* Please see header for specification */
bool Anvil::BasePipelineCreateInfo::attach_push_constant_range(uint32_t           in_offset,
                                                               uint32_t           in_size,
                                                               VkShaderStageFlags in_stages)
{
    bool result = false;

    /* Retrieve pipeline's descriptor and add the specified push constant range */
    const auto new_descriptor = Anvil::PushConstantRange(in_offset,
                                                         in_size,
                                                         in_stages);

    if (std::find(m_push_constant_ranges.begin(),
                  m_push_constant_ranges.end(),
                  new_descriptor) != m_push_constant_ranges.end() )
    {
        anvil_assert_fail();

        goto end;
    }

    m_push_constant_ranges.push_back(new_descriptor);

    /* All done */
    result = true;

end:
    return result;
}
Code example #2
0
File: backend_vma.cpp    Project: mp3butcher/Anvil
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info = {};
    VkResult               result        (VK_ERROR_DEVICE_LOST);

    switch (m_device_ptr->get_type() )
    {
        case Anvil::DEVICE_TYPE_SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);
    return is_vk_call_successful(result);
}
Code example #3
0
File: instance.cpp    Project: mp3butcher/Anvil
/** Enumerates and caches all available physical devices. */
void Anvil::Instance::enumerate_physical_devices()
{
    std::vector<VkPhysicalDevice> devices;
    uint32_t                      n_physical_devices = 0;
    VkResult                      result             = VK_ERROR_INITIALIZATION_FAILED;

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Retrieve physical device handles */
    result = vkEnumeratePhysicalDevices(m_instance,
                                       &n_physical_devices,
                                        nullptr); /* pPhysicalDevices */
    anvil_assert_vk_call_succeeded(result);

    if (n_physical_devices == 0)
    {
        fprintf(stderr,"No physical devices reported for the Vulkan instance");
        fflush (stderr);

        anvil_assert_fail();
    }

    devices.resize(n_physical_devices);

    result = vkEnumeratePhysicalDevices(m_instance,
                                       &n_physical_devices,
                                       &devices[0]);
    anvil_assert_vk_call_succeeded(result);

    /* Fill out internal physical device descriptors */
    for (unsigned int n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
    {
        std::unique_ptr<Anvil::PhysicalDevice> new_physical_device_ptr;

        new_physical_device_ptr = Anvil::PhysicalDevice::create(this,
                                      n_physical_device,
                                      devices[n_physical_device]);

        m_physical_devices.push_back(
            std::move(new_physical_device_ptr)
        );
    }
}
Code example #4
0
/* Please see header for specification.
 *
 * Registers a #pragma with an optional value. Fails (and trips an assertion)
 * if a pragma with the same name has already been added.
 *
 * @return true if the pragma was added, false if it was already present.
 */
bool Anvil::GLSLShaderToSPIRVGenerator::add_pragma(std::string in_pragma_name,
                                                   std::string in_opt_value)
{
    /* emplace() performs a single look-up and only inserts when the key is
     * new, replacing the original find() + operator[] double look-up.
     * Arguments are moved since the by-value parameters are not used again. */
    const auto insertion_result = m_pragmas.emplace(std::move(in_pragma_name),
                                                    std::move(in_opt_value) );

    if (!insertion_result.second)
    {
        /* The pragma has already been defined - this is a caller error. */
        anvil_assert_fail();

        return false;
    }

    return true;
}
Code example #5
0
/* Please see header for specification.
 *
 * Registers a preprocessor definition with the specified value. Fails (and
 * trips an assertion) if the definition has already been added.
 *
 * @return true if the pair was added, false if the name was already present.
 */
bool Anvil::GLSLShaderToSPIRVGenerator::add_definition_value_pair(std::string in_definition_name,
                                                                  std::string in_value)
{
    /* emplace() performs a single look-up and only inserts when the key is
     * new, replacing the original find() + operator[] double look-up.
     * Arguments are moved since the by-value parameters are not used again. */
    const auto insertion_result = m_definition_values.emplace(std::move(in_definition_name),
                                                              std::move(in_value) );

    if (!insertion_result.second)
    {
        /* The definition has already been added - this is a caller error. */
        anvil_assert_fail();

        return false;
    }

    return true;
}
Code example #6
0
/* Please see header for specification */
std::string Anvil::GLSLShaderToSPIRVGenerator::get_extension_behavior_glsl_code(const ExtensionBehavior& in_value) const
{
    /* Map the behavior enum onto the token used in a GLSL "#extension" directive. */
    switch (in_value)
    {
        case EXTENSION_BEHAVIOR_DISABLE: return "disable";
        case EXTENSION_BEHAVIOR_ENABLE:  return "enable";
        case EXTENSION_BEHAVIOR_REQUIRE: return "require";
        case EXTENSION_BEHAVIOR_WARN:    return "warn";

        default:
        {
            /* Unrecognized behavior value - fall through to the empty string. */
            anvil_assert_fail();
        }
    }

    return std::string();
}
Code example #7
0
/* Please see header for specification.
 *
 * Associates a GLSL extension name with the requested behavior. Fails (and
 * trips an assertion) if a behavior has already been set for the extension.
 *
 * @return true if the behavior was recorded, false if the extension name was
 *         already present.
 */
bool Anvil::GLSLShaderToSPIRVGenerator::add_extension_behavior(std::string       in_extension_name,
                                                               ExtensionBehavior in_behavior)
{
    /* emplace() performs a single look-up and only inserts when the key is
     * new, replacing the original find() + operator[] double look-up. */
    const auto insertion_result = m_extension_behaviors.emplace(std::move(in_extension_name),
                                                                in_behavior);

    if (!insertion_result.second)
    {
        /* A behavior has already been defined for this extension - caller error. */
        anvil_assert_fail();

        return false;
    }

    return true;
}
Code example #8
0
    /** Retrieves EShLanguage corresponding to m_shader_stage assigned to the GLSLShaderToSPIRVGenerator instance. **/
    EShLanguage Anvil::GLSLShaderToSPIRVGenerator::get_glslang_shader_stage() const
    {
        /* Translate Anvil's shader stage enum to glslang's counterpart.
         * EShLangCount doubles as the "invalid" value for unrecognized stages. */
        switch (m_shader_stage)
        {
            case Anvil::ShaderStage::COMPUTE:                 return EShLangCompute;
            case Anvil::ShaderStage::FRAGMENT:                return EShLangFragment;
            case Anvil::ShaderStage::GEOMETRY:                return EShLangGeometry;
            case Anvil::ShaderStage::TESSELLATION_CONTROL:    return EShLangTessControl;
            case Anvil::ShaderStage::TESSELLATION_EVALUATION: return EShLangTessEvaluation;
            case Anvil::ShaderStage::VERTEX:                  return EShLangVertex;

            default:
            {
                anvil_assert_fail();
            }
        }

        return EShLangCount;
    }
Code example #9
0
/* Please see header for specification */
bool Anvil::GLSLShaderToSPIRVGenerator::add_placeholder_value_pair(const std::string& in_placeholder_name,
                                                                   const std::string& in_value)
{
    /* Registering the same placeholder twice is a caller error - reject duplicates. */
    for (const auto& current_pair : m_placeholder_values)
    {
        if (current_pair.first == in_placeholder_name)
        {
            anvil_assert_fail();

            return false;
        }
    }

    m_placeholder_values.emplace_back(in_placeholder_name,
                                      in_value);

    return true;
}
Code example #10
0
File: image_view.cpp    Project: mp3butcher/Anvil
/* Please see header for specification */
void Anvil::ImageView::get_base_mipmap_size(uint32_t* out_opt_base_mipmap_width_ptr,
                                            uint32_t* out_opt_base_mipmap_height_ptr,
                                            uint32_t* out_opt_base_mipmap_depth_ptr) const
{
    const auto base_mip_level   = m_create_info_ptr->get_base_mipmap_level();
    const auto parent_image_ptr = m_create_info_ptr->get_parent_image     ();
    bool       mip_size_ok      = false;
    uint32_t   depth            = 0;

    ANVIL_REDUNDANT_VARIABLE(mip_size_ok);

    /* Width and height come straight from the parent image's base mip level;
     * depth is handled separately below since it depends on the view type. */
    mip_size_ok = parent_image_ptr->get_image_mipmap_size(base_mip_level,
                                                          out_opt_base_mipmap_width_ptr,
                                                          out_opt_base_mipmap_height_ptr,
                                                          nullptr);
    anvil_assert(mip_size_ok);

    switch (m_create_info_ptr->get_type() )
    {
        /* Non-array 1D/2D views are a single slice deep. */
        case VK_IMAGE_VIEW_TYPE_1D:
        case VK_IMAGE_VIEW_TYPE_2D:
        {
            depth = 1;

            break;
        }

        /* Array and cube views report one "depth" unit per layer. */
        case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
        case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
        case VK_IMAGE_VIEW_TYPE_CUBE:
        case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
        {
            depth = m_create_info_ptr->get_n_layers();

            break;
        }

        /* 3D views use the actual slice count. */
        case VK_IMAGE_VIEW_TYPE_3D:
        {
            depth = m_create_info_ptr->get_n_slices();

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    if (out_opt_base_mipmap_depth_ptr != nullptr)
    {
        *out_opt_base_mipmap_depth_ptr = depth;
    }
}
Code example #11
0
/* Please see header for specification.
 *
 * Creates the platform-specific VkSurfaceKHR (unless the window platform is a
 * dummy/offscreen one) and caches, per physical device, which queue families
 * can present to the surface.
 *
 * @return true on success, false otherwise.
 */
bool Anvil::RenderingSurface::init()
{
    const Anvil::DeviceType& device_type       (m_device_ptr->get_type() );
    bool                     init_successful   (false);
    auto                     instance_ptr      (m_create_info_ptr->get_instance_ptr() );
    uint32_t                 n_physical_devices(0);
    VkResult                 result            (VK_SUCCESS);
    const WindowPlatform     window_platform   (m_create_info_ptr->get_window_ptr()->get_platform());

    /* Dummy window platforms mean offscreen rendering: no VkSurfaceKHR is created. */
    const bool               is_dummy_window_platform(window_platform == WINDOW_PLATFORM_DUMMY                     ||
                                                      window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);


    /* Determine how many physical devices the surface must be checked against. */
    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            n_physical_devices = mgpu_device_ptr->get_n_physical_devices();

            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            n_physical_devices = 1;

            break;
        }

        default:
        {
            anvil_assert_fail();

            goto end;
        }
    }


    /* Create the platform-specific surface object. Exactly one of the #if
     * branches below is expected to be active for a non-dummy build. */
    if (!is_dummy_window_platform)
    {
        auto window_ptr = m_create_info_ptr->get_window_ptr();

        #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT) && defined(_WIN32)
        {
            VkWin32SurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags     = 0;
            surface_create_info.hinstance = GetModuleHandle(nullptr);
            surface_create_info.hwnd      = window_ptr->get_handle();
            surface_create_info.pNext     = nullptr;
            surface_create_info.sType     = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_win32_surface_entrypoints().vkCreateWin32SurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                        &surface_create_info,
                                                                                                         nullptr, /* pAllocator */
                                                                                                        &m_surface);
        }
        #endif
        #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT) && !defined(_WIN32)
        {
            VkXcbSurfaceCreateInfoKHR surface_create_info;

            surface_create_info.flags       = 0;
            surface_create_info.window      = window_ptr->get_handle();
            surface_create_info.connection  = static_cast<xcb_connection_t*>(window_ptr->get_connection());
            surface_create_info.pNext       = nullptr;
            surface_create_info.sType       = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;

            result = instance_ptr->get_extension_khr_xcb_surface_entrypoints().vkCreateXcbSurfaceKHR(instance_ptr->get_instance_vk(),
                                                                                                    &surface_create_info,
                                                                                                     nullptr, /* pAllocator */
                                                                                                    &m_surface);
            }
        #endif

        anvil_assert_vk_call_succeeded(result);
        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_surface);
        }
    }
    else
    {
        anvil_assert(window_platform != WINDOW_PLATFORM_UNKNOWN);
    }

    if (is_dummy_window_platform == false)
    {
        /* Is there at least one queue fam that can be used together with at least one physical device associated with
         * the logical device to present using the surface we've just spawned and the physical device user has specified? */
        const auto& queue_families(m_device_ptr->get_physical_device_queue_families() );

        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            Anvil::RenderingSurface::PhysicalDeviceCapabilities* physical_device_caps_ptr = nullptr;
            const Anvil::PhysicalDevice*                         physical_device_ptr      = nullptr;

            /* Resolve the n-th physical device and its capabilities slot. */
            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = mgpu_device_ptr->get_physical_device(n_physical_device);
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    physical_device_ptr      = sgpu_device_ptr->get_physical_device();
                    physical_device_caps_ptr = &m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }

            /* Record every queue family that can present to the new surface. */
            for (uint32_t n_queue_family = 0;
                          n_queue_family < static_cast<uint32_t>(queue_families.size() );
                        ++n_queue_family)
            {
                VkBool32 is_presentation_supported = VK_FALSE;

                {
                    const auto& khr_surface_entrypoints = instance_ptr->get_extension_khr_surface_entrypoints();

                    result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceSupportKHR(physical_device_ptr->get_physical_device(),
                                                                                          n_queue_family,
                                                                                          m_surface,
                                                                                         &is_presentation_supported);
                }

                if (is_vk_call_successful(result)         &&
                    is_presentation_supported == VK_TRUE)
                {
                    physical_device_caps_ptr->present_capable_queue_fams.push_back(n_queue_family);
                }
            }
        }
    }
    else
    {
        /* offscreen rendering. Any physical device that offers universal queue can be used to "present" */
        for (uint32_t n_physical_device = 0;
                      n_physical_device < n_physical_devices;
                    ++n_physical_device)
        {
            switch (device_type)
            {
                case Anvil::DeviceType::MULTI_GPU:
                {
                    const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

                    if (mgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device);
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        result_caps.present_capable_queue_fams.push_back(mgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                case Anvil::DeviceType::SINGLE_GPU:
                {
                    const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                    if (sgpu_device_ptr->get_n_universal_queues() > 0)
                    {
                        const Anvil::PhysicalDevice* physical_device_ptr = sgpu_device_ptr->get_physical_device();
                        auto&                        result_caps         = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

                        result_caps.present_capable_queue_fams.push_back(sgpu_device_ptr->get_universal_queue(0)->get_queue_family_index() );
                    }

                    break;
                }

                default:
                {
                    anvil_assert_fail();

                    goto end;
                }
            }
        }

        /* No Vulkan call is made on the offscreen path - force success. */
        result = VK_SUCCESS;
    }

    if (!is_vk_call_successful(result) )
    {
        anvil_assert_vk_call_succeeded(result);

        init_successful = false;
    }
    else
    {
        /* Retrieve Vulkan object capabilities and cache them */
        cache_surface_properties();

        init_successful = true;
    }

end:
    return init_successful;
}
Code example #12
0
/* Please see header for specification */
Anvil::PipelineLayout* Anvil::BasePipelineManager::get_pipeline_layout(PipelineID in_pipeline_id)
{
    std::unique_lock<std::recursive_mutex> mutex_lock;
    auto                                   mutex_ptr = get_mutex();

    /* Serialize access when the manager operates in MT-safe mode. The lock is
     * released automatically on every return path below. */
    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::unique_lock<std::recursive_mutex>(*mutex_ptr);
    }

    /* Look the pipeline up among baked pipelines first, then outstanding ones. */
    Pipelines::iterator pipeline_iterator = m_baked_pipelines.find(in_pipeline_id);

    if (pipeline_iterator == m_baked_pipelines.end() )
    {
        pipeline_iterator = m_outstanding_pipelines.find(in_pipeline_id);

        if (pipeline_iterator == m_outstanding_pipelines.end() )
        {
            anvil_assert(!(pipeline_iterator == m_outstanding_pipelines.end() ));

            return nullptr;
        }
    }

    Pipeline* pipeline_ptr = pipeline_iterator->second.get();

    /* Proxy pipelines never own a layout. */
    if (pipeline_ptr->pipeline_create_info_ptr->is_proxy() )
    {
        anvil_assert(!pipeline_ptr->pipeline_create_info_ptr->is_proxy() );

        return nullptr;
    }

    /* Instantiate the layout lazily, the first time it is requested. */
    if (pipeline_ptr->layout_ptr == nullptr)
    {
        if (!m_pipeline_layout_manager_ptr->get_layout(pipeline_ptr->pipeline_create_info_ptr->get_ds_create_info_items(),
                                                       pipeline_ptr->pipeline_create_info_ptr->get_push_constant_ranges(),
                                                      &pipeline_ptr->layout_ptr) )
        {
            anvil_assert_fail();

            return nullptr;
        }

        if (pipeline_ptr->layout_ptr == nullptr)
        {
            anvil_assert(!(pipeline_ptr->layout_ptr == nullptr) );

            return nullptr;
        }
    }

    return pipeline_ptr->layout_ptr.get();
}
Code example #13
0
File: swapchain.cpp    Project: mp3butcher/Anvil
/** Initializes the swapchain object. */
bool Anvil::Swapchain::init()
{
    uint32_t                                              n_swapchain_images             = 0;
    auto                                                  parent_surface_ptr             = m_create_info_ptr->get_rendering_surface();
    VkResult                                              result                         = VK_ERROR_INITIALIZATION_FAILED;
    Anvil::StructChainUniquePtr<VkSwapchainCreateInfoKHR> struct_chain_ptr;
    std::vector<VkImage>                                  swapchain_images;
    const VkSurfaceTransformFlagBitsKHR                   swapchain_transformation       = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    const WindowPlatform                                  window_platform                = m_create_info_ptr->get_window()->get_platform();
    const bool                                            is_offscreen_rendering_enabled = (window_platform   == WINDOW_PLATFORM_DUMMY                     ||
                                                                                            window_platform   == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

    m_size.width  = parent_surface_ptr->get_width ();
    m_size.height = parent_surface_ptr->get_height();

    /* not doing offscreen rendering */
    if (!is_offscreen_rendering_enabled)
    {
        const auto&                                    khr_swapchain_entrypoints = m_device_ptr->get_extension_khr_swapchain_entrypoints();
        Anvil::StructChainer<VkSwapchainCreateInfoKHR> struct_chainer;

        #ifdef _DEBUG
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            const Anvil::DeviceType    device_type                     = m_device_ptr->get_type();
            uint32_t                   n_physical_devices              = 0;
            bool                       result_bool                     = false;
            const char*                required_surface_extension_name = nullptr;
            VkSurfaceCapabilitiesKHR   surface_caps;
            VkCompositeAlphaFlagsKHR   supported_composite_alpha_flags = static_cast<VkCompositeAlphaFlagsKHR>(0);
            VkSurfaceTransformFlagsKHR supported_surface_transform_flags;

            #ifdef _WIN32
                #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_WIN32_SURFACE_EXTENSION_NAME;
                #endif
            #else
                #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_XCB_SURFACE_EXTENSION_NAME;
                #endif
            #endif

            anvil_assert(required_surface_extension_name == nullptr                                                            ||
                         m_device_ptr->get_parent_instance()->is_instance_extension_supported(required_surface_extension_name) );

            switch (device_type)
            {
                case Anvil::DEVICE_TYPE_SINGLE_GPU: n_physical_devices = 1; break;

                default:
                {
                    anvil_assert_fail();
                }
            }

            for (uint32_t n_physical_device = 0;
                          n_physical_device < n_physical_devices;
                        ++n_physical_device)
            {
                const Anvil::PhysicalDevice* current_physical_device_ptr = nullptr;

                switch (device_type)
                {
                    case Anvil::DEVICE_TYPE_SINGLE_GPU: current_physical_device_ptr = sgpu_device_ptr->get_physical_device(); break;

                    default:
                    {
                        anvil_assert_fail();
                    }
                }

                /* Ensure opaque composite alpha mode is supported */
                anvil_assert(parent_surface_ptr->get_supported_composite_alpha_flags(&supported_composite_alpha_flags) );

                anvil_assert(supported_composite_alpha_flags & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR);

                /* Ensure we can use the swapchain image format  */
                anvil_assert(parent_surface_ptr->is_compatible_with_image_format(m_create_info_ptr->get_format(),
                                                                                &result_bool) );
                anvil_assert(result_bool);

                /* Ensure the transformation we're about to request is supported by the rendering surface */
                anvil_assert(parent_surface_ptr->get_supported_transformations(&supported_surface_transform_flags) );

                anvil_assert(supported_surface_transform_flags & swapchain_transformation);

                /* Ensure the requested number of swapchain images is reasonable*/
                anvil_assert(parent_surface_ptr->get_capabilities(&surface_caps) );

                anvil_assert(surface_caps.maxImageCount == 0                                 ||
                             surface_caps.maxImageCount >= m_create_info_ptr->get_n_images() );
            }
        }
        #endif

        {
            VkSwapchainCreateInfoKHR create_info;

            create_info.clipped               = true; /* we won't be reading from the presentable images */
            create_info.compositeAlpha        = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
            create_info.flags                 = m_create_info_ptr->get_flags();
            create_info.imageArrayLayers      = 1;
            create_info.imageColorSpace       = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
            create_info.imageExtent.height    = parent_surface_ptr->get_height();
            create_info.imageExtent.width     = parent_surface_ptr->get_width ();
            create_info.imageFormat           = m_create_info_ptr->get_format ();
            create_info.imageSharingMode      = VK_SHARING_MODE_EXCLUSIVE;
            create_info.imageUsage            = m_create_info_ptr->get_usage_flags();
            create_info.minImageCount         = m_create_info_ptr->get_n_images   ();
            create_info.oldSwapchain          = VK_NULL_HANDLE;
            create_info.pNext                 = nullptr;
            create_info.pQueueFamilyIndices   = nullptr;
            create_info.presentMode           = m_create_info_ptr->get_present_mode();
            create_info.preTransform          = swapchain_transformation;
            create_info.queueFamilyIndexCount = 0;
            create_info.sType                 = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
            create_info.surface               = parent_surface_ptr->get_surface();

            struct_chainer.append_struct(create_info);
        }

        struct_chain_ptr = struct_chainer.create_chain();

        parent_surface_ptr->lock();
        {
            result = khr_swapchain_entrypoints.vkCreateSwapchainKHR(m_device_ptr->get_device_vk(),
                                                                    struct_chain_ptr->get_root_struct(),
                                                                    nullptr, /* pAllocator */
                                                                   &m_swapchain);
        }
        parent_surface_ptr->unlock();

        anvil_assert_vk_call_succeeded(result);
        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_swapchain);
        }

        /* Retrieve swap-chain images */
        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                   nullptr); /* pSwapchainImages */

        anvil_assert_vk_call_succeeded(result);
        anvil_assert                  (n_swapchain_images >  0);

        swapchain_images.resize(n_swapchain_images);

        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                  &swapchain_images[0]);

        anvil_assert_vk_call_succeeded(result);
    }
    else /* offscreen rendering */
    {
        m_create_info_ptr->set_usage_flags(m_create_info_ptr->get_usage_flags() | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

        n_swapchain_images = m_create_info_ptr->get_n_images();
    }

    for (uint32_t n_result_image = 0;
                  n_result_image < n_swapchain_images;
                ++n_result_image)
    {
        /* Spawn an Image wrapper class for the swap-chain image. */
        if (!is_offscreen_rendering_enabled)
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_swapchain_wrapper(m_device_ptr,
                                                                                    this,
                                                                                    swapchain_images[n_result_image],
                                                                                    n_result_image);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }
        else
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_nonsparse_alloc(m_device_ptr,
                                                                                  VK_IMAGE_TYPE_2D,
                                                                                  m_create_info_ptr->get_format(),
                                                                                  VK_IMAGE_TILING_OPTIMAL,
                                                                                  m_create_info_ptr->get_usage_flags(),
                                                                                  m_size.width,
                                                                                  m_size.height,
                                                                                  1, /* base_mipmap_depth */
                                                                                  1,
                                                                                  VK_SAMPLE_COUNT_1_BIT,
                                                                                  QUEUE_FAMILY_GRAPHICS_BIT,
                                                                                  VK_SHARING_MODE_EXCLUSIVE,
                                                                                  false, /* in_use_full_mipmap_chain */
                                                                                  0,     /* in_memory_features       */
                                                                                  0,     /* in_create_flags          */
                                                                                  VK_IMAGE_LAYOUT_GENERAL,
                                                                                  nullptr);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }

        /* For each swap-chain image, create a relevant view */
        {
            auto create_info_ptr = Anvil::ImageViewCreateInfo::create_2D(m_device_ptr,
                                                                         m_image_ptrs[n_result_image].get(),
                                                                         0, /* n_base_layer */
                                                                         0, /* n_base_mipmap_level */
                                                                         1, /* n_mipmaps           */
                                                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                                                         m_create_info_ptr->get_format(),
                                                                         VK_COMPONENT_SWIZZLE_R,
                                                                         VK_COMPONENT_SWIZZLE_G,
                                                                         VK_COMPONENT_SWIZZLE_B,
                                                                         VK_COMPONENT_SWIZZLE_A);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_view_ptrs[n_result_image] = Anvil::ImageView::create(std::move(create_info_ptr) );
        }

        result = VK_SUCCESS;
    }

    /* Sign up for present submission notifications. This is needed to ensure that number of presented frames ==
     * number of acquired frames at destruction time.
     */
    {
        std::vector<Anvil::Queue*> queues;

        switch (m_device_ptr->get_type() )
        {
            case Anvil::DEVICE_TYPE_SINGLE_GPU:
            {
                const std::vector<uint32_t>* queue_fams_with_present_support_ptr(nullptr);
                const auto                   rendering_surface_ptr              (m_create_info_ptr->get_rendering_surface() );
                const Anvil::SGPUDevice*     sgpu_device_ptr                    (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                if (!rendering_surface_ptr->get_queue_families_with_present_support(&queue_fams_with_present_support_ptr) )
                {
                    break;
                }

                if (queue_fams_with_present_support_ptr == nullptr)
                {
                    anvil_assert(queue_fams_with_present_support_ptr != nullptr);
                }
                else
                {
                    for (const auto queue_fam : *queue_fams_with_present_support_ptr)
                    {
                        const uint32_t n_queues = sgpu_device_ptr->get_n_queues(queue_fam);

                        for (uint32_t n_queue = 0;
                                      n_queue < n_queues;
                                    ++n_queue)
                        {
                            auto queue_ptr = sgpu_device_ptr->get_queue_for_queue_family_index(queue_fam,
                                                                                               n_queue);

                            anvil_assert(queue_ptr != nullptr);

                            if (std::find(queues.begin(),
                                          queues.end(),
                                          queue_ptr) == queues.end() )
                            {
                                queues.push_back(queue_ptr);
                            }
                        }
                    }
                }

                break;
            }
        }

        for (auto queue_ptr : queues)
        {
            queue_ptr->register_for_callbacks(
                QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                std::bind(&Swapchain::on_present_request_issued,
                          this,
                          std::placeholders::_1),
                this
            );

            m_observed_queues.push_back(queue_ptr);
        }
    }

    /* Sign up for "about to close the parent window" notifications. Swapchain instance SHOULD be deinitialized
     * before the window is destroyed, so we're going to act as nice citizens.
     */
    m_create_info_ptr->get_window()->register_for_callbacks(
        WINDOW_CALLBACK_ID_ABOUT_TO_CLOSE,
        std::bind(&Swapchain::on_parent_window_about_to_close,
                  this),
        this
    );

    return is_vk_call_successful(result);
}
コード例 #14
0
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info = {};
    VkResult               result      = VK_ERROR_DEVICE_LOST;

    const bool khr_dedicated_allocation_supported = m_device_ptr->get_extension_info()->khr_dedicated_allocation();

    /* Instantiate the entrypoint table which VMA is going to use to talk to Vulkan. */
    m_vma_func_ptrs.reset(new VmaVulkanFunctions() );

    if (m_vma_func_ptrs == nullptr)
    {
        anvil_assert(m_vma_func_ptrs != nullptr);

        return is_vk_call_successful(result);
    }

    {
        auto& funcs = *m_vma_func_ptrs;

        funcs.vkAllocateMemory                    = Vulkan::vkAllocateMemory;
        funcs.vkBindBufferMemory                  = Vulkan::vkBindBufferMemory;
        funcs.vkBindImageMemory                   = Vulkan::vkBindImageMemory;
        funcs.vkCreateBuffer                      = Vulkan::vkCreateBuffer;
        funcs.vkCreateImage                       = Vulkan::vkCreateImage;
        funcs.vkDestroyBuffer                     = Vulkan::vkDestroyBuffer;
        funcs.vkDestroyImage                      = Vulkan::vkDestroyImage;
        funcs.vkFreeMemory                        = Vulkan::vkFreeMemory;
        funcs.vkGetBufferMemoryRequirements       = Vulkan::vkGetBufferMemoryRequirements;
        funcs.vkGetImageMemoryRequirements        = Vulkan::vkGetImageMemoryRequirements;
        funcs.vkGetPhysicalDeviceMemoryProperties = Vulkan::vkGetPhysicalDeviceMemoryProperties;
        funcs.vkGetPhysicalDeviceProperties       = Vulkan::vkGetPhysicalDeviceProperties;
        funcs.vkMapMemory                         = Vulkan::vkMapMemory;
        funcs.vkUnmapMemory                       = Vulkan::vkUnmapMemory;

        /* The GetXMemoryRequirements2 entrypoints are only available when the
         * VK_KHR_get_memory_requirements2 device extension has been enabled. */
        const bool has_gmr2 = m_device_ptr->get_extension_info()->khr_get_memory_requirements2();

        funcs.vkGetBufferMemoryRequirements2KHR = (has_gmr2) ? m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetBufferMemoryRequirements2KHR
                                                             : nullptr;
        funcs.vkGetImageMemoryRequirements2KHR  = (has_gmr2) ? m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetImageMemoryRequirements2KHR
                                                             : nullptr;
    }

    /* Pick the physical device whose memory properties VMA should query. */
    switch (m_device_ptr->get_type() )
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            /* VMA library takes a physical device handle to extract info regarding supported
             * memory types and the like. As VK_KHR_device_group provide explicit mGPU support,
             * it is guaranteed all physical devices within a logical device offer exactly the
             * same capabilities. This means we're safe to pass zeroth physical device to the
             * library, and everything will still be OK.
             */
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = mgpu_device_ptr->get_physical_device(0)->get_physical_device();
            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            /* Unrecognized device type - create_info.physicalDevice stays null. */
            anvil_assert_fail();
        }
    }

    create_info.flags                       = (khr_dedicated_allocation_supported) ? VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT : 0;
    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;
    create_info.pVulkanFunctions            = m_vma_func_ptrs.get();

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);

    return is_vk_call_successful(result);
}
コード例 #15
0
/* Please see header for specification */
bool Anvil::GLSLShaderToSPIRVGenerator::bake_glsl_source_code() const
{
    std::string    final_glsl_source_string;
    const uint32_t n_definition_values        = static_cast<uint32_t>(m_definition_values.size() );
    const uint32_t n_extension_behaviors      = static_cast<uint32_t>(m_extension_behaviors.size() );
    const uint32_t n_placeholder_values       = static_cast<uint32_t>(m_placeholder_values.size());
    const uint32_t n_pragmas                  = static_cast<uint32_t>(m_pragmas.size() );
    bool           result                     = false;

    anvil_assert(m_glsl_source_code_dirty);

    /* Grab the raw GLSL source code, either from a file or from the user-specified string. */
    switch (m_mode)
    {
        case MODE_LOAD_SOURCE_FROM_FILE:
        {
            char* glsl_source = nullptr;

            Anvil::IO::read_file(m_data,
                                 true, /* is_text_file */
                                &glsl_source,
                                 nullptr);  /* out_opt_size_ptr */

            if (glsl_source == nullptr)
            {
                anvil_assert(glsl_source != nullptr);

                goto end;
            }

            final_glsl_source_string = std::string(glsl_source);

            delete [] glsl_source;
            break;
        }

        case MODE_USE_SPECIFIED_SOURCE:
        {
            final_glsl_source_string = m_data;

            break;
        }

        default:
        {
            /* Unrecognized mode specified for a GLSLShaderToSPIRVGenerator instance. */
            anvil_assert_fail();

            goto end;
        }
    }

    if (n_pragmas             > 0 ||
        n_placeholder_values  > 0 ||
        n_extension_behaviors > 0 ||
        n_definition_values   > 0)
    {
        size_t glsl_source_string_second_line_index;

        /* Inject extension behavior definitions, starting from the second line. According to the spec, first line in
         * a GLSL shader must define the ESSL/GLSL version, and glslangvalidator seems to be pretty
         * strict about this. */
        glsl_source_string_second_line_index = final_glsl_source_string.find_first_of('\n') + 1;

        for (auto map_iterator  = m_extension_behaviors.begin();
                  map_iterator != m_extension_behaviors.end();
                ++map_iterator)
        {
            const ExtensionBehavior& current_extension_behavior      = map_iterator->second;
            std::string              current_extension_behavior_glsl = get_extension_behavior_glsl_code(current_extension_behavior);
            std::string              current_extension_name          = map_iterator->first;
            std::string              new_line                        = std::string("#extension ")      +
                                                                       current_extension_name          +
                                                                       std::string(" : ")              +
                                                                       current_extension_behavior_glsl +
                                                                       "\n";

            final_glsl_source_string.insert(glsl_source_string_second_line_index,
                                            new_line);

            /* Keep the insertion point past the lines injected so far. */
            glsl_source_string_second_line_index += new_line.length();
        }

        /* Follow with #defines which associate values with definition names */
        for (auto map_iterator  = m_definition_values.begin();
                  map_iterator != m_definition_values.end();
                ++map_iterator)
        {
            std::string current_key   = map_iterator->first;
            std::string current_value = map_iterator->second;
            std::string new_line      = std::string("#define ") + current_key + std::string(" ") + current_value + "\n";

            final_glsl_source_string.insert(glsl_source_string_second_line_index,
                                            new_line);
        }

        /* Next define pragmas */
        for (auto& current_pragma : m_pragmas)
        {
            std::string pragma_name  = current_pragma.first;
            std::string pragma_value = current_pragma.second;
            std::string new_line     = std::string("#pragma ") + pragma_name + std::string(" ") + pragma_value + "\n";

            final_glsl_source_string.insert(glsl_source_string_second_line_index,
                                            new_line);
        }

        /* Finish with replacing placeholders with values */
        for(auto vec_iterator  = m_placeholder_values.begin();
                 vec_iterator != m_placeholder_values.end();
               ++vec_iterator)
        {
            const std::string& current_key   = vec_iterator->first;
            const std::string& current_value = vec_iterator->second;
            size_t glsl_source_string_pos    = final_glsl_source_string.find(current_key, 0);

            while (glsl_source_string_pos != std::string::npos)
            {
                final_glsl_source_string.replace(glsl_source_string_pos, current_key.size(), current_value);

                /* Resume the search *past* the substituted text. Re-scanning from the same
                 * offset would loop forever whenever the replacement value contains the
                 * placeholder key as a substring. */
                glsl_source_string_pos = final_glsl_source_string.find(current_key, glsl_source_string_pos + current_value.size() );
            }
        }
    }

    /* Cache the GLSL source code used for the conversion */
    m_glsl_source_code = final_glsl_source_string;

    /* All done */
    m_glsl_source_code_dirty = false;
    result = true;

end:
    return result;
}
コード例 #16
0
void Anvil::RenderingSurface::update_surface_extents() const
{
    const Anvil::DeviceType& device_type                   (m_device_ptr->get_type                             () );
    auto                     instance_ptr                  (m_create_info_ptr->get_instance_ptr                () );
    auto                     khr_surface_entrypoints       (instance_ptr->get_extension_khr_surface_entrypoints() );
    const Anvil::MGPUDevice* mgpu_device_ptr               (dynamic_cast<const Anvil::MGPUDevice*>             (m_device_ptr));
    uint32_t                 n_physical_devices            (0);
    const Anvil::SGPUDevice* sgpu_device_ptr               (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr));
    auto                     window_ptr                    (m_create_info_ptr->get_window_ptr     () );

    if (window_ptr != nullptr)
    {
        const WindowPlatform window_platform(window_ptr->get_platform() );

        if (window_platform == WINDOW_PLATFORM_DUMMY                     ||
            window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS)
        {
            /* Nothing to update - off-screen rendering is active. */
            goto end;
        }
        else
        {
            /* In this case, width & height may change at run-time */
        }
    }
    else
    {
        /* In this case, width & height may change at run-time */
    }

    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:  n_physical_devices = mgpu_device_ptr->get_n_physical_devices(); break;
        case Anvil::DeviceType::SINGLE_GPU: n_physical_devices = 1;                                         break;

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Retrieve general properties */
    for (uint32_t n_physical_device = 0;
                  n_physical_device < n_physical_devices;
                ++n_physical_device)
    {
        const Anvil::PhysicalDevice* physical_device_ptr = nullptr;
        VkResult                     result_vk;
        Anvil::SurfaceCapabilities   surface_caps;

        ANVIL_REDUNDANT_VARIABLE_CONST(result_vk);

        switch (device_type)
        {
            case Anvil::DeviceType::MULTI_GPU:  physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device); break;
            case Anvil::DeviceType::SINGLE_GPU: physical_device_ptr = sgpu_device_ptr->get_physical_device();                  break;

            default:
            {
                anvil_assert_fail();
            }
        }

        if (m_surface == VK_NULL_HANDLE)
        {
            /* Nothing to update */
            goto end;
        }

        const VkPhysicalDevice physical_device_vk = physical_device_ptr->get_physical_device();

        result_vk = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_vk,
                                                                                      m_surface,
                                                                                      reinterpret_cast<VkSurfaceCapabilitiesKHR*>(&surface_caps) );

        anvil_assert_vk_call_succeeded(result_vk);

        if (n_physical_device == 0)
        {
            m_height = surface_caps.current_extent.height;
            m_width  = surface_caps.current_extent.width;
        }
        else
        {
            anvil_assert(m_height == surface_caps.current_extent.height);
            anvil_assert(m_width  == surface_caps.current_extent.width);
        }
    }

end:
    ;
}
コード例 #17
0
    /** Reads contents of a file under location @param glsl_filename_with_path and treats the retrieved contents as GLSL source code,
     *  which is then used for GLSL->SPIRV conversion process. The result blob is stored at @param spirv_filename_with_path. The function
     *  then reads the blob contents and stores it under m_spirv_blob.
     *
     *  @param glsl_filename_with_path  As per description above. Must not be nullptr.
     *  @param spirv_filename_with_path As per description above. Must not be nullptr.
     *
     *  @return true if successful, false otherwise.
     **/
    bool Anvil::GLSLShaderToSPIRVGenerator::bake_spirv_blob_by_spawning_glslang_process(const std::string& in_glsl_filename_with_path,
                                                                                        const std::string& in_spirv_filename_with_path) const
    {
        auto        callback_arg            = OnGLSLToSPIRVConversionAboutToBeStartedCallbackArgument(this);
        std::string glslangvalidator_params;
        bool        result                  = false;
        size_t      spirv_file_size         = 0;
        char*       spirv_blob_ptr          = nullptr;

        callback(GLSL_SHADER_TO_SPIRV_GENERATOR_CALLBACK_ID_CONVERSION_ABOUT_TO_START,
                &callback_arg);

        #ifdef _WIN32
        {
            /* Launch glslangvalidator and wait until it finishes doing the job */
            PROCESS_INFORMATION process_info;
            STARTUPINFO         startup_info;

            glslangvalidator_params = "dummy -V -o \"" + in_spirv_filename_with_path + "\" \"" + in_glsl_filename_with_path + "\"";

            memset(&process_info,
                   0,
                   sizeof(process_info) );
            memset(&startup_info,
                   0,
                   sizeof(startup_info) );

            startup_info.cb = sizeof(startup_info);

            if (!CreateProcess(".\\glslangValidator.exe",
                               (LPSTR) glslangvalidator_params.c_str(),
                               nullptr, /* lpProcessAttributes */
                               nullptr, /* lpThreadAttributes */
                               FALSE, /* bInheritHandles */
                               CREATE_NO_WINDOW,
                               nullptr, /* lpEnvironment */
                               nullptr, /* lpCurrentDirectory */
                               &startup_info,
                               &process_info) )
            {
                anvil_assert_fail();

                goto end;
            }

            /* Wait till glslangvalidator is done. */
            if (WaitForSingleObject(process_info.hProcess,
                                    INFINITE) != WAIT_OBJECT_0)
            {
                anvil_assert_fail();

                goto end;
            }
        }
        #else
        {
            int32_t status;
            pid_t   child_pid = 0;

            child_pid = fork();

            if (child_pid == 0)
            {
                char* argv[6] = {(char*)"-S", (char*)"-V", (char*)"-o"};

                char  spirv_file_name[SPIRV_FILE_NAME_LEN];
                char  glsl_file_name [SPIRV_FILE_NAME_LEN];

                strcpy(spirv_file_name, in_spirv_filename_with_path.c_str());
                strcpy(glsl_file_name,  in_glsl_filename_with_path.c_str());

                argv[3] = spirv_file_name;
                argv[4] = glsl_file_name;
                argv[5] = (char*)0;

                int32_t flag = execv("./glslangValidator", (char* const*)argv);
                if (flag == -1)
                {
                    anvil_assert_fail();
                    goto end;
                }
            }
            else
            {
                do
                {
                    pid_t wpid = waitpid(child_pid, &status, WUNTRACED | WCONTINUED);
                    if (wpid == -1)
                    {
                        anvil_assert_fail();
                        goto end;
                    }
                } while (!WIFEXITED(status) && !WIFSIGNALED(status));
            }
        }
        #endif

        /* Now, read the SPIR-V file contents */


        Anvil::IO::read_file(in_spirv_filename_with_path.c_str(),
                             false, /* is_text_file */
                            &spirv_blob_ptr,
                            &spirv_file_size);

        if (spirv_blob_ptr == nullptr)
        {
            anvil_assert(spirv_blob_ptr != nullptr);

            goto end;
        }

        if (spirv_file_size <= 0)
        {
            anvil_assert(spirv_file_size > 0);

            goto end;
        }

        /* No need to keep the file any more. */
        Anvil::IO::delete_file(in_spirv_filename_with_path);

        m_spirv_blob.resize(spirv_file_size);

        memcpy(&m_spirv_blob.at(0),
               spirv_blob_ptr,
               spirv_file_size);

        delete [] spirv_blob_ptr;
        spirv_blob_ptr = nullptr;

        result = true;

    end:
        return result;
    }
コード例 #18
0
/* Please see header for specification */
bool Anvil::GLSLShaderToSPIRVGenerator::bake_spirv_blob() const
{
    bool           glsl_filename_is_temporary = false;
    std::string    glsl_filename_with_path;
    bool           result                     = false;

    ANVIL_REDUNDANT_VARIABLE(glsl_filename_is_temporary);

    /* Make sure the (pre-processed) GLSL source code is up-to-date before converting it. */
    if (m_glsl_source_code_dirty)
    {
        bake_glsl_source_code();

        anvil_assert(!m_glsl_source_code_dirty);
    }

    if (m_mode == MODE_LOAD_SOURCE_FROM_FILE)
    {
        glsl_filename_is_temporary = false;
        glsl_filename_with_path    = m_data;
    }

    /* Form a temporary file name we will use to write the modified GLSL shader to. */
    /* NOTE: This branch is only compiled in when Anvil is NOT linked with glslang. The
     *       external glslangValidator process infers the shader stage from the file
     *       extension, hence the per-stage suffixes below.
     */
    #ifndef ANVIL_LINK_WITH_GLSLANG
    {
        switch (m_shader_stage)
        {
            case ShaderStage::COMPUTE:                 glsl_filename_with_path = "temp.comp"; break;
            case ShaderStage::FRAGMENT:                glsl_filename_with_path = "temp.frag"; break;
            case ShaderStage::GEOMETRY:                glsl_filename_with_path = "temp.geom"; break;
            case ShaderStage::TESSELLATION_CONTROL:    glsl_filename_with_path = "temp.tesc"; break;
            case ShaderStage::TESSELLATION_EVALUATION: glsl_filename_with_path = "temp.tese"; break;
            case ShaderStage::VERTEX:                  glsl_filename_with_path = "temp.vert"; break;

            default:
            {
                /* Unsupported shader stage - bail out with result == false. */
                anvil_assert_fail();

                goto end;
            }
        }

        /* Write down the file to a temporary location */
        Anvil::IO::write_text_file(glsl_filename_with_path,
                                   m_glsl_source_code);

        glsl_filename_is_temporary = true;
    }
    #endif


    #ifdef ANVIL_LINK_WITH_GLSLANG
    {
        /* Shader modules are cached throughout Instance's lifetime in Anvil. It might just happen that
         * the shader we're about to convert to SPIR-V representation has already been converted in the past.
         *
         * Given that the conversion process can be time-consuming, let's try to see if any of the living
         * shader module instances already use exactly the same source code.
         */
        uint32_t n_current_shader_module = 0;
        auto     object_tracker_ptr      = Anvil::ObjectTracker::get();

        do
        {
            auto                       shader_module_raw_ptr = object_tracker_ptr->get_object_at_index     (Anvil::ObjectType::SHADER_MODULE,
                                                                                                            n_current_shader_module);
            const Anvil::ShaderModule* shader_module_ptr     = reinterpret_cast<const Anvil::ShaderModule*>(shader_module_raw_ptr);

            if (shader_module_raw_ptr == nullptr)
            {
                /* Out of shader module instances. */
                break;
            }

            if (shader_module_ptr->get_glsl_source_code() == m_glsl_source_code)
            {
                /* Cache hit - reuse the already-baked SPIR-V blob instead of re-converting. */
                const auto reference_spirv_blob               = shader_module_ptr->get_spirv_blob();
                const auto reference_spirv_blob_size_in_bytes = reference_spirv_blob.size() * sizeof(reference_spirv_blob.at(0) );

                anvil_assert(reference_spirv_blob_size_in_bytes != 0);

                m_spirv_blob.resize(reference_spirv_blob_size_in_bytes);

                memcpy(&m_spirv_blob.at        (0),
                       &reference_spirv_blob.at(0),
                       reference_spirv_blob_size_in_bytes);

                result = true;
                break;
            }

            /* Move to the next shader module instance */
            ++n_current_shader_module;
        }
        while (n_current_shader_module != 0); /* work around "conditional expression is constant" warnings issued by some compilers */

        if (m_spirv_blob.size() == 0)
        {
            /* Need to bake a brand new SPIR-V blob */
            result = bake_spirv_blob_by_calling_glslang(m_glsl_source_code.c_str() );
        }
    }

    #else
    {
        /* We need to point glslangvalidator at a location where it can stash the SPIR-V blob. */
        result = bake_spirv_blob_by_spawning_glslang_process(glsl_filename_with_path,
                                                             "temp.spv");
    }

/* NOTE: The "end" label is only referenced by the "goto end" in the non-glslang branch
 *       above; both the goto and this label are compiled out together when
 *       ANVIL_LINK_WITH_GLSLANG is defined, so each configuration stays well-formed. */
end:
    #endif



    return result;
}
コード例 #19
0
/* Please see header for specification */
/** Queries the Vulkan ICD for the rendering surface's properties (capabilities,
 *  supported formats, supported presentation modes) for each physical device
 *  behind the owning logical device, and caches the results in
 *  m_physical_device_capabilities. Also latches the surface extents into
 *  m_width / m_height.
 *
 *  Special cases:
 *  - Dummy (off-screen) window platforms: extents are taken from the window's
 *    creation-time dimensions instead of surface capabilities.
 *  - m_surface == VK_NULL_HANDLE: conservative defaults are cached and no
 *    Vulkan surface queries are made.
 **/
void Anvil::RenderingSurface::cache_surface_properties()
{
    const Anvil::DeviceType&             device_type                   (m_device_ptr->get_type() );
    bool                                 is_offscreen_rendering_enabled(true);
    auto                                 khr_surface_entrypoints       (m_create_info_ptr->get_instance_ptr()->get_extension_khr_surface_entrypoints() );
    /* Exactly one of the two casts below yields a non-null pointer; which one is
     * valid is decided by device_type in the switches further down. */
    const Anvil::MGPUDevice*             mgpu_device_ptr               (dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr));
    uint32_t                             n_physical_devices            (0);
    const Anvil::SGPUDevice*             sgpu_device_ptr               (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr));
    std::vector<Anvil::SurfaceFormatKHR> supported_formats;
    auto                                 window_ptr                    (m_create_info_ptr->get_window_ptr() );

    if (window_ptr != nullptr)
    {
        const WindowPlatform window_platform(window_ptr->get_platform() );

        /* Dummy window platforms render off-screen, so the surface extents are
         * fixed at window creation time. */
        is_offscreen_rendering_enabled = (window_platform == WINDOW_PLATFORM_DUMMY                     ||
                                          window_platform == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

        if (is_offscreen_rendering_enabled)
        {
            m_height = window_ptr->get_height_at_creation_time();
            m_width  = window_ptr->get_width_at_creation_time ();
        }
        else
        {
            /* In this case, width & height may change at run-time */
        }
    }
    else
    {
        /* In this case, width & height may change at run-time */
    }

    /* Determine how many physical devices need to be queried. */
    switch (device_type)
    {
        case Anvil::DeviceType::MULTI_GPU:  n_physical_devices = mgpu_device_ptr->get_n_physical_devices(); break;
        case Anvil::DeviceType::SINGLE_GPU: n_physical_devices = 1;                                         break;

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Retrieve general properties */
    uint32_t n_supported_formats           (0);
    uint32_t n_supported_presentation_modes(0);
    VkResult result                        (VK_ERROR_INITIALIZATION_FAILED);

    ANVIL_REDUNDANT_VARIABLE(result);

    for (uint32_t n_physical_device = 0;
                  n_physical_device < n_physical_devices;
                ++n_physical_device)
    {
        const Anvil::PhysicalDevice* physical_device_ptr = nullptr;

        switch (device_type)
        {
            case Anvil::DeviceType::MULTI_GPU:  physical_device_ptr = mgpu_device_ptr->get_physical_device(n_physical_device); break;
            case Anvil::DeviceType::SINGLE_GPU: physical_device_ptr = sgpu_device_ptr->get_physical_device();                  break;

            default:
            {
                anvil_assert_fail();
            }
        }

        auto& result_caps = m_physical_device_capabilities[physical_device_ptr->get_device_group_device_index()];

        if (m_surface == VK_NULL_HANDLE)
        {
            /* No VK surface to query - cache permissive defaults and move on. */
            result_caps.supported_composite_alpha_flags = Anvil::CompositeAlphaFlagBits::INHERIT_BIT_KHR;
            result_caps.supported_transformations       = Anvil::SurfaceTransformFlagBits::INHERIT_BIT_KHR;
            result_caps.supported_usages                = static_cast<Anvil::ImageUsageFlags> (Anvil::ImageUsageFlagBits::COLOR_ATTACHMENT_BIT |
                                                                                               Anvil::ImageUsageFlagBits::TRANSFER_SRC_BIT     |
                                                                                               Anvil::ImageUsageFlagBits::TRANSFER_DST_BIT     |
                                                                                               Anvil::ImageUsageFlagBits::STORAGE_BIT);

            result_caps.supported_presentation_modes.push_back(Anvil::PresentModeKHR::IMMEDIATE_KHR);

            continue;
        }

        const VkPhysicalDevice physical_device_vk = physical_device_ptr->get_physical_device();

        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_vk,
                                                                                   m_surface,
                                                                                   reinterpret_cast<VkSurfaceCapabilitiesKHR*>(&result_caps.capabilities) );

        anvil_assert_vk_call_succeeded(result);

        /* All physical devices are expected to report the same surface extents;
         * take the first device's values and verify the rest against them. */
        if (n_physical_device == 0)
        {
            m_height = result_caps.capabilities.current_extent.height;
            m_width  = result_caps.capabilities.current_extent.width;
        }
        else
        {
            anvil_assert(m_height == result_caps.capabilities.current_extent.height);
            anvil_assert(m_width  == result_caps.capabilities.current_extent.width);
        }

        result_caps.supported_composite_alpha_flags = result_caps.capabilities.supported_composite_alpha;
        result_caps.supported_transformations       = result_caps.capabilities.supported_transforms;
        result_caps.supported_usages                = result_caps.capabilities.supported_usage_flags;

        /* Retrieve a list of formats supported by the surface.
         * Standard two-call enumeration: first call obtains the count, second
         * fills the pre-sized vector. */
        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device_vk,
                                                                              m_surface,
                                                                             &n_supported_formats,
                                                                              nullptr /* pSurfaceFormats */);

        anvil_assert                  (n_supported_formats >  0);
        anvil_assert_vk_call_succeeded(result);

        supported_formats.resize(n_supported_formats);

        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device_vk,
                                                                              m_surface,
                                                                             &n_supported_formats,
                                                                              reinterpret_cast<VkSurfaceFormatKHR*>(&supported_formats.at(0) ));
        anvil_assert_vk_call_succeeded(result);

        for (unsigned int n_format = 0;
                          n_format < n_supported_formats;
                        ++n_format)
        {
            result_caps.supported_formats.push_back(RenderingSurfaceFormat(supported_formats[n_format]) );
        }

        /* Retrieve a list of supported presentation modes
         *
         * NOTE: In case of mGPU devices, n_supported_presentation_modes may actually be 0 here for slave devices.
         */
        result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device_vk,
                                                                                   m_surface,
                                                                                  &n_supported_presentation_modes,
                                                                                   nullptr /* pPresentModes */);

        anvil_assert_vk_call_succeeded(result);

        if (n_supported_presentation_modes > 0)
        {
            std::vector<VkPresentModeKHR> temp_storage(n_supported_presentation_modes);

            result_caps.supported_presentation_modes.resize(n_supported_presentation_modes);

            result = khr_surface_entrypoints.vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device_vk,
                                                                                       m_surface,
                                                                                      &n_supported_presentation_modes,
                                                                                      &temp_storage.at(0) );
            anvil_assert_vk_call_succeeded(result);

            /* Convert the raw VkPresentModeKHR values to Anvil's enum class. */
            for (uint32_t n_presentation_mode = 0;
                          n_presentation_mode < static_cast<uint32_t>(temp_storage.size() );
                        ++n_presentation_mode)
            {
                result_caps.supported_presentation_modes.at(n_presentation_mode) = static_cast<Anvil::PresentModeKHR>(temp_storage.at(n_presentation_mode) );
            }
        }
    }
}
コード例 #20
0
/* Please see header for specification */
/** Exports the semaphore to an external handle of the specified type.
 *
 *  Requires VK_KHR_external_semaphore_win32 (Windows) or
 *  VK_KHR_external_semaphore_fd (other platforms). For NT handle types on
 *  Windows, only one external handle may ever be created per semaphore;
 *  a second attempt trips an assertion and returns null.
 *
 *  @param in_semaphore_handle_type Type of the external handle to create.
 *
 *  @return Non-null ExternalHandleUniquePtr on success, null otherwise.
 **/
Anvil::ExternalHandleUniquePtr Anvil::Semaphore::export_to_external_handle(const Anvil::ExternalSemaphoreHandleTypeFlagBits& in_semaphore_handle_type)
{
    /* Platform-dependent handle semantics:
     * - Windows: NT handles must be closed by the app (autorelease) and may only
     *   be exported once per semaphore; the invalid value is nullptr.
     * - POSIX: fds are always owned by (and closed with) the wrapper, can be
     *   exported repeatedly; the invalid value is -1. */
    #if defined(_WIN32)
        const auto invalid_handle                 = nullptr;
        const bool is_autorelease_handle          = Anvil::Utils::is_nt_handle(in_semaphore_handle_type);
        const bool only_one_handle_ever_permitted = Anvil::Utils::is_nt_handle(in_semaphore_handle_type);
    #else
        const int  invalid_handle                 = -1;
        const bool is_autorelease_handle          = true;
        const bool only_one_handle_ever_permitted = false;
    #endif

    ExternalHandleType             result_handle = invalid_handle;
    Anvil::ExternalHandleUniquePtr result_ptr;

    /* Sanity checks: the relevant device extension must have been enabled. */
    #if defined(_WIN32)
    {
        if (!m_create_info_ptr->get_device()->get_extension_info()->khr_external_semaphore_win32() )
        {
            anvil_assert(m_create_info_ptr->get_device()->get_extension_info()->khr_external_semaphore_win32() );

            goto end;
        }
    }
    #else
    {
        if (!m_create_info_ptr->get_device()->get_extension_info()->khr_external_semaphore_fd() )
        {
            anvil_assert(m_create_info_ptr->get_device()->get_extension_info()->khr_external_semaphore_fd() );

            goto end;
        }
    }
    #endif

    /* Refuse a second export for handle types that only permit one. */
    if (only_one_handle_ever_permitted                                                                                                    &&
        m_external_semaphore_created_for_handle_type.find(in_semaphore_handle_type) != m_external_semaphore_created_for_handle_type.end() )
    {
        anvil_assert_fail();

        goto end;
    }

    /* Go and try to open a new handle. */
    {
        #if defined(_WIN32)
            const auto                       entrypoints_ptr = &m_create_info_ptr->get_device()->get_extension_khr_external_semaphore_win32_entrypoints();
            VkSemaphoreGetWin32HandleInfoKHR info;
        #else
            const auto              entrypoints_ptr = &m_create_info_ptr->get_device()->get_extension_khr_external_semaphore_fd_entrypoints();
            VkSemaphoreGetFdInfoKHR info;
        #endif


        anvil_assert(m_semaphore != VK_NULL_HANDLE);

        /* The two info structs share these member names; only sType differs. */
        info.handleType = static_cast<VkExternalSemaphoreHandleTypeFlagBits>(in_semaphore_handle_type);
        info.pNext      = nullptr;
        info.semaphore  = m_semaphore;

        #if defined(_WIN32)
        {
            info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
        }
        #else
        {
            info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        }
        #endif

        #if defined(_WIN32)
            if (entrypoints_ptr->vkGetSemaphoreWin32HandleKHR(m_create_info_ptr->get_device()->get_device_vk(),
                                                             &info,
                                                             &result_handle) != VK_SUCCESS)
        #else
            if (entrypoints_ptr->vkGetSemaphoreFdKHR(m_create_info_ptr->get_device()->get_device_vk(),
                                                    &info,
                                                    &result_handle) != VK_SUCCESS)
        #endif
        {
            anvil_assert_fail();

            goto end;
        }

        /* Defensive check: a successful call should not hand back the invalid value. */
        if (result_handle == invalid_handle)
        {
            anvil_assert(result_handle != invalid_handle);

            goto end;
        }
    }

    /* Cache the newly created handle if it's a NT handle  */
    if (only_one_handle_ever_permitted)
    {
        m_external_semaphore_created_for_handle_type[in_semaphore_handle_type] = true;
    }

    result_ptr = Anvil::ExternalHandle::create(result_handle,
                                               is_autorelease_handle); /* in_close_at_destruction_time */

end:
    return result_ptr;
}
コード例 #21
0
ファイル: fence.cpp プロジェクト: mp3butcher/Anvil
/* Please see header for specification */
/** Exports the fence to an external handle of the specified type.
 *
 *  Requires VK_KHR_external_fence_win32 (Windows) or VK_KHR_external_fence_fd
 *  (other platforms). For NT handle types on Windows, only one external handle
 *  may ever be created per fence; a second attempt trips an assertion and
 *  returns null.
 *
 *  @param in_fence_handle_type Type of the external handle to create.
 *
 *  @return Non-null ExternalHandleUniquePtr on success, null otherwise.
 **/
Anvil::ExternalHandleUniquePtr Anvil::Fence::export_to_external_handle(const Anvil::ExternalFenceHandleTypeBit& in_fence_handle_type)
{
    /* Platform-dependent handle semantics:
     * - Windows: NT handles must be closed by the app (autorelease) and may only
     *   be exported once per fence; the invalid value is nullptr.
     * - POSIX: fds are always owned by (and closed with) the wrapper, can be
     *   exported repeatedly; the invalid value is -1. */
    #if defined(_WIN32)
        const auto invalid_handle                 = nullptr;
        const bool is_autorelease_handle          = Anvil::Utils::is_nt_handle(in_fence_handle_type);
        const bool only_one_handle_ever_permitted = Anvil::Utils::is_nt_handle(in_fence_handle_type);
    #else
        const int  invalid_handle                 = -1;
        const bool is_autorelease_handle          = true;
        const bool only_one_handle_ever_permitted = false;
    #endif

    /* NOTE: Initialize with invalid_handle rather than 0: on POSIX builds 0 is a
     *       valid file descriptor while -1 is the invalid sentinel, so the
     *       "result_handle == invalid_handle" check below would otherwise be
     *       meaningless. This also matches Anvil::Semaphore::export_to_external_handle(). */
    ExternalHandleType             result_handle = invalid_handle;
    Anvil::ExternalHandleUniquePtr result_ptr;

    /* Sanity checks: the relevant device extension must have been enabled. */
    #if defined(_WIN32)
    {
        if (!m_create_info_ptr->get_device()->get_extension_info()->khr_external_fence_win32() )
        {
            anvil_assert(m_create_info_ptr->get_device()->get_extension_info()->khr_external_fence_win32() );

            goto end;
        }
    }
    #else
    {
        if (!m_create_info_ptr->get_device()->get_extension_info()->khr_external_fence_fd() )
        {
            anvil_assert(m_create_info_ptr->get_device()->get_extension_info()->khr_external_fence_fd() );

            goto end;
        }
    }
    #endif

    /* Refuse a second export for handle types that only permit one. */
    if (only_one_handle_ever_permitted                                                                                        &&
        m_external_fence_created_for_handle_type.find(in_fence_handle_type) != m_external_fence_created_for_handle_type.end() )
    {
        anvil_assert_fail();

        goto end;
    }

    /* Go and try to open a new handle. */
    #if defined(_WIN32)
    {
        const auto                   entrypoints_ptr = &m_create_info_ptr->get_device()->get_extension_khr_external_fence_win32_entrypoints();
        VkFenceGetWin32HandleInfoKHR info;

        anvil_assert(m_fence != VK_NULL_HANDLE);

        info.fence      = m_fence;
        info.handleType = static_cast<VkExternalFenceHandleTypeFlagBits>(Anvil::Utils::convert_external_fence_handle_type_bits_to_vk_external_fence_handle_type_flags(in_fence_handle_type) );
        info.pNext      = nullptr;
        info.sType      = VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR;

        if (entrypoints_ptr->vkGetFenceWin32HandleKHR(m_create_info_ptr->get_device()->get_device_vk(),
                                                      &info,
                                                      &result_handle) != VK_SUCCESS)
        {
            anvil_assert_fail();

            goto end;
        }
    }
    #else
    {
        const auto          entrypoints_ptr = &m_create_info_ptr->get_device()->get_extension_khr_external_fence_fd_entrypoints();
        VkFenceGetFdInfoKHR info;

        anvil_assert(m_fence != VK_NULL_HANDLE);

        info.fence      = m_fence;
        info.handleType = static_cast<VkExternalFenceHandleTypeFlagBits>(Anvil::Utils::convert_external_fence_handle_type_bits_to_vk_external_fence_handle_type_flags(in_fence_handle_type) );
        info.pNext      = nullptr;
        info.sType      = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR;

        if (entrypoints_ptr->vkGetFenceFdKHR(m_create_info_ptr->get_device()->get_device_vk(),
                                            &info,
                                            &result_handle) != VK_SUCCESS)
        {
            anvil_assert_fail();

            goto end;
        }
    }
    #endif

    /* Defensive check: a successful call should not hand back the invalid value. */
    if (result_handle == invalid_handle)
    {
        anvil_assert(result_handle != invalid_handle);

        goto end;
    }

    /* If this is necessary, cache the newly created handle so that we do not let the app attempt to re-create the external handle
     * for the same Vulkan fence handle.
     */
    if (only_one_handle_ever_permitted)
    {
        m_external_fence_created_for_handle_type[in_fence_handle_type] = true;
    }

    result_ptr = Anvil::ExternalHandle::create(result_handle,
                                               is_autorelease_handle); /* in_close_at_destruction_time */

end:
    return result_ptr;
}
コード例 #22
0
ファイル: queue.cpp プロジェクト: mp3butcher/Anvil
/** Please see header for specification */
/** Submits the command buffers, wait semaphores and signal semaphores described
 *  by @param in_submit_info to this queue via vkQueueSubmit.
 *
 *  If the submit info requests blocking behavior and no fence is provided, the
 *  queue's internal m_submit_fence_ptr is used (and reset prior to submission)
 *  so the function can wait for GPU-side completion before returning.
 *
 *  All objects involved in the submission are locked for the duration of the
 *  vkQueueSubmit call (and, when blocking, the subsequent wait) and unlocked
 *  afterwards via submit_command_buffers_lock_unlock().
 *
 *  @param in_submit_info Describes the submission. Only SubmissionType::SGPU
 *                        is handled here; other types trip an assertion.
 **/
void Anvil::Queue::submit(const Anvil::SubmitInfo& in_submit_info)
{
    Anvil::Fence*                      fence_ptr        (in_submit_info.get_fence() );
    bool                               needs_fence_reset(false);
    VkResult                           result           (VK_ERROR_INITIALIZATION_FAILED);
    Anvil::StructChainer<VkSubmitInfo> struct_chainer;

    /* Scratch arrays holding the raw Vulkan handles extracted from the Anvil wrappers. */
    std::vector<VkCommandBuffer> cmd_buffers_vk      (in_submit_info.get_n_command_buffers  () );
    std::vector<VkSemaphore>     signal_semaphores_vk(in_submit_info.get_n_signal_semaphores() );
    std::vector<VkSemaphore>     wait_semaphores_vk  (in_submit_info.get_n_wait_semaphores  () );

    ANVIL_REDUNDANT_VARIABLE(result);

    /* Prepare for the submission */
    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
            VkSubmitInfo submit_info;

            /* Unwrap command buffers, signal semaphores and wait semaphores into
             * the raw-handle arrays declared above. */
            for (uint32_t n_command_buffer = 0;
                          n_command_buffer < in_submit_info.get_n_command_buffers();
                        ++n_command_buffer)
            {
                cmd_buffers_vk.at(n_command_buffer) = in_submit_info.get_command_buffers_sgpu()[n_command_buffer]->get_command_buffer();
            }

            for (uint32_t n_signal_semaphore = 0;
                          n_signal_semaphore < in_submit_info.get_n_signal_semaphores();
                        ++n_signal_semaphore)
            {
                auto sem_ptr = in_submit_info.get_signal_semaphores_sgpu()[n_signal_semaphore];

                signal_semaphores_vk.at(n_signal_semaphore) = sem_ptr->get_semaphore();
            }

            for (uint32_t n_wait_semaphore = 0;
                          n_wait_semaphore < in_submit_info.get_n_wait_semaphores();
                        ++n_wait_semaphore)
            {
                wait_semaphores_vk.at(n_wait_semaphore) = in_submit_info.get_wait_semaphores_sgpu()[n_wait_semaphore]->get_semaphore();
            }

            /* Note: &vector.at(0) is only taken when the corresponding count is non-zero. */
            submit_info.commandBufferCount   = in_submit_info.get_n_command_buffers ();
            submit_info.pCommandBuffers      = (in_submit_info.get_n_command_buffers()   != 0) ? &cmd_buffers_vk.at(0)       : nullptr;
            submit_info.pNext                = nullptr;
            submit_info.pSignalSemaphores    = (in_submit_info.get_n_signal_semaphores() != 0) ? &signal_semaphores_vk.at(0) : nullptr;
            submit_info.pWaitDstStageMask    = in_submit_info.get_destination_stage_wait_masks();
            submit_info.pWaitSemaphores      = (in_submit_info.get_n_wait_semaphores()   != 0) ? &wait_semaphores_vk.at(0)   : nullptr;
            submit_info.signalSemaphoreCount = in_submit_info.get_n_signal_semaphores();
            submit_info.sType                = VK_STRUCTURE_TYPE_SUBMIT_INFO;
            submit_info.waitSemaphoreCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(submit_info);

            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    /* Any additional structs to chain? */
    #if defined(_WIN32)
    {
        /* Chain a VkD3D12FenceSubmitInfoKHR if the submit info carries D3D12
         * fence semaphore values (VK_KHR_external_semaphore_win32 path). */
        const uint64_t* d3d12_fence_signal_semaphore_values_ptr = nullptr;
        const uint64_t* d3d12_fence_wait_semaphore_values_ptr   = nullptr;

        if (in_submit_info.get_d3d12_fence_semaphore_values(&d3d12_fence_signal_semaphore_values_ptr,
                                                            &d3d12_fence_wait_semaphore_values_ptr) )
        {
            VkD3D12FenceSubmitInfoKHR fence_info;

            fence_info.pNext                      = nullptr;
            fence_info.pSignalSemaphoreValues     = d3d12_fence_signal_semaphore_values_ptr;
            fence_info.pWaitSemaphoreValues       = d3d12_fence_wait_semaphore_values_ptr;
            fence_info.signalSemaphoreValuesCount = in_submit_info.get_n_signal_semaphores();
            fence_info.sType                      = VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR;
            fence_info.waitSemaphoreValuesCount   = in_submit_info.get_n_wait_semaphores();

            struct_chainer.append_struct(fence_info);
        }
    }
    #endif

    /* Go for it */
    /* A blocking submission needs a fence to wait on; fall back to the queue's
     * internal fence when the caller did not provide one. */
    if (fence_ptr                         == nullptr &&
        in_submit_info.get_should_block() )
    {
        fence_ptr         = m_submit_fence_ptr.get();
        needs_fence_reset = true;
    }

    /* Lock all objects participating in the submission. */
    switch (in_submit_info.get_type() )
    {
        case SubmissionType::SGPU:
        {
             submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers     (),
                                                in_submit_info.get_command_buffers_sgpu  (),
                                                in_submit_info.get_n_signal_semaphores   (),
                                                in_submit_info.get_signal_semaphores_sgpu(),
                                                in_submit_info.get_n_wait_semaphores     (),
                                                in_submit_info.get_wait_semaphores_sgpu  (),
                                                fence_ptr,
                                                true); /* in_should_lock */

             break;
        }

        default:
        {
            anvil_assert_fail();
        }
     }

     {
        auto chain_ptr = struct_chainer.create_chain();

        /* Only the internal fence is reset here; a caller-provided fence is
         * submitted as-is. */
        if (needs_fence_reset)
        {
            m_submit_fence_ptr->reset();
        }

         result = vkQueueSubmit(m_queue,
                                1, /* submitCount */
                                chain_ptr->get_root_struct(),
                               (fence_ptr != nullptr) ? fence_ptr->get_fence() 
                                                      : VK_NULL_HANDLE);

        if (in_submit_info.get_should_block() )
        {
            /* Wait till initialization finishes GPU-side */
            result = vkWaitForFences(m_device_ptr->get_device_vk(),
                                     1, /* fenceCount */
                                     fence_ptr->get_fence_ptr(),
                                     VK_TRUE,     /* waitAll */
                                     UINT64_MAX); /* timeout */

            anvil_assert_vk_call_succeeded(result);
        }
     }

     /* Unlock everything that was locked prior to the submission. */
     switch (in_submit_info.get_type() )
     {
         case SubmissionType::SGPU:
         {
             submit_command_buffers_lock_unlock(in_submit_info.get_n_command_buffers     (),
                                                in_submit_info.get_command_buffers_sgpu  (),
                                                in_submit_info.get_n_signal_semaphores   (),
                                                in_submit_info.get_signal_semaphores_sgpu(),
                                                in_submit_info.get_n_wait_semaphores     (),
                                                in_submit_info.get_wait_semaphores_sgpu  (),
                                                fence_ptr,
                                                false); /* in_should_lock */

             break;
         }

         default:
         {
             anvil_assert_fail();
         }
     }

     anvil_assert_vk_call_succeeded(result);
}