/* Please see header for specification */
bool Anvil::BasePipelineManager::add_pipeline(Anvil::BasePipelineCreateInfoUniquePtr in_pipeline_create_info_ptr,
                                              PipelineID*                            out_pipeline_id_ptr)
{
    const Anvil::PipelineID                base_pipeline_id = in_pipeline_create_info_ptr->get_base_pipeline_id();
    auto                                   callback_arg     = Anvil::OnNewPipelineCreatedCallbackData(UINT32_MAX);
    std::unique_lock<std::recursive_mutex> mutex_lock;
    auto                                   mutex_ptr        = get_mutex();
    PipelineID                             new_pipeline_id  = 0;
    std::unique_ptr<Pipeline>              new_pipeline_ptr;
    bool                                   result           = false;

    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::move(
            std::unique_lock<std::recursive_mutex>(*mutex_ptr)
        );
    }

    /* UINT32_MAX indicates that no base pipeline was specified */
    if (base_pipeline_id != UINT32_MAX)
    {
        Anvil::BasePipelineCreateInfo* base_pipeline_create_info_ptr = nullptr;
        auto                           base_pipeline_iterator        = m_baked_pipelines.find(base_pipeline_id);

        if (base_pipeline_iterator == m_baked_pipelines.end() )
        {
            base_pipeline_iterator = m_outstanding_pipelines.find(base_pipeline_id);

            if (base_pipeline_iterator != m_outstanding_pipelines.end() )
            {
                base_pipeline_create_info_ptr = base_pipeline_iterator->second->pipeline_create_info_ptr.get();
            }
        }
        else
        {
            base_pipeline_create_info_ptr = base_pipeline_iterator->second->pipeline_create_info_ptr.get();
        }

        if (base_pipeline_create_info_ptr != nullptr)
        {
            anvil_assert(base_pipeline_create_info_ptr->allows_derivatives() );
        }
        else
        {
            /* Base pipeline ID is invalid */
            anvil_assert(base_pipeline_create_info_ptr != nullptr);

            goto end;
        }
    }

    /* Create & store the new descriptor */
    new_pipeline_id = (m_pipeline_counter.fetch_add(1) );

    /* NOTE: in_pipeline_create_info_ptr is moved from (and becomes null) by the call below */
    new_pipeline_ptr.reset(
        new Pipeline(
            m_device_ptr,
            std::move(in_pipeline_create_info_ptr),
            is_mt_safe() )
    );

    if (new_pipeline_ptr->pipeline_create_info_ptr->is_proxy() )
    {
        m_baked_pipelines[new_pipeline_id] = std::move(new_pipeline_ptr);
    }
    else
    {
        m_outstanding_pipelines[new_pipeline_id] = std::move(new_pipeline_ptr);
    }

    *out_pipeline_id_ptr = new_pipeline_id;

    /* Inform subscribers about the new pipeline. */
    callback_arg.new_pipeline_id = new_pipeline_id;

    callback(BASE_PIPELINE_MANAGER_CALLBACK_ID_ON_NEW_PIPELINE_CREATED,
            &callback_arg);

    /* All done */
    result = true;
end:
    return result;
}
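
The ID bookkeeping above reduces to a common pattern: an atomic counter hands out unique IDs and the freshly created object is moved into a map keyed by that ID, under an optional lock. A minimal standalone sketch of that pattern (the types below are illustrative only, not Anvil API):

#include <atomic>
#include <cstdint>
#include <memory>
#include <mutex>
#include <unordered_map>

struct Pipeline { /* payload omitted */ };

class PipelineRegistry
{
public:
    /* Allocates a unique ID and takes ownership of the pipeline object. */
    uint32_t add(std::unique_ptr<Pipeline> in_pipeline_ptr)
    {
        const uint32_t new_id = m_counter.fetch_add(1);

        std::lock_guard<std::recursive_mutex> lock(m_mutex);

        m_pipelines[new_id] = std::move(in_pipeline_ptr);

        return new_id;
    }

private:
    std::atomic<uint32_t>                                    m_counter{0};
    std::recursive_mutex                                     m_mutex;
    std::unordered_map<uint32_t, std::unique_ptr<Pipeline> > m_pipelines;
};

int main()
{
    PipelineRegistry registry;
    const uint32_t   id = registry.add(std::unique_ptr<Pipeline>(new Pipeline() ));

    return (id == 0) ? 0 : 1;
}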
Example #2
/** Initializes the swapchain object. */
bool Anvil::Swapchain::init()
{
    uint32_t                                              n_swapchain_images             = 0;
    auto                                                  parent_surface_ptr             = m_create_info_ptr->get_rendering_surface();
    VkResult                                              result                         = VK_ERROR_INITIALIZATION_FAILED;
    Anvil::StructChainUniquePtr<VkSwapchainCreateInfoKHR> struct_chain_ptr;
    std::vector<VkImage>                                  swapchain_images;
    const VkSurfaceTransformFlagBitsKHR                   swapchain_transformation       = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    const WindowPlatform                                  window_platform                = m_create_info_ptr->get_window()->get_platform();
    const bool                                            is_offscreen_rendering_enabled = (window_platform   == WINDOW_PLATFORM_DUMMY                     ||
                                                                                            window_platform   == WINDOW_PLATFORM_DUMMY_WITH_PNG_SNAPSHOTS);

    m_size.width  = parent_surface_ptr->get_width ();
    m_size.height = parent_surface_ptr->get_height();

    /* not doing offscreen rendering */
    if (!is_offscreen_rendering_enabled)
    {
        const auto&                                    khr_swapchain_entrypoints = m_device_ptr->get_extension_khr_swapchain_entrypoints();
        Anvil::StructChainer<VkSwapchainCreateInfoKHR> struct_chainer;

        #ifdef _DEBUG
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            const Anvil::DeviceType    device_type                     = m_device_ptr->get_type();
            uint32_t                   n_physical_devices              = 0;
            bool                       result_bool                     = false;
            const char*                required_surface_extension_name = nullptr;
            VkSurfaceCapabilitiesKHR   surface_caps;
            VkCompositeAlphaFlagsKHR   supported_composite_alpha_flags = static_cast<VkCompositeAlphaFlagsKHR>(0);
            VkSurfaceTransformFlagsKHR supported_surface_transform_flags;

            #ifdef _WIN32
                #if defined(ANVIL_INCLUDE_WIN3264_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_WIN32_SURFACE_EXTENSION_NAME;
                #endif
            #else
                #if defined(ANVIL_INCLUDE_XCB_WINDOW_SYSTEM_SUPPORT)
                    required_surface_extension_name = VK_KHR_XCB_SURFACE_EXTENSION_NAME;
                #endif
            #endif

            anvil_assert(required_surface_extension_name == nullptr                                                            ||
                         m_device_ptr->get_parent_instance()->is_instance_extension_supported(required_surface_extension_name) );

            switch (device_type)
            {
                case Anvil::DEVICE_TYPE_SINGLE_GPU: n_physical_devices = 1; break;

                default:
                {
                    anvil_assert_fail();
                }
            }

            for (uint32_t n_physical_device = 0;
                          n_physical_device < n_physical_devices;
                        ++n_physical_device)
            {
                const Anvil::PhysicalDevice* current_physical_device_ptr = nullptr;

                switch (device_type)
                {
                    case Anvil::DEVICE_TYPE_SINGLE_GPU: current_physical_device_ptr = sgpu_device_ptr->get_physical_device(); break;

                    default:
                    {
                        anvil_assert_fail();
                    }
                }

                /* Ensure opaque composite alpha mode is supported */
                anvil_assert(parent_surface_ptr->get_supported_composite_alpha_flags(&supported_composite_alpha_flags) );

                anvil_assert(supported_composite_alpha_flags & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR);

                /* Ensure we can use the swapchain image format  */
                anvil_assert(parent_surface_ptr->is_compatible_with_image_format(m_create_info_ptr->get_format(),
                                                                                &result_bool) );
                anvil_assert(result_bool);

                /* Ensure the transformation we're about to request is supported by the rendering surface */
                anvil_assert(parent_surface_ptr->get_supported_transformations(&supported_surface_transform_flags) );

                anvil_assert(supported_surface_transform_flags & swapchain_transformation);

                /* Ensure the requested number of swapchain images is reasonable */
                anvil_assert(parent_surface_ptr->get_capabilities(&surface_caps) );

                anvil_assert(surface_caps.maxImageCount == 0                                 ||
                             surface_caps.maxImageCount >= m_create_info_ptr->get_n_images() );
            }
        }
        #endif

        {
            VkSwapchainCreateInfoKHR create_info;

            create_info.clipped               = true; /* we won't be reading from the presentable images */
            create_info.compositeAlpha        = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
            create_info.flags                 = m_create_info_ptr->get_flags();
            create_info.imageArrayLayers      = 1;
            create_info.imageColorSpace       = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
            create_info.imageExtent.height    = parent_surface_ptr->get_height();
            create_info.imageExtent.width     = parent_surface_ptr->get_width ();
            create_info.imageFormat           = m_create_info_ptr->get_format ();
            create_info.imageSharingMode      = VK_SHARING_MODE_EXCLUSIVE;
            create_info.imageUsage            = m_create_info_ptr->get_usage_flags();
            create_info.minImageCount         = m_create_info_ptr->get_n_images   ();
            create_info.oldSwapchain          = VK_NULL_HANDLE;
            create_info.pNext                 = nullptr;
            create_info.pQueueFamilyIndices   = nullptr;
            create_info.presentMode           = m_create_info_ptr->get_present_mode();
            create_info.preTransform          = swapchain_transformation;
            create_info.queueFamilyIndexCount = 0;
            create_info.sType                 = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
            create_info.surface               = parent_surface_ptr->get_surface();

            struct_chainer.append_struct(create_info);
        }

        struct_chain_ptr = struct_chainer.create_chain();

        parent_surface_ptr->lock();
        {
            result = khr_swapchain_entrypoints.vkCreateSwapchainKHR(m_device_ptr->get_device_vk(),
                                                                    struct_chain_ptr->get_root_struct(),
                                                                    nullptr, /* pAllocator */
                                                                   &m_swapchain);
        }
        parent_surface_ptr->unlock();

        anvil_assert_vk_call_succeeded(result);
        if (is_vk_call_successful(result) )
        {
            set_vk_handle(m_swapchain);
        }

        /* Retrieve swap-chain images */
        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                   nullptr); /* pSwapchainImages */

        anvil_assert_vk_call_succeeded(result);
        anvil_assert                  (n_swapchain_images >  0);

        swapchain_images.resize(n_swapchain_images);

        result = khr_swapchain_entrypoints.vkGetSwapchainImagesKHR(m_device_ptr->get_device_vk(),
                                                                   m_swapchain,
                                                                  &n_swapchain_images,
                                                                  &swapchain_images[0]);

        anvil_assert_vk_call_succeeded(result);
    }
    else /* offscreen rendering */
    {
        m_create_info_ptr->set_usage_flags(m_create_info_ptr->get_usage_flags() | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

        n_swapchain_images = m_create_info_ptr->get_n_images();
    }

    for (uint32_t n_result_image = 0;
                  n_result_image < n_swapchain_images;
                ++n_result_image)
    {
        /* Spawn an Image wrapper class for the swap-chain image. */
        if (!is_offscreen_rendering_enabled)
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_swapchain_wrapper(m_device_ptr,
                                                                                    this,
                                                                                    swapchain_images[n_result_image],
                                                                                    n_result_image);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }
        else
        {
            auto create_info_ptr = Anvil::ImageCreateInfo::create_nonsparse_alloc(m_device_ptr,
                                                                                  VK_IMAGE_TYPE_2D,
                                                                                  m_create_info_ptr->get_format(),
                                                                                  VK_IMAGE_TILING_OPTIMAL,
                                                                                  m_create_info_ptr->get_usage_flags(),
                                                                                  m_size.width,
                                                                                  m_size.height,
                                                                                  1, /* base_mipmap_depth */
                                                                                  1,
                                                                                  VK_SAMPLE_COUNT_1_BIT,
                                                                                  QUEUE_FAMILY_GRAPHICS_BIT,
                                                                                  VK_SHARING_MODE_EXCLUSIVE,
                                                                                  false, /* in_use_full_mipmap_chain */
                                                                                  0,     /* in_memory_features       */
                                                                                  0,     /* in_create_flags          */
                                                                                  VK_IMAGE_LAYOUT_GENERAL,
                                                                                  nullptr);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_ptrs[n_result_image] = Anvil::Image::create(std::move(create_info_ptr) );
        }

        /* For each swap-chain image, create a relevant view */
        {
            auto create_info_ptr = Anvil::ImageViewCreateInfo::create_2D(m_device_ptr,
                                                                         m_image_ptrs[n_result_image].get(),
                                                                         0, /* n_base_layer */
                                                                         0, /* n_base_mipmap_level */
                                                                         1, /* n_mipmaps           */
                                                                         VK_IMAGE_ASPECT_COLOR_BIT,
                                                                         m_create_info_ptr->get_format(),
                                                                         VK_COMPONENT_SWIZZLE_R,
                                                                         VK_COMPONENT_SWIZZLE_G,
                                                                         VK_COMPONENT_SWIZZLE_B,
                                                                         VK_COMPONENT_SWIZZLE_A);

            create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ) );

            m_image_view_ptrs[n_result_image] = Anvil::ImageView::create(std::move(create_info_ptr) );
        }

        result = VK_SUCCESS;
    }

    /* Sign up for present submission notifications. This is needed to ensure that the number of presented frames
     * matches the number of acquired frames at destruction time.
     */
    {
        std::vector<Anvil::Queue*> queues;

        switch (m_device_ptr->get_type() )
        {
            case Anvil::DEVICE_TYPE_SINGLE_GPU:
            {
                const std::vector<uint32_t>* queue_fams_with_present_support_ptr(nullptr);
                const auto                   rendering_surface_ptr              (m_create_info_ptr->get_rendering_surface() );
                const Anvil::SGPUDevice*     sgpu_device_ptr                    (dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

                if (!rendering_surface_ptr->get_queue_families_with_present_support(&queue_fams_with_present_support_ptr) )
                {
                    break;
                }

                if (queue_fams_with_present_support_ptr == nullptr)
                {
                    anvil_assert(queue_fams_with_present_support_ptr != nullptr);
                }
                else
                {
                    for (const auto queue_fam : *queue_fams_with_present_support_ptr)
                    {
                        const uint32_t n_queues = sgpu_device_ptr->get_n_queues(queue_fam);

                        for (uint32_t n_queue = 0;
                                      n_queue < n_queues;
                                    ++n_queue)
                        {
                            auto queue_ptr = sgpu_device_ptr->get_queue_for_queue_family_index(queue_fam,
                                                                                               n_queue);

                            anvil_assert(queue_ptr != nullptr);

                            if (std::find(queues.begin(),
                                          queues.end(),
                                          queue_ptr) == queues.end() )
                            {
                                queues.push_back(queue_ptr);
                            }
                        }
                    }
                }

                break;
            }
        }

        for (auto queue_ptr : queues)
        {
            queue_ptr->register_for_callbacks(
                QUEUE_CALLBACK_ID_PRESENT_REQUEST_ISSUED,
                std::bind(&Swapchain::on_present_request_issued,
                          this,
                          std::placeholders::_1),
                this
            );

            m_observed_queues.push_back(queue_ptr);
        }
    }

    /* Sign up for "about to close the parent window" notifications. Swapchain instance SHOULD be deinitialized
     * before the window is destroyed, so we're going to act as nice citizens.
     */
    m_create_info_ptr->get_window()->register_for_callbacks(
        WINDOW_CALLBACK_ID_ABOUT_TO_CLOSE,
        std::bind(&Swapchain::on_parent_window_about_to_close,
                  this),
        this
    );

    return is_vk_call_successful(result);
}
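
Retrieving the swapchain images above uses the standard Vulkan two-call idiom: query the element count with a null output pointer, size the vector, then fetch the actual handles. A generic standalone sketch of that idiom (the enumerate helper and the stand-in enumerator are illustrative, not Anvil or Vulkan API):

#include <cstdint>
#include <vector>

/* Works with any enumerator callable of the form f(uint32_t* count_ptr, T* items_ptr). */
template<typename T, typename EnumeratorFunc>
std::vector<T> enumerate(EnumeratorFunc in_enumerator)
{
    uint32_t n_items = 0;

    /* First call: ask only for the number of available items. */
    in_enumerator(&n_items, nullptr);

    std::vector<T> items(n_items);

    if (n_items > 0)
    {
        /* Second call: retrieve the items themselves. */
        in_enumerator(&n_items, items.data() );
    }

    return items;
}

int main()
{
    /* Stand-in enumerator that reports three integers. */
    auto fake_enumerator = [](uint32_t* count_ptr, int* items_ptr)
    {
        *count_ptr = 3;

        if (items_ptr != nullptr)
        {
            for (uint32_t n = 0; n < 3; ++n)
            {
                items_ptr[n] = static_cast<int>(n);
            }
        }
    };

    const auto values = enumerate<int>(fake_enumerator);

    return (values.size() == 3) ? 0 : 1;
}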
/* Please see header for specification */
bool Anvil::PipelineLayoutManager::get_layout(const std::vector<DescriptorSetCreateInfoUniquePtr>* in_ds_create_info_items_ptr,
                                              const PushConstantRanges&                            in_push_constant_ranges,
                                              Anvil::PipelineLayoutUniquePtr*                      out_pipeline_layout_ptr_ptr)
{
    std::unique_lock<std::recursive_mutex> mutex_lock;
    auto                                   mutex_ptr                   = get_mutex();
    const uint32_t                         n_descriptor_sets_in_in_dsg = static_cast<uint32_t>(in_ds_create_info_items_ptr->size() );
    bool                                   result                      = false;
    Anvil::PipelineLayout*                 result_pipeline_layout_ptr  = nullptr;

    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::move(
            std::unique_lock<std::recursive_mutex>(*mutex_ptr)
        );
    }

    for (auto layout_iterator  = m_pipeline_layouts.begin();
              layout_iterator != m_pipeline_layouts.end();
            ++layout_iterator)
    {
        auto&      current_pipeline_layout_container_ptr     = *layout_iterator;
        auto&      current_pipeline_layout_ptr               = current_pipeline_layout_container_ptr->pipeline_layout_ptr;
        auto       current_pipeline_ds_create_info_ptrs      = current_pipeline_layout_ptr->get_ds_create_info_ptrs();
        bool       dss_match                                 = true;
        const auto n_descriptor_sets_in_current_pipeline_dsg = static_cast<uint32_t>(current_pipeline_ds_create_info_ptrs->size() );

        if (n_descriptor_sets_in_current_pipeline_dsg != n_descriptor_sets_in_in_dsg)
        {
            continue;
        }

        if (current_pipeline_layout_ptr->get_attached_push_constant_ranges() != in_push_constant_ranges)
        {
            continue;
        }

        for (uint32_t n_ds = 0;
                      n_ds < n_descriptor_sets_in_in_dsg && dss_match;
                    ++n_ds)
        {
            auto&       in_dsg_ds_create_info_ptr               = in_ds_create_info_items_ptr->at         (n_ds);
            const auto& current_pipeline_dsg_ds_create_info_ptr = current_pipeline_ds_create_info_ptrs->at(n_ds);

            if ((in_dsg_ds_create_info_ptr != nullptr && current_pipeline_dsg_ds_create_info_ptr == nullptr) ||
                (in_dsg_ds_create_info_ptr == nullptr && current_pipeline_dsg_ds_create_info_ptr != nullptr) )
            {
                dss_match = false;

                break;
            }

            if (in_dsg_ds_create_info_ptr               != nullptr &&
                current_pipeline_dsg_ds_create_info_ptr != nullptr)
            {
                if (!(*in_dsg_ds_create_info_ptr == *current_pipeline_dsg_ds_create_info_ptr) )
                {
                    dss_match = false;

                    break;
                }
            }
        }

        if (!dss_match)
        {
            continue;
        }

        result                       = true;
        result_pipeline_layout_ptr   = current_pipeline_layout_container_ptr->pipeline_layout_ptr.get();

        current_pipeline_layout_container_ptr->n_references.fetch_add(1);

        break;
    }

    if (!result)
    {
        auto new_layout_ptr           = Anvil::PipelineLayout::create(m_device_ptr,
                                                                      in_ds_create_info_items_ptr,
                                                                      in_push_constant_ranges,
                                                                      is_mt_safe() );
        auto new_layout_container_ptr = std::unique_ptr<PipelineLayoutContainer>(
            new PipelineLayoutContainer()
        );

        result                                        = true;
        result_pipeline_layout_ptr                    = new_layout_ptr.get();
        new_layout_container_ptr->pipeline_layout_ptr = std::move(new_layout_ptr);

        m_pipeline_layouts.push_back(
            std::move(new_layout_container_ptr)
        );
    }

    if (result)
    {
        anvil_assert(result_pipeline_layout_ptr != nullptr);

        *out_pipeline_layout_ptr_ptr = Anvil::PipelineLayoutUniquePtr(result_pipeline_layout_ptr,
                                                                      std::bind(&PipelineLayoutManager::on_pipeline_layout_dereferenced,
                                                                                this,
                                                                                result_pipeline_layout_ptr)
        );
    }

    return result;
}
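
get_layout() is a reuse-or-create cache: each stored layout is compared against the request, a hit bumps the container's reference count, and a miss creates and stores a new entry before handing it out. A condensed standalone sketch of that pattern (the Layout/LayoutCache types are illustrative, not Anvil API):

#include <atomic>
#include <memory>
#include <string>
#include <vector>

struct Layout
{
    std::string key; /* stands in for the DS create info items + push-constant ranges */
};

struct LayoutContainer
{
    std::unique_ptr<Layout> layout_ptr;
    std::atomic<uint32_t>   n_references{1};
};

class LayoutCache
{
public:
    /* Returns a cached layout matching in_key, creating one on a cache miss. */
    Layout* get(const std::string& in_key)
    {
        for (auto& container_ptr : m_layouts)
        {
            if (container_ptr->layout_ptr->key == in_key)
            {
                container_ptr->n_references.fetch_add(1);

                return container_ptr->layout_ptr.get();
            }
        }

        auto new_container_ptr = std::unique_ptr<LayoutContainer>(new LayoutContainer() );

        new_container_ptr->layout_ptr.reset(new Layout{in_key});

        Layout* result_ptr = new_container_ptr->layout_ptr.get();

        m_layouts.push_back(std::move(new_container_ptr) );

        return result_ptr;
    }

private:
    std::vector<std::unique_ptr<LayoutContainer> > m_layouts;
};

int main()
{
    LayoutCache cache;

    Layout* first_ptr  = cache.get("A");
    Layout* second_ptr = cache.get("A"); /* cache hit - same object, reference count is now 2 */

    return (first_ptr == second_ptr) ? 0 : 1;
}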
Example #4
File: queue.cpp  Project: mp3butcher/Anvil
/** Please see header for specification */
bool Anvil::Queue::bind_sparse_memory(Anvil::SparseMemoryBindingUpdateInfo& in_update)
{
    const VkBindSparseInfo* bind_info_items   = nullptr;
    Anvil::Fence*           fence_ptr         = nullptr;
    const bool              mt_safe           = is_mt_safe();
    uint32_t                n_bind_info_items = 0;
    VkResult                result            = VK_ERROR_INITIALIZATION_FAILED;

    in_update.get_bind_sparse_call_args(&n_bind_info_items,
                                        &bind_info_items,
                                        &fence_ptr);

    if (mt_safe)
    {
        bind_sparse_memory_lock_unlock(in_update,
                                       true); /* in_should_lock */
    }
    /* Issue the actual sparse bind call */
    {
        result = vkQueueBindSparse(m_queue,
                                   n_bind_info_items,
                                   bind_info_items,
                                   (fence_ptr != nullptr) ? fence_ptr->get_fence() : VK_NULL_HANDLE);
    }
    if (mt_safe)
    {
        bind_sparse_memory_lock_unlock(in_update,
                                       false); /* in_should_lock */
    }

    anvil_assert(result == VK_SUCCESS);

    for (uint32_t n_bind_info = 0;
                  n_bind_info < n_bind_info_items;
                ++n_bind_info)
    {
        uint32_t n_buffer_memory_updates       = 0;
        uint32_t n_image_memory_updates        = 0;
        uint32_t n_image_opaque_memory_updates = 0;

        in_update.get_bind_info_properties(n_bind_info,
                                          &n_buffer_memory_updates,
                                          &n_image_memory_updates,
                                          &n_image_opaque_memory_updates,
                                           nullptr,  /* out_opt_n_signal_semaphores_ptr   */
                                           nullptr,  /* out_opt_signal_semaphores_ptr_ptr */
                                           nullptr,  /* out_opt_n_wait_semaphores_ptr     */
                                           nullptr); /* out_opt_wait_semaphores_ptr_ptr   */

        for (uint32_t n_buffer_memory_update = 0;
                      n_buffer_memory_update < n_buffer_memory_updates;
                    ++n_buffer_memory_update)
        {
            VkDeviceSize        alloc_size                   = UINT64_MAX;
            VkDeviceSize        buffer_memory_start_offset   = UINT64_MAX;
            Anvil::Buffer*      buffer_ptr                   = nullptr;
            bool                memory_block_owned_by_buffer = false;
            Anvil::MemoryBlock* memory_block_ptr             = nullptr;
            VkDeviceSize        memory_block_start_offset;

            in_update.get_buffer_memory_update_properties(n_bind_info,
                                                          n_buffer_memory_update,
                                                         &buffer_ptr,
                                                         &buffer_memory_start_offset,
                                                         &memory_block_ptr,
                                                         &memory_block_start_offset,
                                                         &memory_block_owned_by_buffer,
                                                          &alloc_size);

            buffer_ptr->set_memory_sparse(memory_block_ptr,
                                          memory_block_owned_by_buffer,
                                          memory_block_start_offset,
                                          buffer_memory_start_offset,
                                          alloc_size);
        }

        for (uint32_t n_image_memory_update = 0;
                      n_image_memory_update < n_image_memory_updates;
                    ++n_image_memory_update)
        {
            Anvil::Image*           image_ptr                   = nullptr;
            VkExtent3D              extent;
            VkSparseMemoryBindFlags flags;
            bool                    memory_block_owned_by_image = false;
            Anvil::MemoryBlock*     memory_block_ptr            = nullptr;
            VkDeviceSize            memory_block_start_offset;
            VkOffset3D              offset;
            VkImageSubresource      subresource;

            in_update.get_image_memory_update_properties(n_bind_info,
                                                         n_image_memory_update,
                                                        &image_ptr,
                                                        &subresource,
                                                        &offset,
                                                        &extent,
                                                        &flags,
                                                        &memory_block_ptr,
                                                        &memory_block_start_offset,
                                                        &memory_block_owned_by_image);

            image_ptr->on_memory_backing_update(subresource,
                                                offset,
                                                extent,
                                                memory_block_ptr,
                                                memory_block_start_offset,
                                                memory_block_owned_by_image);
        }

        for (uint32_t n_image_opaque_memory_update = 0;
                      n_image_opaque_memory_update < n_image_opaque_memory_updates;
                    ++n_image_opaque_memory_update)
        {
            VkSparseMemoryBindFlags flags;
            Anvil::Image*           image_ptr                   = nullptr;
            bool                    memory_block_owned_by_image = false;
            Anvil::MemoryBlock*     memory_block_ptr            = nullptr;
            VkDeviceSize            memory_block_start_offset;
            VkDeviceSize            resource_offset;
            VkDeviceSize            size;

            in_update.get_image_opaque_memory_update_properties(n_bind_info,
                                                                n_image_opaque_memory_update,
                                                               &image_ptr,
                                                               &resource_offset,
                                                               &size,
                                                               &flags,
                                                               &memory_block_ptr,
                                                               &memory_block_start_offset,
                                                               &memory_block_owned_by_image);

            image_ptr->on_memory_backing_opaque_update(resource_offset,
                                                       size,
                                                       memory_block_ptr,
                                                       memory_block_start_offset,
                                                       memory_block_owned_by_image);
        }
    }

    return (result == VK_SUCCESS);
}
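
The explicit lock/unlock pair around vkQueueBindSparse could also be written as a small RAII guard, which guarantees the unlock on every exit path. A minimal sketch of that alternative (the ConditionalLockGuard type is hypothetical, not part of Anvil):

#include <functional>

/* Calls the lock functor on construction and the unlock functor on destruction,
 * but only when enabled (mirrors the mt_safe check in bind_sparse_memory). */
class ConditionalLockGuard
{
public:
    ConditionalLockGuard(bool                  in_enabled,
                         std::function<void()> in_lock_func,
                         std::function<void()> in_unlock_func)
        :m_enabled    (in_enabled),
         m_unlock_func(in_unlock_func)
    {
        if (m_enabled)
        {
            in_lock_func();
        }
    }

    ~ConditionalLockGuard()
    {
        if (m_enabled)
        {
            m_unlock_func();
        }
    }

private:
    bool                  m_enabled;
    std::function<void()> m_unlock_func;
};

int main()
{
    int lock_calls   = 0;
    int unlock_calls = 0;

    {
        ConditionalLockGuard guard(true,
                                   [&]() { ++lock_calls;   },
                                   [&]() { ++unlock_calls; });

        /* ... the guarded call (e.g. vkQueueBindSparse) would go here ... */
    }

    return (lock_calls == 1 && unlock_calls == 1) ? 0 : 1;
}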
Example #5
File: queue.cpp  Project: mp3butcher/Anvil
/** Please see header for specification */
Anvil::Queue::Queue(const Anvil::BaseDevice* in_device_ptr,
                    uint32_t                 in_queue_family_index,
                    uint32_t                 in_queue_index,
                    bool                     in_mt_safe)

    :CallbacksSupportProvider  (QUEUE_CALLBACK_ID_COUNT),
     DebugMarkerSupportProvider(in_device_ptr,
                                VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
     MTSafetySupportProvider   (in_mt_safe),
     m_device_ptr              (in_device_ptr),
     m_queue                   (VK_NULL_HANDLE),
     m_queue_family_index      (in_queue_family_index),
     m_queue_index             (in_queue_index)
{
    /* Retrieve the Vulkan handle */
    vkGetDeviceQueue(m_device_ptr->get_device_vk(),
                     in_queue_family_index,
                     in_queue_index,
                    &m_queue);

    anvil_assert(m_queue != VK_NULL_HANDLE);

    /* Determine whether the queue supports sparse bindings */
    m_supports_sparse_bindings = !!(m_device_ptr->get_queue_family_info(in_queue_family_index)->flags & VK_QUEUE_SPARSE_BINDING_BIT);

    /* Cache a fence that may be optionally used for submissions */
    {
        auto create_info_ptr = Anvil::FenceCreateInfo::create(m_device_ptr,
                                                              false); /* create_signalled */

        create_info_ptr->set_mt_safety(Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe()) );

        m_submit_fence_ptr = Anvil::Fence::create(std::move(create_info_ptr) );
    }

    /* OK, register the wrapper instance and leave */
    Anvil::ObjectTracker::get()->register_object(Anvil::OBJECT_TYPE_QUEUE,
                                                  this);
}
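
m_supports_sparse_bindings is derived from the queue family's flags as reported by the device wrapper. With raw Vulkan, the same information comes from vkGetPhysicalDeviceQueueFamilyProperties; a short sketch of that query follows (it assumes a valid VkPhysicalDevice and the standard Vulkan headers, and is not Anvil API):

#include <vector>
#include <vulkan/vulkan.h>

/* Returns true if the given queue family advertises sparse binding support. */
bool queue_family_supports_sparse_binding(VkPhysicalDevice in_physical_device,
                                          uint32_t         in_queue_family_index)
{
    uint32_t n_queue_families = 0;

    vkGetPhysicalDeviceQueueFamilyProperties(in_physical_device,
                                            &n_queue_families,
                                             nullptr);

    std::vector<VkQueueFamilyProperties> props(n_queue_families);

    vkGetPhysicalDeviceQueueFamilyProperties(in_physical_device,
                                            &n_queue_families,
                                             props.data() );

    if (in_queue_family_index >= n_queue_families)
    {
        return false;
    }

    return (props[in_queue_family_index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) != 0;
}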
/* Please see header for specification */
bool Anvil::DescriptorSetLayoutManager::get_layout(const DescriptorSetCreateInfo*       in_ds_create_info_ptr,
                                                   Anvil::DescriptorSetLayoutUniquePtr* out_ds_layout_ptr_ptr)
{
    std::unique_lock<std::recursive_mutex> mutex_lock;
    auto                                   mutex_ptr            = get_mutex();
    bool                                   result               = false;
    Anvil::DescriptorSetLayout*            result_ds_layout_ptr = nullptr;

    anvil_assert(in_ds_create_info_ptr != nullptr);

    if (mutex_ptr != nullptr)
    {
        mutex_lock = std::move(
            std::unique_lock<std::recursive_mutex>(*mutex_ptr)
        );
    }

    for (auto layout_iterator  = m_descriptor_set_layouts.begin();
              layout_iterator != m_descriptor_set_layouts.end();
            ++layout_iterator)
    {
        auto&  current_ds_layout_container_ptr  = *layout_iterator;
        auto&  current_ds_layout_ptr            = current_ds_layout_container_ptr->ds_layout_ptr;
        auto   current_ds_create_info_ptr       = current_ds_layout_ptr->get_create_info();

        if (*in_ds_create_info_ptr == *current_ds_create_info_ptr)
        {
            result               = true;
            result_ds_layout_ptr = current_ds_layout_ptr.get();

            current_ds_layout_container_ptr->n_references.fetch_add(1);

            break;
        }
    }

    if (!result)
    {
        auto ds_create_info_clone_ptr    = DescriptorSetCreateInfoUniquePtr             (new DescriptorSetCreateInfo(*in_ds_create_info_ptr),
                                                                                         std::default_delete<Anvil::DescriptorSetCreateInfo>() );
        auto new_ds_layout_ptr           = Anvil::DescriptorSetLayout::create           (std::move(ds_create_info_clone_ptr),
                                                                                         m_device_ptr,
                                                                                         Anvil::Utils::convert_boolean_to_mt_safety_enum(is_mt_safe() ));
        auto new_ds_layout_container_ptr = std::unique_ptr<DescriptorSetLayoutContainer>(new DescriptorSetLayoutContainer() );

        result                                     = true;
        result_ds_layout_ptr                       = new_ds_layout_ptr.get();
        new_ds_layout_container_ptr->ds_layout_ptr = std::move(new_ds_layout_ptr);

        m_descriptor_set_layouts.push_back(
            std::move(new_ds_layout_container_ptr)
        );
    }

    if (result)
    {
        anvil_assert(result_ds_layout_ptr != nullptr);

        *out_ds_layout_ptr_ptr = Anvil::DescriptorSetLayoutUniquePtr(result_ds_layout_ptr,
                                                                     std::bind(&DescriptorSetLayoutManager::on_descriptor_set_layout_dereferenced,
                                                                               this,
                                                                               result_ds_layout_ptr)
        );
    }

    return result;
}
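
Both layout managers return results through a unique_ptr whose deleter notifies the manager instead of deleting the object, so the cached entry's reference count drops once the caller lets go. A minimal standalone sketch of that custom-deleter pattern (the LayoutManager type is illustrative, not Anvil API):

#include <cstdio>
#include <functional>
#include <memory>

struct DescriptorSetLayout { /* payload omitted */ };

/* unique_ptr whose deleter is an arbitrary callable rather than operator delete. */
typedef std::unique_ptr<DescriptorSetLayout, std::function<void(DescriptorSetLayout*)> > LayoutUniquePtr;

class LayoutManager
{
public:
    LayoutUniquePtr acquire()
    {
        ++m_n_references;

        /* The deleter only releases the reference; the manager keeps actual ownership. */
        return LayoutUniquePtr(&m_layout,
                               [this](DescriptorSetLayout*)
                               {
                                   --m_n_references;
                               });
    }

    int n_references() const
    {
        return m_n_references;
    }

private:
    DescriptorSetLayout m_layout;
    int                 m_n_references = 0;
};

int main()
{
    LayoutManager manager;

    {
        LayoutUniquePtr layout_ptr = manager.acquire();

        std::printf("references while held: %d\n", manager.n_references() );
    } /* the deleter fires here and drops the reference */

    std::printf("references after release: %d\n", manager.n_references() );

    return 0;
}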