Example 1: creating a sparse-binding image, allocating its memory pages with VulkanMemoryAllocator, and binding them via vkQueueBindSparse.
void SparseBindingImage::Init(RandomNumberGenerator& rand)
{
    assert(g_SparseBindingEnabled && g_hSparseBindingQueue);

    // Create image.
    FillImageCreateInfo(rand);
    m_CreateInfo.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
    ERR_GUARD_VULKAN( vkCreateImage(g_hDevice, &m_CreateInfo, nullptr, &m_Image) );

    // Get memory requirements.
    VkMemoryRequirements imageMemReq;
    vkGetImageMemoryRequirements(g_hDevice, m_Image, &imageMemReq);

    // This is just to silence a validation layer warning, but it doesn't help.
    // It looks like a bug in the Vulkan validation layers.
    // See: https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/364
    uint32_t sparseMemReqCount = 0;
    vkGetImageSparseMemoryRequirements(g_hDevice, m_Image, &sparseMemReqCount, nullptr);
    TEST(sparseMemReqCount <= 8);
    VkSparseImageMemoryRequirements sparseMemReq[8];
    vkGetImageSparseMemoryRequirements(g_hDevice, m_Image, &sparseMemReqCount, sparseMemReq);

    // According to the Vulkan specification, for sparse resources imageMemReq.alignment is also the page size.
    const VkDeviceSize pageSize = imageMemReq.alignment;
    const uint32_t pageCount = (uint32_t)ceil_div<VkDeviceSize>(imageMemReq.size, pageSize);
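    // E.g. imageMemReq.size = 1 MiB with pageSize = 64 KiB gives pageCount = 16.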

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkMemoryRequirements pageMemReq = imageMemReq;
    pageMemReq.size = pageSize;

    // Allocate and bind memory pages.
    m_Allocations.resize(pageCount);
    std::fill(m_Allocations.begin(), m_Allocations.end(), nullptr);
    std::vector<VkSparseMemoryBind> binds(pageCount);
    std::vector<VmaAllocationInfo> allocInfo(pageCount);
    ERR_GUARD_VULKAN( vmaAllocateMemoryPages(g_hAllocator, &pageMemReq, &allocCreateInfo, pageCount, m_Allocations.data(), allocInfo.data()) );

    for(uint32_t i = 0; i < pageCount; ++i)
    {
        binds[i] = {};
        binds[i].resourceOffset = pageSize * i;
        binds[i].size = pageSize;
        binds[i].memory = allocInfo[i].deviceMemory;
        binds[i].memoryOffset = allocInfo[i].offset;
    }

    VkSparseImageOpaqueMemoryBindInfo imageBindInfo = {};
    imageBindInfo.image = m_Image;
    imageBindInfo.bindCount = pageCount;
    imageBindInfo.pBinds = binds.data();

    VkBindSparseInfo bindSparseInfo = { VK_STRUCTURE_TYPE_BIND_SPARSE_INFO };
    bindSparseInfo.pImageOpaqueBinds = &imageBindInfo;
    bindSparseInfo.imageOpaqueBindCount = 1;
    
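    // Submit the binds and block on a fence until the queue has processed them.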
    ERR_GUARD_VULKAN( vkResetFences(g_hDevice, 1, &g_ImmediateFence) );
    ERR_GUARD_VULKAN( vkQueueBindSparse(g_hSparseBindingQueue, 1, &bindSparseInfo, g_ImmediateFence) );
    ERR_GUARD_VULKAN( vkWaitForFences(g_hDevice, 1, &g_ImmediateFence, VK_TRUE, UINT64_MAX) );
}
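
A minimal sketch of the matching teardown, assuming the same g_hDevice and g_hAllocator globals as above; the Destroy() name is hypothetical, and the caller must ensure the GPU no longer uses the image:

// Hypothetical counterpart to Init() above - not part of the original example.
void SparseBindingImage::Destroy()
{
    if(m_Image != VK_NULL_HANDLE)
    {
        vkDestroyImage(g_hDevice, m_Image, nullptr);
        m_Image = VK_NULL_HANDLE;
    }

    // Free all memory pages backing the image in a single call.
    vmaFreeMemoryPages(g_hAllocator, m_Allocations.size(), m_Allocations.data());
    m_Allocations.clear();
}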
Example 2: Anvil's Queue::bind_sparse_memory, which submits the sparse binds and then propagates the new memory backings to the affected buffer and image wrappers.
/** Please see header for specification */
bool Anvil::Queue::bind_sparse_memory(Anvil::SparseMemoryBindingUpdateInfo& in_update)
{
    const VkBindSparseInfo* bind_info_items   = nullptr;
    Anvil::Fence*           fence_ptr         = nullptr;
    const bool              mt_safe           = is_mt_safe();
    uint32_t                n_bind_info_items = 0;
    VkResult                result            = VK_ERROR_INITIALIZATION_FAILED;

    in_update.get_bind_sparse_call_args(&n_bind_info_items,
                                        &bind_info_items,
                                        &fence_ptr);

    if (mt_safe)
    {
        bind_sparse_memory_lock_unlock(in_update,
                                       true); /* in_should_lock */
    }
    {
        result = vkQueueBindSparse(m_queue,
                                   n_bind_info_items,
                                   bind_info_items,
                                   (fence_ptr != nullptr) ? fence_ptr->get_fence() : VK_NULL_HANDLE);
    }
    if (mt_safe)
    {
        bind_sparse_memory_lock_unlock(in_update,
                                       false); /* in_should_lock */
    }

    anvil_assert(result == VK_SUCCESS);

    for (uint32_t n_bind_info = 0;
                  n_bind_info < n_bind_info_items;
                ++n_bind_info)
    {
        uint32_t n_buffer_memory_updates       = 0;
        uint32_t n_image_memory_updates        = 0;
        uint32_t n_image_opaque_memory_updates = 0;

        in_update.get_bind_info_properties(n_bind_info,
                                          &n_buffer_memory_updates,
                                          &n_image_memory_updates,
                                          &n_image_opaque_memory_updates,
                                           nullptr,  /* out_opt_n_signal_semaphores_ptr   */
                                           nullptr,  /* out_opt_signal_semaphores_ptr_ptr */
                                           nullptr,  /* out_opt_n_wait_semaphores_ptr     */
                                           nullptr); /* out_opt_wait_semaphores_ptr_ptr   */

        for (uint32_t n_buffer_memory_update = 0;
                      n_buffer_memory_update < n_buffer_memory_updates;
                    ++n_buffer_memory_update)
        {
            VkDeviceSize        alloc_size                   = UINT64_MAX;
            VkDeviceSize        buffer_memory_start_offset   = UINT64_MAX;
            Anvil::Buffer*      buffer_ptr                   = nullptr;
            bool                memory_block_owned_by_buffer = false;
            Anvil::MemoryBlock* memory_block_ptr             = nullptr;
            VkDeviceSize        memory_block_start_offset;

            in_update.get_buffer_memory_update_properties(n_bind_info,
                                                          n_buffer_memory_update,
                                                         &buffer_ptr,
                                                         &buffer_memory_start_offset,
                                                         &memory_block_ptr,
                                                         &memory_block_start_offset,
                                                         &memory_block_owned_by_buffer,
                                                         &alloc_size);

            buffer_ptr->set_memory_sparse(memory_block_ptr,
                                          memory_block_owned_by_buffer,
                                          memory_block_start_offset,
                                          buffer_memory_start_offset,
                                          alloc_size);
        }

        for (uint32_t n_image_memory_update = 0;
                      n_image_memory_update < n_image_memory_updates;
                    ++n_image_memory_update)
        {
            Anvil::Image*           image_ptr                   = nullptr;
            VkExtent3D              extent;
            VkSparseMemoryBindFlags flags;
            bool                    memory_block_owned_by_image = false;
            Anvil::MemoryBlock*     memory_block_ptr            = nullptr;
            VkDeviceSize            memory_block_start_offset;
            VkOffset3D              offset;
            VkImageSubresource      subresource;

            in_update.get_image_memory_update_properties(n_bind_info,
                                                         n_image_memory_update,
                                                        &image_ptr,
                                                        &subresource,
                                                        &offset,
                                                        &extent,
                                                        &flags,
                                                        &memory_block_ptr,
                                                        &memory_block_start_offset,
                                                        &memory_block_owned_by_image);

            image_ptr->on_memory_backing_update(subresource,
                                                offset,
                                                extent,
                                                memory_block_ptr,
                                                memory_block_start_offset,
                                                memory_block_owned_by_image);
        }

        for (uint32_t n_image_opaque_memory_update = 0;
                      n_image_opaque_memory_update < n_image_opaque_memory_updates;
                    ++n_image_opaque_memory_update)
        {
            VkSparseMemoryBindFlags flags;
            Anvil::Image*           image_ptr                   = nullptr;
            bool                    memory_block_owned_by_image = false;
            Anvil::MemoryBlock*     memory_block_ptr            = nullptr;
            VkDeviceSize            memory_block_start_offset;
            VkDeviceSize            resource_offset;
            VkDeviceSize            size;

            in_update.get_image_opaque_memory_update_properties(n_bind_info,
                                                                n_image_opaque_memory_update,
                                                               &image_ptr,
                                                               &resource_offset,
                                                               &size,
                                                               &flags,
                                                               &memory_block_ptr,
                                                               &memory_block_start_offset,
                                                               &memory_block_owned_by_image);

            image_ptr->on_memory_backing_opaque_update(resource_offset,
                                                       size,
                                                       memory_block_ptr,
                                                       memory_block_start_offset,
                                                       memory_block_owned_by_image);
        }
    }

    return (result == VK_SUCCESS);
}
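
For reference, a minimal sketch of the raw structures that a wrapper like this ultimately hands to vkQueueBindSparse, here for a single buffer bind; sparseBuffer, memory, bindSize, sparseQueue and fence are hypothetical placeholders:

// Plain-Vulkan sketch of one buffer bind - all handles are hypothetical.
VkSparseMemoryBind bind = {};
bind.resourceOffset = 0;        // offset into the buffer's virtual address range
bind.size           = bindSize; // multiple of the sparse block size (memReq.alignment)
bind.memory         = memory;   // VkDeviceMemory backing this range
bind.memoryOffset   = 0;

VkSparseBufferMemoryBindInfo bufferBindInfo = {};
bufferBindInfo.buffer    = sparseBuffer; // created with VK_BUFFER_CREATE_SPARSE_BINDING_BIT
bufferBindInfo.bindCount = 1;
bufferBindInfo.pBinds    = &bind;

VkBindSparseInfo bindSparseInfo = { VK_STRUCTURE_TYPE_BIND_SPARSE_INFO };
bindSparseInfo.bufferBindCount = 1;
bindSparseInfo.pBufferBinds    = &bufferBindInfo;

// The queue family must report VK_QUEUE_SPARSE_BINDING_BIT.
VkResult result = vkQueueBindSparse(sparseQueue, 1, &bindSparseInfo, fence);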
Example 3: a minimal pass-through wrapper around vkQueueBindSparse.
VkResult Queue::bindSparse(const uint32_t bindInfoCount, const VkBindSparseInfo* bindInfo, const VkFence fence) const
{
    return vkQueueBindSparse(queue, bindInfoCount, bindInfo, fence);
}
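
A hedged usage sketch for the wrapper above; the Queue instance queue and the VkDevice device are hypothetical placeholders:

// Hypothetical usage - bind, then wait on a fence before using the resource.
VkFenceCreateInfo fenceInfo = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
VkFence fence = VK_NULL_HANDLE;
vkCreateFence(device, &fenceInfo, nullptr, &fence);

VkBindSparseInfo bindSparseInfo = { VK_STRUCTURE_TYPE_BIND_SPARSE_INFO };
// ... fill in pBufferBinds / pImageOpaqueBinds / pImageBinds here ...

if (queue.bindSparse(1, &bindSparseInfo, fence) == VK_SUCCESS)
{
    // Sparse binding executes on the queue timeline, not immediately.
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
}
vkDestroyFence(device, fence, nullptr);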