Example #1
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info = {};
    VkResult               result      = VK_ERROR_DEVICE_LOST;

    /* Extract the physical device handle VMA queries for memory properties */
    switch (m_device_ptr->get_type() )
    {
        case Anvil::DEVICE_TYPE_SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);
    return is_vk_call_successful(result);
}
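For reference, here is the same pattern outside Anvil as a minimal sketch. It assumes a VkPhysicalDevice and VkDevice have already been created; the helper name is hypothetical.

#include <vk_mem_alloc.h>

/* Minimal sketch, assuming an already-initialized VkPhysicalDevice and
 * VkDevice; the helper name is hypothetical. Newer VMA releases may also
 * expect VmaAllocatorCreateInfo::instance and ::vulkanApiVersion to be
 * filled in.
 */
bool create_allocator(VkPhysicalDevice in_physical_device,
                      VkDevice         in_device,
                      VmaAllocator*    out_allocator_ptr)
{
    VmaAllocatorCreateInfo create_info = {};

    create_info.physicalDevice = in_physical_device;
    create_info.device         = in_device;

    /* Leaving preferredLargeHeapBlockSize at zero, as in the example above,
     * keeps VMA's default memory block size. */
    const VkResult result = vmaCreateAllocator(&create_info,
                                               out_allocator_ptr);

    return (result == VK_SUCCESS);
}

Destruction order matters: every buffer and image allocated through the allocator must be freed before the matching vmaDestroyAllocator() call.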
Example #2
void
Driver::initialise(vk::PhysicalDevice physDevice, vk::Device device, vk::Queue queue, uint32_t queueFamilyIndex)
{
   if (mRunState != RunState::None) {
      return;
   }

   mPhysDevice = physDevice;
   mDevice = device;
   mQueue = queue;
   mRunState = RunState::Running;

   // Allocate a command pool to use
   vk::CommandPoolCreateInfo commandPoolDesc;
   commandPoolDesc.flags = vk::CommandPoolCreateFlagBits::eTransient | vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
   commandPoolDesc.queueFamilyIndex = queueFamilyIndex;
   mCommandPool = mDevice.createCommandPool(commandPoolDesc);

   // Start our fence thread
   mFenceThread = std::thread(&Driver::fenceWaiterThread, this);

   // Set up the VMA
   VmaAllocatorCreateInfo allocatorDesc = {};
   allocatorDesc.physicalDevice = mPhysDevice;
   allocatorDesc.device = mDevice;
   vmaCreateAllocator(&allocatorDesc, &mAllocator);

   // Set up our drawing pipeline layout
   auto makeStageSet = [&](int stageIndex)
   {
      vk::ShaderStageFlags stageFlags;
      if (stageIndex == 0) {
         stageFlags = vk::ShaderStageFlagBits::eVertex;
      } else if (stageIndex == 1) {
         stageFlags = vk::ShaderStageFlagBits::eGeometry;
      } else if (stageIndex == 2) {
         stageFlags = vk::ShaderStageFlagBits::eFragment;
      } else {
         decaf_abort("Unexpected shader stage index");
      }

      std::vector<vk::DescriptorSetLayoutBinding> bindings;

      for (auto i = 0u; i < latte::MaxSamplers; ++i) {
         vk::DescriptorSetLayoutBinding sampBindingDesc;
         sampBindingDesc.binding = i;
         sampBindingDesc.descriptorType = vk::DescriptorType::eSampler;
         sampBindingDesc.descriptorCount = 1;
         sampBindingDesc.stageFlags = stageFlags;
         sampBindingDesc.pImmutableSamplers = nullptr;
         bindings.push_back(sampBindingDesc);
      }

      for (auto i = 0u; i < latte::MaxTextures; ++i) {
         vk::DescriptorSetLayoutBinding texBindingDesc;
         texBindingDesc.binding = latte::MaxSamplers + i;
         texBindingDesc.descriptorType = vk::DescriptorType::eSampledImage;
         texBindingDesc.descriptorCount = 1;
         texBindingDesc.stageFlags = stageFlags;
         texBindingDesc.pImmutableSamplers = nullptr;
         bindings.push_back(texBindingDesc);
      }

      for (auto i = 0u; i < latte::MaxUniformBlocks; ++i) {
         if (i >= 15) {
            // Vulkan does not support more than 15 uniform blocks here,
            // unfortunately. If we ever encounter a game needing more than
            // that, we will need to do block splitting or utilize SSBOs.
            break;
         }

         vk::DescriptorSetLayoutBinding cbufferBindingDesc;
         cbufferBindingDesc.binding = latte::MaxSamplers + latte::MaxTextures + i;
         cbufferBindingDesc.descriptorType = vk::DescriptorType::eUniformBuffer;
         cbufferBindingDesc.descriptorCount = 1;
         cbufferBindingDesc.stageFlags = stageFlags;
         cbufferBindingDesc.pImmutableSamplers = nullptr;
         bindings.push_back(cbufferBindingDesc);
      }

      vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutDesc;
      descriptorSetLayoutDesc.bindingCount = static_cast<uint32_t>(bindings.size());
      descriptorSetLayoutDesc.pBindings = bindings.data();
      return mDevice.createDescriptorSetLayout(descriptorSetLayoutDesc);
   };

   mVertexDescriptorSetLayout = makeStageSet(0);
   mGeometryDescriptorSetLayout = makeStageSet(1);
   mPixelDescriptorSetLayout = makeStageSet(2);

   std::array<vk::DescriptorSetLayout, 3> descriptorLayouts = {
      mVertexDescriptorSetLayout,
      mGeometryDescriptorSetLayout,
      mPixelDescriptorSetLayout };

   std::vector<vk::PushConstantRange> pushConstants;

   vk::PushConstantRange screenSpaceConstants;
   screenSpaceConstants.stageFlags = vk::ShaderStageFlagBits::eVertex;
   screenSpaceConstants.offset = 0;
   screenSpaceConstants.size = 32;
   pushConstants.push_back(screenSpaceConstants);

   vk::PushConstantRange alphaRefConstants;
   alphaRefConstants.stageFlags = vk::ShaderStageFlagBits::eFragment;
   alphaRefConstants.offset = 32;
   alphaRefConstants.size = 8;
   pushConstants.push_back(alphaRefConstants);

   vk::PipelineLayoutCreateInfo pipelineLayoutDesc;
   pipelineLayoutDesc.setLayoutCount = static_cast<uint32_t>(descriptorLayouts.size());
   pipelineLayoutDesc.pSetLayouts = descriptorLayouts.data();
   pipelineLayoutDesc.pushConstantRangeCount = static_cast<uint32_t>(pushConstants.size());
   pipelineLayoutDesc.pPushConstantRanges = pushConstants.data();
   mPipelineLayout = mDevice.createPipelineLayout(pipelineLayoutDesc);
}
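The function above stops once the allocator and pipeline layout exist. As a rough sketch of what a later allocation through mAllocator could look like, using the classic VMA usage enums; the buffer size and usage flags below are illustrative, not taken from the driver:

   // Sketch only: size and usage flags are illustrative.
   VkBufferCreateInfo bufferDesc = {};
   bufferDesc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
   bufferDesc.size = 65536;
   bufferDesc.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                      VK_BUFFER_USAGE_TRANSFER_DST_BIT;
   bufferDesc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

   // Ask VMA for device-local memory; it picks a compatible memory type and
   // sub-allocates from one of its larger VkDeviceMemory blocks.
   VmaAllocationCreateInfo allocDesc = {};
   allocDesc.usage = VMA_MEMORY_USAGE_GPU_ONLY;

   VkBuffer buffer;
   VmaAllocation allocation;
   auto result = vmaCreateBuffer(mAllocator, &bufferDesc, &allocDesc,
                                 &buffer, &allocation, nullptr);
   if (result != VK_SUCCESS) {
      decaf_abort("Failed to allocate buffer memory");
   }

   // ... use the buffer ...

   // The buffer and its allocation are released together.
   vmaDestroyBuffer(mAllocator, buffer, allocation);

Sub-allocation like this is the main benefit of VMA: implementations cap the total number of VkDeviceMemory allocations (maxMemoryAllocationCount), so one vkAllocateMemory call per resource does not scale.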
Example #3
/** Creates a new VMA allocator instance.
 *
 *  @return true if successful, false otherwise.
 **/
bool Anvil::MemoryAllocatorBackends::VMA::VMAAllocator::init()
{
    VmaAllocatorCreateInfo create_info                        = {};
    const bool             khr_dedicated_allocation_supported = m_device_ptr->get_extension_info()->khr_dedicated_allocation();
    VkResult               result                             = VK_ERROR_DEVICE_LOST;

    /* Prepare the VK func ptr array. Supplying these pointers makes VMA use
     * the application's own Vulkan entrypoints instead of the statically
     * linked ones. */
    m_vma_func_ptrs.reset(
        new VmaVulkanFunctions()
    );

    if (m_vma_func_ptrs == nullptr)
    {
        anvil_assert(m_vma_func_ptrs != nullptr);

        goto end;
    }

    m_vma_func_ptrs->vkAllocateMemory                    = Vulkan::vkAllocateMemory;
    m_vma_func_ptrs->vkBindBufferMemory                  = Vulkan::vkBindBufferMemory;
    m_vma_func_ptrs->vkBindImageMemory                   = Vulkan::vkBindImageMemory;
    m_vma_func_ptrs->vkCreateBuffer                      = Vulkan::vkCreateBuffer;
    m_vma_func_ptrs->vkCreateImage                       = Vulkan::vkCreateImage;
    m_vma_func_ptrs->vkDestroyBuffer                     = Vulkan::vkDestroyBuffer;
    m_vma_func_ptrs->vkDestroyImage                      = Vulkan::vkDestroyImage;
    m_vma_func_ptrs->vkFreeMemory                        = Vulkan::vkFreeMemory;
    m_vma_func_ptrs->vkGetBufferMemoryRequirements       = Vulkan::vkGetBufferMemoryRequirements;
    m_vma_func_ptrs->vkGetImageMemoryRequirements        = Vulkan::vkGetImageMemoryRequirements;
    m_vma_func_ptrs->vkGetPhysicalDeviceMemoryProperties = Vulkan::vkGetPhysicalDeviceMemoryProperties;
    m_vma_func_ptrs->vkGetPhysicalDeviceProperties       = Vulkan::vkGetPhysicalDeviceProperties;
    m_vma_func_ptrs->vkMapMemory                         = Vulkan::vkMapMemory;
    m_vma_func_ptrs->vkUnmapMemory                       = Vulkan::vkUnmapMemory;

    if (m_device_ptr->get_extension_info()->khr_get_memory_requirements2() )
    {
        m_vma_func_ptrs->vkGetBufferMemoryRequirements2KHR = m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetBufferMemoryRequirements2KHR;
        m_vma_func_ptrs->vkGetImageMemoryRequirements2KHR  = m_device_ptr->get_extension_khr_get_memory_requirements2_entrypoints().vkGetImageMemoryRequirements2KHR;
    }
    else
    {
        m_vma_func_ptrs->vkGetBufferMemoryRequirements2KHR = nullptr;
        m_vma_func_ptrs->vkGetImageMemoryRequirements2KHR  = nullptr;
    }

    /* Prepare VMA create info struct */
    switch (m_device_ptr->get_type() )
    {
        case Anvil::DeviceType::MULTI_GPU:
        {
            /* The VMA library takes a physical device handle to extract info
             * regarding supported memory types and the like. As
             * VK_KHR_device_group provides explicit mGPU support, all physical
             * devices within a logical device are guaranteed to offer exactly
             * the same capabilities, so we are safe to pass the zeroth
             * physical device to the library.
             */
            const Anvil::MGPUDevice* mgpu_device_ptr(dynamic_cast<const Anvil::MGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = mgpu_device_ptr->get_physical_device(0)->get_physical_device();
            break;
        }

        case Anvil::DeviceType::SINGLE_GPU:
        {
            const Anvil::SGPUDevice* sgpu_device_ptr(dynamic_cast<const Anvil::SGPUDevice*>(m_device_ptr) );

            create_info.physicalDevice = sgpu_device_ptr->get_physical_device()->get_physical_device();
            break;
        }

        default:
        {
            anvil_assert_fail();
        }
    }

    create_info.flags                       = (khr_dedicated_allocation_supported) ? VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT : 0;
    create_info.device                      = m_device_ptr->get_device_vk();
    create_info.pAllocationCallbacks        = nullptr;
    create_info.preferredLargeHeapBlockSize = 0;
    create_info.pVulkanFunctions            = m_vma_func_ptrs.get();

    result = vmaCreateAllocator(&create_info,
                                &m_allocator);

    anvil_assert_vk_call_succeeded(result);
end:
    return is_vk_call_successful(result);
}
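When VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT is set as above, VMA can use VK_KHR_get_memory_requirements2 to decide per resource whether it prefers its own VkDeviceMemory block. A caller can also force this per allocation; the following is a sketch under assumed image parameters, not Anvil code:

/* Sketch only: a dedicated-memory request for a large render target,
 * assuming an allocator created as in the example above. All image
 * parameters here are illustrative. */
VkImageCreateInfo image_create_info = {};

image_create_info.sType         = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.imageType     = VK_IMAGE_TYPE_2D;
image_create_info.format        = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent        = {1920, 1080, 1};
image_create_info.mipLevels     = 1;
image_create_info.arrayLayers   = 1;
image_create_info.samples       = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling        = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage         = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                  VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

VmaAllocationCreateInfo alloc_create_info = {};

alloc_create_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;
alloc_create_info.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkImage       image      = VK_NULL_HANDLE;
VmaAllocation allocation = nullptr;
VkResult      result     = vmaCreateImage(m_allocator,
                                          &image_create_info,
                                          &alloc_create_info,
                                          &image,
                                          &allocation,
                                          nullptr); /* opt. VmaAllocationInfo */

The matching vmaDestroyImage(m_allocator, image, allocation) call releases the image together with its dedicated memory block.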