//Destroyer VulkanBase::~VulkanBase() { // Wait until all operations are complete to destroy stuff CHECK_RESULT(vkQueueWaitIdle(queue)); swapchain.clean(); //vkFreeCommandBuffers(device, commandPool, commandBuffersVector.size(), commandBuffersVector.data()); vkDestroyCommandPool(device, commandPool, nullptr); // Destroy synchronization elements vkDestroySemaphore(device, imageAcquiredSemaphore, nullptr); vkDestroySemaphore(device, renderingCompletedSemaphore, nullptr); vkDestroyFence(device, presentFence, nullptr); // Destroy logical device vkDestroyDevice(device, nullptr); #ifdef _DEBUG if (enableValidation) { vkDebug::freeDebugCallback(instance); } #endif // _DEBUG vkDestroyInstance(instance,nullptr); }
// Destroys a command pool that was created on this logical device.
// Preconditions (asserted): both the device handle and the pool handle
// must be valid Vulkan IDs.
void VKDevice::destroy_cmd_pool (VkCommandPool pool)
{
    XCAM_ASSERT (XCAM_IS_VALID_VK_ID (_dev_id));
    XCAM_ASSERT (XCAM_IS_VALID_VK_ID (pool));

    // Pass the device's custom allocation callbacks (may be NULL).
    vkDestroyCommandPool (_dev_id, pool, _allocator.ptr ());
}
// Destroys every child resource created by this tutorial step, in reverse
// creation order, and nulls each handle so the method is safe to call
// repeatedly. Everything is skipped when the logical device was never
// created; the device is drained first so nothing is still in flight.
void Tutorial03::ChildClear() {
  if( GetDevice() != VK_NULL_HANDLE ) {
    vkDeviceWaitIdle( GetDevice() );

    // Free the graphics command buffers (they were allocated in one batch,
    // so [0] being valid implies the whole vector is) before their pool.
    if( (Vulkan.GraphicsCommandBuffers.size() > 0) && (Vulkan.GraphicsCommandBuffers[0] != VK_NULL_HANDLE) ) {
      vkFreeCommandBuffers( GetDevice(), Vulkan.GraphicsCommandPool, static_cast<uint32_t>(Vulkan.GraphicsCommandBuffers.size()), &Vulkan.GraphicsCommandBuffers[0] );
      Vulkan.GraphicsCommandBuffers.clear();
    }
    if( Vulkan.GraphicsCommandPool != VK_NULL_HANDLE ) {
      vkDestroyCommandPool( GetDevice(), Vulkan.GraphicsCommandPool, nullptr );
      Vulkan.GraphicsCommandPool = VK_NULL_HANDLE;
    }
    if( Vulkan.GraphicsPipeline != VK_NULL_HANDLE ) {
      vkDestroyPipeline( GetDevice(), Vulkan.GraphicsPipeline, nullptr );
      Vulkan.GraphicsPipeline = VK_NULL_HANDLE;
    }
    if( Vulkan.RenderPass != VK_NULL_HANDLE ) {
      vkDestroyRenderPass( GetDevice(), Vulkan.RenderPass, nullptr );
      Vulkan.RenderPass = VK_NULL_HANDLE;
    }
    // One framebuffer per swapchain image; destroy each valid one.
    for( size_t i = 0; i < Vulkan.Framebuffers.size(); ++i ) {
      if( Vulkan.Framebuffers[i] != VK_NULL_HANDLE ) {
        vkDestroyFramebuffer( GetDevice(), Vulkan.Framebuffers[i], nullptr );
        Vulkan.Framebuffers[i] = VK_NULL_HANDLE;
      }
    }
    Vulkan.Framebuffers.clear();
  }
}
// Destroys every per-frame resource owned by the manager: first runs the
// deferred cleanup callbacks queued on each frame, then destroys the
// fence, descriptor pool, command buffers and finally the command pool,
// nulling each handle so the teardown is idempotent.
void CommandBufferManager::DestroyCommandBuffers()
{
  VkDevice device = g_vulkan_context->GetDevice();

  for (FrameResources& resources : m_frame_resources)
  {
    // Deferred destructions registered for this frame run first, while the
    // objects they reference still exist.
    for (auto& it : resources.cleanup_resources)
      it();
    resources.cleanup_resources.clear();

    if (resources.fence != VK_NULL_HANDLE)
    {
      vkDestroyFence(device, resources.fence, nullptr);
      resources.fence = VK_NULL_HANDLE;
    }
    if (resources.descriptor_pool != VK_NULL_HANDLE)
    {
      vkDestroyDescriptorPool(device, resources.descriptor_pool, nullptr);
      resources.descriptor_pool = VK_NULL_HANDLE;
    }
    // Buffers were allocated in one batch: element 0 valid implies all valid.
    if (resources.command_buffers[0] != VK_NULL_HANDLE)
    {
      vkFreeCommandBuffers(device, resources.command_pool, static_cast<u32>(resources.command_buffers.size()), resources.command_buffers.data());
      resources.command_buffers.fill(VK_NULL_HANDLE);
    }
    if (resources.command_pool != VK_NULL_HANDLE)
    {
      vkDestroyCommandPool(device, resources.command_pool, nullptr);
      resources.command_pool = VK_NULL_HANDLE;
    }
  }
}
void CommandBufferManager::DestroyCommandBuffers() { VkDevice device = g_vulkan_context->GetDevice(); for (FrameResources& resources : m_frame_resources) { // The Vulkan spec section 5.2 says: "When a pool is destroyed, all command buffers allocated // from the pool are freed.". So we don't need to free the command buffers, just the pools. // We destroy the command pool first, to avoid any warnings from the validation layers about // objects which are pending destruction being in-use. if (resources.command_pool != VK_NULL_HANDLE) vkDestroyCommandPool(device, resources.command_pool, nullptr); // Destroy any pending objects. for (auto& it : resources.cleanup_resources) it(); if (resources.semaphore != VK_NULL_HANDLE) vkDestroySemaphore(device, resources.semaphore, nullptr); if (resources.fence != VK_NULL_HANDLE) vkDestroyFence(device, resources.fence, nullptr); if (resources.descriptor_pool != VK_NULL_HANDLE) vkDestroyDescriptorPool(device, resources.descriptor_pool, nullptr); } vkDestroySemaphore(device, m_present_semaphore, nullptr); }
// Tears down the ImGui Vulkan backend globals: per-queued-frame resources,
// per-backbuffer resources, render pass, swapchain, surface, the optional
// debug-report callback, and finally the device and instance. All handles
// use the shared g_Allocator callbacks.
static void cleanup_vulkan()
{
    vkDestroyDescriptorPool(g_Device, g_DescriptorPool, g_Allocator);

    // Per queued-frame objects: fence, command buffer + its pool, semaphores.
    for (int i = 0; i < IMGUI_VK_QUEUED_FRAMES; i++)
    {
        vkDestroyFence(g_Device, g_Fence[i], g_Allocator);
        vkFreeCommandBuffers(g_Device, g_CommandPool[i], 1, &g_CommandBuffer[i]);
        vkDestroyCommandPool(g_Device, g_CommandPool[i], g_Allocator);
        vkDestroySemaphore(g_Device, g_PresentCompleteSemaphore[i], g_Allocator);
        vkDestroySemaphore(g_Device, g_RenderCompleteSemaphore[i], g_Allocator);
    }

    // Per swapchain-image objects.
    for (uint32_t i = 0; i < g_BackBufferCount; i++)
    {
        vkDestroyImageView(g_Device, g_BackBufferView[i], g_Allocator);
        vkDestroyFramebuffer(g_Device, g_Framebuffer[i], g_Allocator);
    }
    vkDestroyRenderPass(g_Device, g_RenderPass, g_Allocator);
    vkDestroySwapchainKHR(g_Device, g_Swapchain, g_Allocator);
    vkDestroySurfaceKHR(g_Instance, g_Surface, g_Allocator);

#ifdef IMGUI_VULKAN_DEBUG_REPORT
    // get the proc address of the function pointer, required for used extensions
    auto vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(g_Instance, "vkDestroyDebugReportCallbackEXT");
    vkDestroyDebugReportCallbackEXT(g_Instance, g_Debug_Report, g_Allocator);
#endif // IMGUI_VULKAN_DEBUG_REPORT

    vkDestroyDevice(g_Device, g_Allocator);
    vkDestroyInstance(g_Instance, g_Allocator);
}
// Releases the manager's command pool, if one was ever created.
// The handle is reset afterwards, so calling this twice is a harmless no-op.
void CommandBufferManager::DestroyCommandPool()
{
  if (!m_command_pool)
    return;

  vkDestroyCommandPool(g_vulkan_context->GetDevice(), m_command_pool, nullptr);
  m_command_pool = VK_NULL_HANDLE;
}
// Destructor: releases every Vulkan resource owned by the example base in
// reverse creation order, then (on Linux/XCB) the platform window.
VulkanExampleBase::~VulkanExampleBase()
{
	// Clean up Vulkan resources
	swapChain.cleanup();
	if (descriptorPool != VK_NULL_HANDLE)
	{
		vkDestroyDescriptorPool(device, descriptorPool, nullptr);
	}
	// The one-off setup command buffer is created lazily, so it may be null.
	if (setupCmdBuffer != VK_NULL_HANDLE)
	{
		vkFreeCommandBuffers(device, cmdPool, 1, &setupCmdBuffer);
	}
	destroyCommandBuffers();
	vkDestroyRenderPass(device, renderPass, nullptr);
	for (uint32_t i = 0; i < frameBuffers.size(); i++)
	{
		vkDestroyFramebuffer(device, frameBuffers[i], nullptr);
	}
	for (auto& shaderModule : shaderModules)
	{
		vkDestroyShaderModule(device, shaderModule, nullptr);
	}
	// Depth/stencil attachment: view, then image, then its backing memory.
	vkDestroyImageView(device, depthStencil.view, nullptr);
	vkDestroyImage(device, depthStencil.image, nullptr);
	vkFreeMemory(device, depthStencil.mem, nullptr);
	vkDestroyPipelineCache(device, pipelineCache, nullptr);
	if (textureLoader)
	{
		delete textureLoader;
	}
	vkDestroyCommandPool(device, cmdPool, nullptr);
	vkDestroySemaphore(device, semaphores.presentComplete, nullptr);
	vkDestroySemaphore(device, semaphores.renderComplete, nullptr);
	vkDestroyDevice(device, nullptr);
	// The debug callback lives on the instance; free it before the instance.
	if (enableValidation)
	{
		vkDebug::freeDebugCallback(instance);
	}
	vkDestroyInstance(instance, nullptr);
#if defined(__linux)
#if defined(__ANDROID__)
	// todo : android cleanup (if required)
#else
	xcb_destroy_window(connection, window);
	xcb_disconnect(connection);
#endif
#endif
}
// Frees the wrapped VkCommandPool (no-op when it was never created) and
// resets the handle so the allocator can be destroyed again safely.
void CommandAllocator::Destroy()
{
    if (m_Pool != VK_NULL_HANDLE)
    {
        vkDestroyCommandPool(GetRawDevice(), m_Pool, nullptr);
        m_Pool = VK_NULL_HANDLE;
    }
}
// Destructor: stops the render thread, drains the GPU, then destroys the
// per-frame command buffers/pools, fences and the shared semaphores.
VulkanRenderManager::~VulkanRenderManager() {
	ILOG("VulkanRenderManager destructor");
	StopThread();
	// Nothing may be in flight once we start destroying per-frame objects.
	vulkan_->WaitUntilQueueIdle();

	VkDevice device = vulkan_->GetDevice();
	vkDestroySemaphore(device, acquireSemaphore_, nullptr);
	vkDestroySemaphore(device, renderingCompleteSemaphore_, nullptr);
	for (int i = 0; i < vulkan_->GetInflightFrames(); i++) {
		// FIX: removed the unused local cmdBuf[2] array — the buffers are
		// freed individually from their own pools below.
		vkFreeCommandBuffers(device, frameData_[i].cmdPoolInit, 1, &frameData_[i].initCmd);
		vkFreeCommandBuffers(device, frameData_[i].cmdPoolMain, 1, &frameData_[i].mainCmd);
		vkDestroyCommandPool(device, frameData_[i].cmdPoolInit, nullptr);
		vkDestroyCommandPool(device, frameData_[i].cmdPoolMain, nullptr);
		vkDestroyFence(device, frameData_[i].fence, nullptr);
	}
	queueRunner_.DestroyDeviceObjects();
}
/**
 * Default destructor
 *
 * @note Destroys the command pool (if one was created) and then
 *       frees the logical device itself.
 */
~VulkanDevice()
{
    // The pool belongs to the device, so it must be destroyed first.
    if (commandPool)
    {
        vkDestroyCommandPool(logicalDevice, commandPool, nullptr);
    }

    if (logicalDevice)
    {
        vkDestroyDevice(logicalDevice, nullptr);
    }
}
void VkContext::Destroy() { vkDeviceWaitIdle(dev); // Free command buffers if (drawCmdBuffers.size() > 0) { vkFreeCommandBuffers(dev, cmdPool, (u32)drawCmdBuffers.size(), &drawCmdBuffers[0]); } // Destroy command pools vkDestroyCommandPool(dev, cmdPool, nullptr); // Destroy semaphores vkDestroySemaphore(dev, acquireCompleteSemaphore, nullptr); vkDestroySemaphore(dev, renderCompleteSemaphore, nullptr); // Destroy swapchain image views for (u32 i = 0; i < swapchainImageViews.size(); i++) { vkDestroyImageView(dev, swapchainImageViews[i], nullptr); } // Destroy swapchain if (swapchain) { vkDestroySwapchainKHR(dev, swapchain, nullptr); } // Destroy surface if (surf) { vkDestroySurfaceKHR(instance, surf, nullptr); } // Destroy device if (dev) { vkDestroyDevice(dev, nullptr); } #ifdef VOXL_DEBUG // Destroy debug report callback if (msgCallback != VK_NULL_HANDLE) { vkDestroyDebugReportCallbackEXT(instance, msgCallback, nullptr); } #endif // Destroy instance if (instance) { vkDestroyInstance(instance, nullptr); } // Terminate GLFW glfwTerminate(); }
/*
 * Minimal sample: create a command pool, allocate a single primary command
 * buffer from it, then free both and tear down the device/instance.
 * Vulkan results are checked with assert(); returns 0 on success.
 */
int sample_main() {
    VkResult U_ASSERT_ONLY res;
    struct sample_info info = {};
    char sample_title[] = "Command Buffer Sample";

    // Shared sample boilerplate: layers, extensions, instance, device, window.
    init_global_layer_properties(info);
    init_instance_extension_names(info);
    init_device_extension_names(info);
    init_instance(info, sample_title);
    init_enumerate_device(info);
    init_window_size(info, 500, 500);
    init_connection(info);
    init_window(info);
    init_swapchain_extension(info);
    init_device(info);

    /* VULKAN_KEY_START */

    /* Create a command pool to allocate our command buffer from */
    VkCommandPoolCreateInfo cmd_pool_info = {};
    cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    cmd_pool_info.pNext = NULL;
    cmd_pool_info.queueFamilyIndex = info.graphics_queue_family_index;
    cmd_pool_info.flags = 0;

    res = vkCreateCommandPool(info.device, &cmd_pool_info, NULL, &info.cmd_pool);
    assert(res == VK_SUCCESS);

    /* Create the command buffer from the command pool */
    VkCommandBufferAllocateInfo cmd = {};
    cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    cmd.pNext = NULL;
    cmd.commandPool = info.cmd_pool;
    cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    cmd.commandBufferCount = 1;

    res = vkAllocateCommandBuffers(info.device, &cmd, &info.cmd);
    assert(res == VK_SUCCESS);

    /* VULKAN_KEY_END */

    // Free the buffer before destroying the pool it came from.
    VkCommandBuffer cmd_bufs[1] = {info.cmd};
    vkFreeCommandBuffers(info.device, info.cmd_pool, 1, cmd_bufs);
    vkDestroyCommandPool(info.device, info.cmd_pool, NULL);
    destroy_window(info);
    destroy_device(info);
    destroy_instance(info);

    return 0;
}
// Destructor: destroys the per-frame fences, render-finished semaphores and
// command buffers, then the command pool that owned the buffers.
RHI_CommandList::~RHI_CommandList()
{
    auto cmd_pool_vk = static_cast<VkCommandPool>(m_cmd_pool);
    for (unsigned int i = 0; i < g_max_frames_in_flight; i++)
    {
        Vulkan_Common::fence::destroy(m_rhi_device, m_fences_in_flight[i]);
        Vulkan_Common::semaphore::destroy(m_rhi_device, m_semaphores_render_finished[i]);
        // Handles are stored type-erased (void*); cast back for the API call.
        auto cmd_buffer = static_cast<VkCommandBuffer>(m_cmd_buffers[i]);
        vkFreeCommandBuffers(m_rhi_device->GetContext()->device, cmd_pool_vk, 1, &cmd_buffer);
    }
    m_cmd_buffers.clear();
    m_semaphores_render_finished.clear();
    m_fences_in_flight.clear();

    // Pool goes last, after all of its buffers have been freed.
    vkDestroyCommandPool(m_rhi_device->GetContext()->device, cmd_pool_vk, nullptr);
    m_cmd_pool = nullptr;
}
// Destructor: destroys the command pool, device, (optionally) the debug
// report callback, cached shaders and finally the instance.
Context::~Context() {
    vkDestroyCommandPool(kDevice, kCmdPool, NULL);
    vkDestroyDevice(kDevice, NULL);

    if (enableValidationLayers) {
        auto func = (PFN_vkDestroyDebugReportCallbackEXT)
            vkGetInstanceProcAddr(kInstance, "vkDestroyDebugReportCallbackEXT");
        // FIX: the original threw std::runtime_error here. Throwing from a
        // destructor calls std::terminate if it ever runs during unwinding;
        // a missing extension entry point at shutdown is not worth crashing
        // over, so skip the callback destruction instead.
        if (func != nullptr) {
            func(kInstance, kDebugReportCallback, NULL);
        }
    }

    kShaders.clear();

    vkDestroyInstance(kInstance, NULL);
}
/**
 * Default destructor, frees up all Vulkan resources acquired by the text overlay
 */
~VulkanTextOverlay()
{
	// Free up all Vulkan resources requested by the text overlay
	vertexBuffer.destroy();
	// Font texture objects: sampler, image, view and backing memory.
	vkDestroySampler(vulkanDevice->logicalDevice, sampler, nullptr);
	vkDestroyImage(vulkanDevice->logicalDevice, image, nullptr);
	vkDestroyImageView(vulkanDevice->logicalDevice, view, nullptr);
	vkFreeMemory(vulkanDevice->logicalDevice, imageMemory, nullptr);
	// Descriptor and pipeline state.
	vkDestroyDescriptorSetLayout(vulkanDevice->logicalDevice, descriptorSetLayout, nullptr);
	vkDestroyDescriptorPool(vulkanDevice->logicalDevice, descriptorPool, nullptr);
	vkDestroyPipelineLayout(vulkanDevice->logicalDevice, pipelineLayout, nullptr);
	vkDestroyPipelineCache(vulkanDevice->logicalDevice, pipelineCache, nullptr);
	vkDestroyPipeline(vulkanDevice->logicalDevice, pipeline, nullptr);
	vkDestroyRenderPass(vulkanDevice->logicalDevice, renderPass, nullptr);
	// Command buffers are freed before their pool is destroyed.
	vkFreeCommandBuffers(vulkanDevice->logicalDevice, commandPool, static_cast<uint32_t>(cmdBuffers.size()), cmdBuffers.data());
	vkDestroyCommandPool(vulkanDevice->logicalDevice, commandPool, nullptr);
	vkDestroyFence(vulkanDevice->logicalDevice, fence, nullptr);
}
// Destructor: releases the pipeline objects, uniform/mesh data and the
// per-render-thread command buffers and pools.
~VulkanExample()
{
	// Clean up used Vulkan resources
	// Note : Inherited destructor cleans up resources stored in base class
	vkDestroyPipeline(device, pipelines.phong, nullptr);
	vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
	vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);

	vkTools::destroyUniformData(device, &uniformData.vsScene);

	vkMeshLoader::freeMeshBufferResources(device, &meshes.ufo);

	for (auto& thread : renderThreads)
	{
		// FIX: vkFreeCommandBuffers requires commandBufferCount > 0, so skip
		// threads whose buffer vector is empty. The cast also removes the
		// implicit size_t -> uint32_t narrowing of the count argument.
		if (!thread.cmdBuffers.empty())
		{
			vkFreeCommandBuffers(device, thread.cmdPool, static_cast<uint32_t>(thread.cmdBuffers.size()), thread.cmdBuffers.data());
		}
		vkDestroyCommandPool(device, thread.cmdPool, nullptr);
	}
}
// Creates a VkCommandPool for the given queue family and wraps it in a
// shared-pointer ICommandPoolSP.
// Returns an empty smart pointer when the device handle is null, pool
// creation fails, or wrapping fails (in which case the raw pool is
// destroyed again so nothing leaks).
ICommandPoolSP VKTS_APIENTRY commandPoolCreate(const VkDevice device, const VkCommandPoolCreateFlags flags, const uint32_t queueFamilyIndex)
{
    if (!device)
    {
        return ICommandPoolSP();
    }

    VkResult result;

    VkCommandPoolCreateInfo commandPoolCreateInfo;

    memset(&commandPoolCreateInfo, 0, sizeof(VkCommandPoolCreateInfo));

    commandPoolCreateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    commandPoolCreateInfo.flags = flags;
    commandPoolCreateInfo.queueFamilyIndex = queueFamilyIndex;

    VkCommandPool commandPool;

    result = vkCreateCommandPool(device, &commandPoolCreateInfo, nullptr, &commandPool);

    if (result != VK_SUCCESS)
    {
        logPrint(VKTS_LOG_ERROR, "CommandPool: Could not create command pool.");

        return ICommandPoolSP();
    }

    auto newInstance = new CommandPool(device, queueFamilyIndex, flags, commandPool);

    // Defensive check: if the wrapper could not be constructed, destroy the
    // raw pool before bailing out so the handle does not leak.
    if (!newInstance)
    {
        vkDestroyCommandPool(device, commandPool, nullptr);

        return ICommandPoolSP();
    }

    return ICommandPoolSP(newInstance);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // Main // int main() { if( createWindow() ) { if( initVkInstance( "vulkan_test", "lamp_engine" ) ) { getDevicesList(); if( findSupportedQueue() && createDevice() ) { if( initCommandBuffers() ) { if( initSwapChains() ) { ShowWindow( ghWnd, true ); // Start loop MSG msg; while( !gClose && GetMessage( &msg, NULL, 0, 0 ) > 0 ) { TranslateMessage( &msg ); DispatchMessage( &msg ); } } vkFreeCommandBuffers( gDevice, gCmdPool, 1, &gCmd ); vkDestroyCommandPool( gDevice, gCmdPool, nullptr ); } vkDestroyDevice( gDevice, nullptr ); } vkDestroyInstance( gInstance, nullptr ); } } system("PAUSE"); return 0; }
// Destructor: frees every cached command buffer in the 2x2 type matrix
// (the two indices select command-buffer categories), releases each
// vector's storage via the factory allocator, then destroys the pool.
CommandBufferFactory::~CommandBufferFactory()
{
	for(U i = 0; i < 2; ++i)
	{
		for(U j = 0; j < 2; ++j)
		{
			CmdbType& type = m_types[i][j];

			// Only call vkFreeCommandBuffers when there is something to free.
			if(type.m_count)
			{
				vkFreeCommandBuffers(m_dev, m_pool, type.m_count, &type.m_cmdbs[0]);
			}

			type.m_cmdbs.destroy(m_alloc);
		}
	}

	if(m_pool)
	{
		vkDestroyCommandPool(m_dev, m_pool, nullptr);
	}
}
// Destructor: releases pipelines, the primary/secondary command buffers,
// mesh data, the per-thread command buffers and pools, and the render fence.
~VulkanExample()
{
	// Clean up used Vulkan resources
	// Note : Inherited destructor cleans up resources stored in base class
	vkDestroyPipeline(device, pipelines.phong, nullptr);
	vkDestroyPipeline(device, pipelines.starsphere, nullptr);
	vkDestroyPipelineLayout(device, pipelineLayout, nullptr);

	vkFreeCommandBuffers(device, cmdPool, 1, &primaryCommandBuffer);
	vkFreeCommandBuffers(device, cmdPool, 1, &secondaryCommandBuffer);

	vkMeshLoader::freeMeshBufferResources(device, &meshes.ufo);
	vkMeshLoader::freeMeshBufferResources(device, &meshes.skysphere);

	for (auto& thread : threadData)
	{
		// FIX: vkFreeCommandBuffers requires commandBufferCount > 0, so skip
		// threads with no allocated buffers. The cast also removes the
		// implicit size_t -> uint32_t narrowing of the count argument.
		if (!thread.commandBuffer.empty())
		{
			vkFreeCommandBuffers(device, thread.commandPool, static_cast<uint32_t>(thread.commandBuffer.size()), thread.commandBuffer.data());
		}
		vkDestroyCommandPool(device, thread.commandPool, nullptr);
	}

	vkDestroyFence(device, renderFence, nullptr);
}
// Destructor: tears down all base-class Vulkan objects in reverse creation
// order, finishing with the logical device and the instance.
VulkanBase::~VulkanBase()
{
	swapChain.cleanup();
	vkDestroyDescriptorPool(device, descriptorPool, nullptr);
	// The setup command buffer is created lazily and may never have existed.
	if (setupCmdBuffer != VK_NULL_HANDLE)
		vkFreeCommandBuffers(device, cmdPool, 1, &setupCmdBuffer);
	destroyCommandBuffers();
	vkDestroyRenderPass(device, renderPass, nullptr);
	for (uint32_t i = 0; i < frameBuffers.size(); i++)
		vkDestroyFramebuffer(device, frameBuffers[i], nullptr);
	for (auto& shaderModule : shaderModules)
		vkDestroyShaderModule(device, shaderModule, nullptr);
	// Depth/stencil attachment: view, then image, then its backing memory.
	vkDestroyImageView(device, depthStencil.view, nullptr);
	vkDestroyImage(device, depthStencil.image, nullptr);
	vkFreeMemory(device, depthStencil.mem, nullptr);
	vkDestroyPipelineCache(device, pipelineCache, nullptr);
	if (textureLoader)
		delete textureLoader;
	vkDestroyCommandPool(device, cmdPool, nullptr);
	vkDestroySemaphore(device, semaphores.presentComplete, nullptr);
	vkDestroySemaphore(device, semaphores.renderComplete, nullptr);
	vkDestroyDevice(device, nullptr);
	// The debug callback lives on the instance; free it before the instance.
	if (enableValidation)
		vkDebug::freeDebugCallback(instance);
	vkDestroyInstance(instance, nullptr);
}
/* GObject finalize: releases the queue-family properties, the command pool
 * and the logical device, then drops the reference on the instance.
 *
 * FIX: the pool was previously destroyed *before* vkDeviceWaitIdle, i.e.
 * while command buffers allocated from it could still be executing, and
 * vkDestroyCommandPool was called even when device->device was NULL. The
 * device is now drained first and all device-level calls are guarded. */
static void
gst_vulkan_device_finalize (GObject * object)
{
  GstVulkanDevice *device = GST_VULKAN_DEVICE (object);

  g_free (device->queue_family_props);
  device->queue_family_props = NULL;

  if (device->device) {
    /* Nothing may still be in flight before device-owned objects go away. */
    vkDeviceWaitIdle (device->device);

    if (device->cmd_pool)
      vkDestroyCommandPool (device->device, device->cmd_pool, NULL);

    vkDestroyDevice (device->device, NULL);
  }
  device->cmd_pool = VK_NULL_HANDLE;
  device->device = VK_NULL_HANDLE;

  if (device->instance)
    gst_object_unref (device->instance);
  device->instance = VK_NULL_HANDLE;

  G_OBJECT_CLASS (parent_class)->finalize (object);
}
bool initCommandBuffers() { std::cout << "creating command buffers..."; VkCommandPoolCreateInfo cmdPoolInfo = {}; cmdPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmdPoolInfo.pNext = nullptr; cmdPoolInfo.queueFamilyIndex = gQueueFamilyIndex; cmdPoolInfo.flags = 0; VkResult res = vkCreateCommandPool( gDevice, &cmdPoolInfo, nullptr, &gCmdPool ); if( res != VK_SUCCESS ) { std::cout << "error creating command pool\n"; return false; } else { VkCommandBufferAllocateInfo cmdInfo = {}; cmdInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmdInfo.pNext = nullptr; cmdInfo.commandPool = gCmdPool; cmdInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmdInfo.commandBufferCount = 1; res = vkAllocateCommandBuffers( gDevice, &cmdInfo, &gCmd ); if( res != VK_SUCCESS ) { std::cout << "error allocating command buffer\n"; vkDestroyCommandPool( gDevice, gCmdPool, nullptr ); return false; } } std::cout << "command buffer created\n"; return true; }
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Swapchain Initialization Sample"; /* * Set up swapchain: * - Get supported uses for all queues * - Try to find a queue that supports both graphics and present * - If no queue supports both, find a present queue and make sure we have a * graphics queue * - Get a list of supported formats and use the first one * - Get surface properties and present modes and use them to create a swap * chain * - Create swap chain buffers * - For each buffer, create a color attachment view and set its layout to * color attachment */ init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_connection(info); init_window_size(info, 50, 50); init_window(info); /* VULKAN_KEY_START */ // Construct the surface description: #ifdef _WIN32 VkWin32SurfaceCreateInfoKHR createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; createInfo.pNext = NULL; createInfo.hinstance = info.connection; createInfo.hwnd = info.window; res = vkCreateWin32SurfaceKHR(info.inst, &createInfo, NULL, &info.surface); #else // _WIN32 VkXcbSurfaceCreateInfoKHR createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR; createInfo.pNext = NULL; createInfo.connection = info.connection; createInfo.window = info.window; res = vkCreateXcbSurfaceKHR(info.inst, &createInfo, NULL, &info.surface); #endif // _WIN32 assert(res == VK_SUCCESS); // Iterate over each queue to learn whether it supports presenting: VkBool32 *supportsPresent = (VkBool32 *)malloc(info.queue_count * sizeof(VkBool32)); for (uint32_t i = 0; i < info.queue_count; i++) { vkGetPhysicalDeviceSurfaceSupportKHR(info.gpus[0], i, info.surface, &supportsPresent[i]); } // Search for a graphics queue and a present queue in the array of queue // families, try to find one that supports both 
uint32_t graphicsQueueNodeIndex = UINT32_MAX; for (uint32_t i = 0; i < info.queue_count; i++) { if ((info.queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) { if (supportsPresent[i] == VK_TRUE) { graphicsQueueNodeIndex = i; break; } } } free(supportsPresent); // Generate error if could not find a queue that supports both a graphics // and present if (graphicsQueueNodeIndex == UINT32_MAX) { std::cout << "Could not find a queue that supports both graphics and " "present\n"; exit(-1); } info.graphics_queue_family_index = graphicsQueueNodeIndex; init_device(info); // Get the list of VkFormats that are supported: uint32_t formatCount; res = vkGetPhysicalDeviceSurfaceFormatsKHR(info.gpus[0], info.surface, &formatCount, NULL); assert(res == VK_SUCCESS); VkSurfaceFormatKHR *surfFormats = (VkSurfaceFormatKHR *)malloc(formatCount * sizeof(VkSurfaceFormatKHR)); res = vkGetPhysicalDeviceSurfaceFormatsKHR(info.gpus[0], info.surface, &formatCount, surfFormats); assert(res == VK_SUCCESS); // If the format list includes just one entry of VK_FORMAT_UNDEFINED, // the surface has no preferred format. Otherwise, at least one // supported format will be returned. 
if (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED) { info.format = VK_FORMAT_B8G8R8A8_UNORM; } else { assert(formatCount >= 1); info.format = surfFormats[0].format; } VkSurfaceCapabilitiesKHR surfCapabilities; res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(info.gpus[0], info.surface, &surfCapabilities); assert(res == VK_SUCCESS); uint32_t presentModeCount; res = vkGetPhysicalDeviceSurfacePresentModesKHR(info.gpus[0], info.surface, &presentModeCount, NULL); assert(res == VK_SUCCESS); VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR)); res = vkGetPhysicalDeviceSurfacePresentModesKHR( info.gpus[0], info.surface, &presentModeCount, presentModes); assert(res == VK_SUCCESS); VkExtent2D swapChainExtent; // width and height are either both -1, or both not -1. if (surfCapabilities.currentExtent.width == (uint32_t)-1) { // If the surface size is undefined, the size is set to // the size of the images requested. swapChainExtent.width = info.width; swapChainExtent.height = info.height; } else { // If the surface size is defined, the swap chain size must match swapChainExtent = surfCapabilities.currentExtent; } // If mailbox mode is available, use it, as is the lowest-latency non- // tearing mode. If not, try IMMEDIATE which will usually be available, // and is fastest (though it tears). If not, fall back to FIFO which is // always available. 
VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR; for (size_t i = 0; i < presentModeCount; i++) { if (presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) { swapchainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR; break; } if ((swapchainPresentMode != VK_PRESENT_MODE_MAILBOX_KHR) && (presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)) { swapchainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; } } // Determine the number of VkImage's to use in the swap chain (we desire to // own only 1 image at a time, besides the images being displayed and // queued for display): uint32_t desiredNumberOfSwapChainImages = surfCapabilities.minImageCount + 1; if ((surfCapabilities.maxImageCount > 0) && (desiredNumberOfSwapChainImages > surfCapabilities.maxImageCount)) { // Application must settle for fewer images than desired: desiredNumberOfSwapChainImages = surfCapabilities.maxImageCount; } VkSurfaceTransformFlagBitsKHR preTransform; if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) { preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; } else { preTransform = surfCapabilities.currentTransform; } VkSwapchainCreateInfoKHR swap_chain = {}; swap_chain.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swap_chain.pNext = NULL; swap_chain.surface = info.surface; swap_chain.minImageCount = desiredNumberOfSwapChainImages; swap_chain.imageFormat = info.format; swap_chain.imageExtent.width = swapChainExtent.width; swap_chain.imageExtent.height = swapChainExtent.height; swap_chain.preTransform = preTransform; swap_chain.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; swap_chain.imageArrayLayers = 1; swap_chain.presentMode = swapchainPresentMode; swap_chain.oldSwapchain = NULL; swap_chain.clipped = true; swap_chain.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; swap_chain.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; swap_chain.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; swap_chain.queueFamilyIndexCount = 0; swap_chain.pQueueFamilyIndices = 
NULL; res = vkCreateSwapchainKHR(info.device, &swap_chain, NULL, &info.swap_chain); assert(res == VK_SUCCESS); res = vkGetSwapchainImagesKHR(info.device, info.swap_chain, &info.swapchainImageCount, NULL); assert(res == VK_SUCCESS); VkImage *swapchainImages = (VkImage *)malloc(info.swapchainImageCount * sizeof(VkImage)); assert(swapchainImages); res = vkGetSwapchainImagesKHR(info.device, info.swap_chain, &info.swapchainImageCount, swapchainImages); assert(res == VK_SUCCESS); info.buffers.resize(info.swapchainImageCount); // Going to need a command buffer to send the memory barriers in // set_image_layout but we couldn't have created one before we knew // what our graphics_queue_family_index is, but now that we have it, // create the command buffer init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); vkGetDeviceQueue(info.device, info.graphics_queue_family_index, 0, &info.queue); for (uint32_t i = 0; i < info.swapchainImageCount; i++) { VkImageViewCreateInfo color_image_view = {}; color_image_view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; color_image_view.pNext = NULL; color_image_view.format = info.format; color_image_view.components.r = VK_COMPONENT_SWIZZLE_R; color_image_view.components.g = VK_COMPONENT_SWIZZLE_G; color_image_view.components.b = VK_COMPONENT_SWIZZLE_B; color_image_view.components.a = VK_COMPONENT_SWIZZLE_A; color_image_view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_image_view.subresourceRange.baseMipLevel = 0; color_image_view.subresourceRange.levelCount = 1; color_image_view.subresourceRange.baseArrayLayer = 0; color_image_view.subresourceRange.layerCount = 1; color_image_view.viewType = VK_IMAGE_VIEW_TYPE_2D; color_image_view.flags = 0; info.buffers[i].image = swapchainImages[i]; set_image_layout(info, info.buffers[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); color_image_view.image = info.buffers[i].image; res = 
vkCreateImageView(info.device, &color_image_view, NULL, &info.buffers[i].view); assert(res == VK_SUCCESS); } free(swapchainImages); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ /* Clean Up */ VkCommandBuffer cmd_bufs[1] = {info.cmd}; vkFreeCommandBuffers(info.device, info.cmd_pool, 1, cmd_bufs); vkDestroyCommandPool(info.device, info.cmd_pool, NULL); for (uint32_t i = 0; i < info.swapchainImageCount; i++) { vkDestroyImageView(info.device, info.buffers[i].view, NULL); } vkDestroySwapchainKHR(info.device, info.swap_chain, NULL); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
#include "CommandPool.h"

namespace Vulkan {

// Wrapper around VkCommandPool. The constructor only fills in createInfo;
// the pool itself is created by an explicit create() call and released by
// destroy() (also invoked from the destructor).
CommandPool::CommandPool(Device* device, uint32_t queueFamilyIndex) : Devicer(device) {
    createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    // Allow command buffers from this pool to be reset individually.
    createInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
    createInfo.queueFamilyIndex = queueFamilyIndex;
}

CommandPool::~CommandPool() {
    destroy();
}

// Creates the Vulkan pool from the createInfo prepared in the constructor.
void CommandPool::create() {
    VULKAN_CHECK_RESULT(vkCreateCommandPool(device->getHandle(), &createInfo, nullptr, &handle), "Failed to create command pool")
}

// Destroys the pool handle (the macro is expected to guard a null handle).
void CommandPool::destroy() {
    VULKAN_DESTROY_HANDLE(vkDestroyCommandPool(device->getHandle(), handle, nullptr))
}

// Resets the pool, returning all command-buffer memory to the system.
void CommandPool::reset() {
    VULKAN_CHECK_RESULT(vkResetCommandPool(device->getHandle(), handle, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT), "Failed to reset command pool");
}

} // Vulkan
// Executes every deferred Vulkan destruction queued on this delete list
// and empties each queue afterwards. The sequence is deliberate: objects
// that reference others (command pools, pipelines, framebuffers) are
// destroyed before the objects they depend on (layouts, render passes),
// and user callbacks run last.
void VulkanDeleteList::PerformDeletes(VkDevice device) {
	for (auto &pool : cmdPools_)
		vkDestroyCommandPool(device, pool, nullptr);
	cmdPools_.clear();

	for (auto &dp : descPools_)
		vkDestroyDescriptorPool(device, dp, nullptr);
	descPools_.clear();

	for (auto &shader : modules_)
		vkDestroyShaderModule(device, shader, nullptr);
	modules_.clear();

	for (auto &buffer : buffers_)
		vkDestroyBuffer(device, buffer, nullptr);
	buffers_.clear();

	for (auto &view : bufferViews_)
		vkDestroyBufferView(device, view, nullptr);
	bufferViews_.clear();

	for (auto &img : images_)
		vkDestroyImage(device, img, nullptr);
	images_.clear();

	for (auto &view : imageViews_)
		vkDestroyImageView(device, view, nullptr);
	imageViews_.clear();

	// Memory is released only after the buffers/images bound to it are gone.
	for (auto &memory : deviceMemory_)
		vkFreeMemory(device, memory, nullptr);
	deviceMemory_.clear();

	for (auto &smp : samplers_)
		vkDestroySampler(device, smp, nullptr);
	samplers_.clear();

	for (auto &pipe : pipelines_)
		vkDestroyPipeline(device, pipe, nullptr);
	pipelines_.clear();

	for (auto &cache : pipelineCaches_)
		vkDestroyPipelineCache(device, cache, nullptr);
	pipelineCaches_.clear();

	for (auto &rp : renderPasses_)
		vkDestroyRenderPass(device, rp, nullptr);
	renderPasses_.clear();

	for (auto &fb : framebuffers_)
		vkDestroyFramebuffer(device, fb, nullptr);
	framebuffers_.clear();

	for (auto &layout : pipelineLayouts_)
		vkDestroyPipelineLayout(device, layout, nullptr);
	pipelineLayouts_.clear();

	for (auto &layout : descSetLayouts_)
		vkDestroyDescriptorSetLayout(device, layout, nullptr);
	descSetLayouts_.clear();

	// Arbitrary user callbacks always run after every Vulkan object is gone.
	for (auto &cb : callbacks_)
		cb.func(cb.userdata);
	callbacks_.clear();
}
int main(){ printf("This code initializes a Vulkan device"); /** Steps required 1. Create Vulkan Instance 2. Enumerate the GPUs 3. Query Queues on the GPU (Queues represent on what 'channels' will the data process, query the queue for COMPUTE type queue or GRAPHICS type queue 4. Create a queue priority 5. Create a device with information from 2, 3 and 4 **/ /** 1. Create Vulkan Instance **/ VkApplicationInfo appInfo = {}; appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; appInfo.pNext = NULL; appInfo.pApplicationName = APP_SHORT_NAME; appInfo.applicationVersion = 1; appInfo.pEngineName = APP_SHORT_NAME; appInfo.engineVersion = 1; appInfo.apiVersion = VK_API_VERSION; VkInstanceCreateInfo icInfo = {}; icInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; icInfo.pNext = NULL; icInfo.flags = 0; icInfo.pApplicationInfo = & appInfo; icInfo.enabledExtensionCount = 0; icInfo.ppEnabledExtensionNames = NULL; icInfo.enabledLayerCount = 0; icInfo.enabledLayerCount = NULL; VkInstance instance; VkResult res; res = vkCreateInstance(&icInfo, NULL, &instance); if(res == VK_ERROR_INCOMPATIBLE_DRIVER){ printf("Cannot find a Vulkan Compatible ICD\n"); exit(-1); } else if(res){ printf("Some error occured\n"); exit(-1); } printf("Yay! Vulkan is initialized\n"); /** 2. Enumerate the GPUs **/ uint32_t gpuCount = 0; res = vkEnumeratePhysicalDevices(instance, &gpuCount, NULL); printf("found %d gpus\n", gpuCount); VkPhysicalDevice* gpus = new VkPhysicalDevice[gpuCount]; printf("Listing gpus...\n", gpuCount); res = vkEnumeratePhysicalDevices(instance, &gpuCount, gpus); while(++idx < gpuCount){ VkPhysicalDeviceProperties props= {}; vkGetPhysicalDeviceProperties(gpus[idx], &props); printf("%d-%d-%d-%d-%s\n", props.apiVersion, props.driverVersion, props.vendorID, props.deviceID, props.deviceName); } /** 3. Query for the supported queues **/ /** From renderdocs vulkan Command buffers are submitted to a VkQueue. The notion of queues are how work becomes serialised to be passed to the GPU. 
A VkPhysicalDevice (remember way back? The GPU handle) can report a number of queue families with different capabilities. e.g. a graphics queue family and a compute-only queue family. When you create your device you ask for a certain number of queues from each family, and then you can enumerate them from the device after creation with vkGetDeviceQueue(). **/ uint32_t queue_count = 0; vkGetPhysicalDeviceQueueFamilyProperties(gpus[0], &queue_count, NULL); if(queue_count <= 0){ printf("No Queues found.. aborting\n"); exit(-1); } VkQueueFamilyProperties* queue_props = new VkQueueFamilyProperties[queue_count]; vkGetPhysicalDeviceQueueFamilyProperties(gpus[0], &queue_count, queue_props); float queue_priorities[1] = {0.0}; /** 4. Create a queue priority **/ VkDeviceQueueCreateInfo qi = {}; qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; qi.pNext = NULL; qi.queueCount = 1; qi.pQueuePriorities = queue_priorities; idx = -1; while(++idx < queue_count){ if(queue_props[idx].queueFlags & VK_QUEUE_GRAPHICS_BIT){ //Look for a queue that has Graphics capabilities qi.queueFamilyIndex = idx; break; } } /** 5. 
Create a device with information from 2, 3 and 4 **/ VkDeviceCreateInfo dci = {}; dci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; dci.pNext = NULL; dci.queueCreateInfoCount = 1; dci.pQueueCreateInfos = &qi; dci.enabledExtensionCount = 0; dci.ppEnabledExtensionNames = NULL; dci.enabledLayerCount = 0; dci.ppEnabledLayerNames = NULL; dci.pEnabledFeatures = NULL; VkDevice device; res = vkCreateDevice(gpus[0], &dci, NULL, &device); if(res){ printf("There was a problem creating the device"); exit(-1); } /** All's great, first thing lets create a command buffer **/ VkCommandPoolCreateInfo cpci = {}; cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cpci.pNext = NULL; cpci.queueFamilyIndex = qi.queueFamilyIndex; cpci.flags = 0; VkCommandPool cp; res = vkCreateCommandPool(device, &cpci, NULL, &cp); if(res){ printf("There was a problem creating a command pool"); exit(-1); } /** Create a command buffer from the command pool **/ VkCommandBufferAllocateInfo cbai = {}; cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cbai.pNext = NULL; cbai.commandPool = cp; cbai.commandBufferCount = 1; cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; VkCommandBuffer* commandBuffer = new VkCommandBuffer[1]; //single command res = vkAllocateCommandBuffers(device, &cbai, commandBuffer); if(res){ printf("There was a problem creating a command buffer"); exit(-1); } /** no, we never leave the crap behind **/ vkFreeCommandBuffers(device, cp, 1, commandBuffer); vkDestroyCommandPool(device, cp, NULL); vkDestroyDevice(device, NULL); printf("Device created"); vkDestroyInstance(instance, NULL); delete[] gpus; return 0; }
// Free this command buffer back to its pool, then destroy the pool itself.
// NOTE(review): vkDestroyCommandPool implicitly frees *every* command buffer
// still allocated from commandPool, so this is only correct if this object
// is the pool's sole user — confirm against the code that owns the pool.
// Caller must ensure no submission using this buffer is still pending.
void CommandBuffer::destroy() { vkFreeCommandBuffers(device, commandPool, 1, &commandBuffer); vkDestroyCommandPool(device, commandPool, nullptr); }
int sample_main() { VkResult U_ASSERT_ONLY res; char sample_title[] = "MT Cmd Buffer Sample"; const bool depthPresent = false; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &info.presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = 0; pPipelineLayoutCreateInfo.pSetLayouts = NULL; res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); init_renderpass( info, depthPresent, false); // Can't clear in renderpass load because we re-use pipeline init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); /* The binding and attributes should be the same for all 
3 vertex buffers, * so init here */ info.vi_binding.binding = 0; info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; info.vi_binding.stride = sizeof(triData[0]); info.vi_attribs[0].binding = 0; info.vi_attribs[0].location = 0; info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[0].offset = 0; info.vi_attribs[1].binding = 0; info.vi_attribs[1].location = 1; info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[1].offset = 16; init_pipeline_cache(info); init_pipeline(info, depthPresent); VkImageSubresourceRange srRange = {}; srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; srRange.baseMipLevel = 0; srRange.levelCount = VK_REMAINING_MIP_LEVELS; srRange.baseArrayLayer = 0; srRange.layerCount = VK_REMAINING_ARRAY_LAYERS; VkClearColorValue clear_color[1]; clear_color[0].float32[0] = 0.2f; clear_color[0].float32[1] = 0.2f; clear_color[0].float32[2] = 0.2f; clear_color[0].float32[3] = 0.2f; /* We need to do the clear here instead of as a load op since all 3 threads * share the same pipeline / renderpass */ set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); vkCmdClearColorImage(info.cmd, info.buffers[info.current_buffer].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFence clearFence; init_fence(info, clearFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &info.presentCompleteSemaphore; 
submit_info[0].pWaitDstStageMask = NULL; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, clearFence); assert(!res); do { res = vkWaitForFences(info.device, 1, &clearFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, clearFence, NULL); /* VULKAN_KEY_START */ /* Use the fourth slot in the command buffer array for the presentation */ /* barrier using the command buffer in info */ threadCmdBufs[3] = info.cmd; sample_platform_thread vk_threads[3]; for (size_t i = 0; i < 3; i++) { sample_platform_thread_create(&vk_threads[i], &per_thread_code, (void *)i); } VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = 0; cmd_buf_info.pInheritanceInfo = NULL; res = vkBeginCommandBuffer(threadCmdBufs[3], &cmd_buf_info); assert(res == VK_SUCCESS); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = 
info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(threadCmdBufs[3], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(threadCmdBufs[3]); assert(res == VK_SUCCESS); pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 4; /* 3 from threads + prePresentBarrier */ submit_info[0].pCommandBuffers = threadCmdBufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Wait for all of the threads to finish */ for (int i = 0; i < 3; i++) { sample_platform_thread_join(vk_threads[i], NULL); } VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(!res); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); execute_present_image(info); wait_seconds(1); /* VULKAN_KEY_END */ vkDestroyBuffer(info.device, vertex_buffer[0].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[1].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[2].buf, NULL); vkFreeMemory(info.device, vertex_buffer[0].mem, NULL); vkFreeMemory(info.device, vertex_buffer[1].mem, NULL); vkFreeMemory(info.device, vertex_buffer[2].mem, NULL); for (int i = 0; i < 3; i++) { vkFreeCommandBuffers(info.device, threadCmdPools[i], 1, &threadCmdBufs[i]); vkDestroyCommandPool(info.device, threadCmdPools[i], NULL); } 
vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_window(info); destroy_device(info); destroy_instance(info); return 0; }