// Tears down every Vulkan object created by the demo, roughly in reverse
// creation order: descriptor pool, per-frame sync/command objects, swapchain
// back-buffer views/framebuffers, render pass, swapchain, surface, the
// optional debug-report callback, and finally the device and instance.
// All handles live in g_* globals; caller must ensure the GPU is idle.
static void cleanup_vulkan()
{
    vkDestroyDescriptorPool(g_Device, g_DescriptorPool, g_Allocator);
    // Per queued frame: fence, its command buffer and pool, and the two
    // semaphores used for acquire/present synchronization.
    for (int i = 0; i < IMGUI_VK_QUEUED_FRAMES; i++)
    {
        vkDestroyFence(g_Device, g_Fence[i], g_Allocator);
        vkFreeCommandBuffers(g_Device, g_CommandPool[i], 1, &g_CommandBuffer[i]);
        vkDestroyCommandPool(g_Device, g_CommandPool[i], g_Allocator);
        vkDestroySemaphore(g_Device, g_PresentCompleteSemaphore[i], g_Allocator);
        vkDestroySemaphore(g_Device, g_RenderCompleteSemaphore[i], g_Allocator);
    }
    // Back-buffer views/framebuffers; the images themselves are owned by the
    // swapchain and are released by vkDestroySwapchainKHR below.
    for (uint32_t i = 0; i < g_BackBufferCount; i++)
    {
        vkDestroyImageView(g_Device, g_BackBufferView[i], g_Allocator);
        vkDestroyFramebuffer(g_Device, g_Framebuffer[i], g_Allocator);
    }
    vkDestroyRenderPass(g_Device, g_RenderPass, g_Allocator);
    vkDestroySwapchainKHR(g_Device, g_Swapchain, g_Allocator);
    vkDestroySurfaceKHR(g_Instance, g_Surface, g_Allocator);
#ifdef IMGUI_VULKAN_DEBUG_REPORT
    // get the proc address of the function pointer, required for used extensions
    auto vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(g_Instance, "vkDestroyDebugReportCallbackEXT");
    vkDestroyDebugReportCallbackEXT(g_Instance, g_Debug_Report, g_Allocator);
#endif // IMGUI_VULKAN_DEBUG_REPORT
    vkDestroyDevice(g_Device, g_Allocator);
    vkDestroyInstance(g_Instance, g_Allocator);
}
std::unique_ptr<VulkanContext> VulkanContext::Create(VkInstance instance, VkPhysicalDevice gpu, VkSurfaceKHR surface, VideoConfig* config, bool enable_validation_layer) { std::unique_ptr<VulkanContext> context = std::make_unique<VulkanContext>(instance, gpu); // Initialize DriverDetails so that we can check for bugs to disable features if needed. DriverDetails::Init(DriverDetails::API_VULKAN, DriverDetails::TranslatePCIVendorID(context->m_device_properties.vendorID), DriverDetails::DRIVER_UNKNOWN, static_cast<double>(context->m_device_properties.driverVersion), DriverDetails::Family::UNKNOWN); // Enable debug reports if validation layer is enabled. if (enable_validation_layer) context->EnableDebugReports(); // Attempt to create the device. if (!context->CreateDevice(surface, enable_validation_layer)) { // Since we are destroying the instance, we're also responsible for destroying the surface. if (surface != VK_NULL_HANDLE) vkDestroySurfaceKHR(instance, surface, nullptr); return nullptr; } // Update video config with features. PopulateBackendInfoFeatures(config, gpu, context->m_device_features); PopulateBackendInfoMultisampleModes(config, gpu, context->m_device_properties); return context; }
std::unique_ptr<VulkanContext> VulkanContext::Create(VkInstance instance, VkPhysicalDevice gpu, VkSurfaceKHR surface, bool enable_debug_reports, bool enable_validation_layer) { std::unique_ptr<VulkanContext> context = std::make_unique<VulkanContext>(instance, gpu); // Initialize DriverDetails so that we can check for bugs to disable features if needed. context->InitDriverDetails(); // Enable debug reports if the "Host GPU" log category is enabled. if (enable_debug_reports) context->EnableDebugReports(); // Attempt to create the device. if (!context->CreateDevice(surface, enable_validation_layer)) { // Since we are destroying the instance, we're also responsible for destroying the surface. if (surface != VK_NULL_HANDLE) vkDestroySurfaceKHR(instance, surface, nullptr); return nullptr; } return context; }
void destroy_vulkan_renderer_backend(ReaperRoot& root, VulkanBackend& backend) { REAPER_PROFILE_SCOPE("Vulkan", MP_RED1); log_info(root, "vulkan: destroying backend"); destroy_vulkan_wm_swapchain(root, backend, backend.presentInfo); log_debug(root, "vulkan: waiting for current work to finish"); Assert(vkDeviceWaitIdle(backend.device) == VK_SUCCESS); log_debug(root, "vulkan: destroying logical device"); vkDestroyDevice(backend.device, nullptr); log_debug(root, "vulkan: destroying presentation surface"); vkDestroySurfaceKHR(backend.instance, backend.presentInfo.surface, nullptr); delete root.renderer->window; root.renderer->window = nullptr; #if defined(REAPER_DEBUG) log_debug(root, "vulkan: detaching debug callback"); vulkan_destroy_debug_callback(backend); #endif vkDestroyInstance(backend.instance, nullptr); log_debug(root, "vulkan: unloading {}", REAPER_VK_LIB_NAME); Assert(backend.vulkanLib != nullptr); dynlib::close(backend.vulkanLib); backend.vulkanLib = nullptr; }
// Destroys the global presentation surface (if one exists) and resets the
// handle so a repeated call is a harmless no-op.
void vkgfx_release_surface(void)
{
    if (vkgfx_surface == VK_NULL_HANDLE)
        return;

    vkDestroySurfaceKHR(vkgfx_instance, vkgfx_surface, vkgfx_allocator);
    vkgfx_surface = VK_NULL_HANDLE;
}
// Destroys all Vulkan objects owned by this instance and unloads the Vulkan
// shared library. Safe on a partially-initialized object: every handle is
// checked against VK_NULL_HANDLE before being destroyed.
VulkanCommon::~VulkanCommon() {
  if( Vulkan.Device != VK_NULL_HANDLE ) {
    // Ensure no queued GPU work still references the objects destroyed below.
    vkDeviceWaitIdle( Vulkan.Device );

    // Image views first; the swapchain owns the images themselves.
    for( size_t i = 0; i < Vulkan.SwapChain.Images.size(); ++i ) {
      if( Vulkan.SwapChain.Images[i].ImageView != VK_NULL_HANDLE ) {
        vkDestroyImageView( GetDevice(), Vulkan.SwapChain.Images[i].ImageView, nullptr );
      }
    }
    if( Vulkan.SwapChain.Handle != VK_NULL_HANDLE ) {
      vkDestroySwapchainKHR( Vulkan.Device, Vulkan.SwapChain.Handle, nullptr );
    }
    vkDestroyDevice( Vulkan.Device, nullptr );
  }
  // Surface and instance are independent of the device handle above.
  if( Vulkan.PresentationSurface != VK_NULL_HANDLE ) {
    vkDestroySurfaceKHR( Vulkan.Instance, Vulkan.PresentationSurface, nullptr );
  }
  if( Vulkan.Instance != VK_NULL_HANDLE ) {
    vkDestroyInstance( Vulkan.Instance, nullptr );
  }
  // Finally drop the dynamically loaded Vulkan library (platform-specific).
  if( VulkanLibrary ) {
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    FreeLibrary( VulkanLibrary );
#elif defined(VK_USE_PLATFORM_XCB_KHR) || defined(VK_USE_PLATFORM_XLIB_KHR)
    dlclose( VulkanLibrary );
#endif
  }
}
// Stops the main loop and releases Vulkan + Win32 resources.
// FIX: the original destroyed the instance *before* the device; per the
// Vulkan spec the logical device must be destroyed before the instance it
// was created from. Destruction now runs in reverse creation order:
// device -> surface -> instance.
void Win32CleanShutdown(HWND window)
{
    g_Running = false;
    vkDestroyDevice(device_handle, NULL);
    vkDestroySurfaceKHR(VKInstance, surface, NULL);
    vkDestroyInstance(VKInstance, NULL);
    ReleaseDC(window, g_WindowDC);
    PostQuitMessage(0);
}
// Releases the window's presentation surface; repeated calls are no-ops
// because the handle is reset after destruction.
void VulkanWindow::FinalizeSurface()
{
    if ( mVkSurfaceKHR == VK_NULL_HANDLE )
    {
        return; // nothing to release
    }
    vkDestroySurfaceKHR ( mVulkanRenderer.GetInstance(), mVkSurfaceKHR, nullptr );
    mVkSurfaceKHR = VK_NULL_HANDLE;
}
// Destroys the swapchain (when present) and the surface, clearing both
// handles so the context can be re-initialized later.
void VulkanContext::DestroyObjects() {
	ILOG("VulkanContext::DestroyObjects (including swapchain)");
	if (swapchain_ != VK_NULL_HANDLE) {
		vkDestroySwapchainKHR(device_, swapchain_, nullptr);
	}
	swapchain_ = VK_NULL_HANDLE;

	vkDestroySurfaceKHR(instance_, surface_, nullptr);
	surface_ = VK_NULL_HANDLE;
}
// Free all Vulkan resources used by the swap chain void cleanup() { for (uint32_t i = 0; i < imageCount; i++) { vkDestroyImageView(device, buffers[i].view, nullptr); } fpDestroySwapchainKHR(device, swapChain, nullptr); vkDestroySurfaceKHR(instance, surface, nullptr); }
// Destroys the wrapped VkSurfaceKHR (if any) and marks it as released.
void Surface::destroy()
{
    if (!surface)
        return;

    vkDestroySurfaceKHR(instance, surface, nullptr);
    surface = VK_NULL_HANDLE;
}
/* Releases the swap chain's per-image views, the swapchain object itself
 * (through the loaded extension pointer), and the presentation surface. */
void swapChainCleanup( struct SwapChain *swapChain )
{
    int idx;

    for( idx = 0; idx < swapChain->nImages; idx++ )
    {
        vkDestroyImageView( swapChain->device, swapChain->images[idx], NULL );
    }

    swapChain->fpDestroySwapchainKHR( swapChain->device, swapChain->swapChain, NULL );
    vkDestroySurfaceKHR( swapChain->instance, swapChain->surface, NULL );
}
// Destroys the swapchain and surface handles, each only when valid, and
// nulls them so Destroy() is idempotent.
void SwapChain::Destroy()
{
	if (m_SwapChain != VK_NULL_HANDLE)
	{
		fpDestroySwapchainKHR(GetRawDevice(), m_SwapChain, nullptr);
		m_SwapChain = VK_NULL_HANDLE;
	}
	if (m_Surface != VK_NULL_HANDLE)
	{
		vkDestroySurfaceKHR(RHIRoot::GetInstance(), m_Surface, nullptr);
		m_Surface = VK_NULL_HANDLE;
	}
}
void VK_Surface::resize() { VkResult result; vkDestroySurfaceKHR(*instance, *surface, allocs); if ((result = glfwCreateWindowSurface(*instance, window, allocs, surface)) != VK_SUCCESS) { std::cout << "Vulkan Error: Failed to create surface!\n"; surface = VK_NULL_HANDLE; } glfwGetWindowSize(window, &width, &height); }
void VkContext::Destroy() { vkDeviceWaitIdle(dev); // Free command buffers if (drawCmdBuffers.size() > 0) { vkFreeCommandBuffers(dev, cmdPool, (u32)drawCmdBuffers.size(), &drawCmdBuffers[0]); } // Destroy command pools vkDestroyCommandPool(dev, cmdPool, nullptr); // Destroy semaphores vkDestroySemaphore(dev, acquireCompleteSemaphore, nullptr); vkDestroySemaphore(dev, renderCompleteSemaphore, nullptr); // Destroy swapchain image views for (u32 i = 0; i < swapchainImageViews.size(); i++) { vkDestroyImageView(dev, swapchainImageViews[i], nullptr); } // Destroy swapchain if (swapchain) { vkDestroySwapchainKHR(dev, swapchain, nullptr); } // Destroy surface if (surf) { vkDestroySurfaceKHR(instance, surf, nullptr); } // Destroy device if (dev) { vkDestroyDevice(dev, nullptr); } #ifdef VOXL_DEBUG // Destroy debug report callback if (msgCallback != VK_NULL_HANDLE) { vkDestroyDebugReportCallbackEXT(instance, msgCallback, nullptr); } #endif // Destroy instance if (instance) { vkDestroyInstance(instance, nullptr); } // Terminate GLFW glfwTerminate(); }
// Destroys the per-image views, the swapchain, and the surface. A null
// surface means the swap chain was never fully created, so there is
// nothing to release at all.
SwapChain::~SwapChain()
{
	if (VK_NULL_HANDLE == mVkSurfaceKHR)
		return;

	const VulkanRenderer& vulkanRenderer = static_cast<VulkanRenderer&>(getRenderer());
	const VkDevice vkDevice = vulkanRenderer.getContext().getVkDevice();

	// Views belong to us; the images themselves belong to the swapchain.
	for (uint32_t imageIndex = 0; imageIndex < mSwapchainImageCount; ++imageIndex)
	{
		vkDestroyImageView(vkDevice, mSwapChainBuffer[imageIndex].view, nullptr);
	}

	if (VK_NULL_HANDLE != mVkSwapchainKHR)
	{
		vkDestroySwapchainKHR(vkDevice, mVkSwapchainKHR, nullptr);
	}

	vkDestroySurfaceKHR(vulkanRenderer.getVulkanRuntimeLinking().getVkInstance(), mVkSurfaceKHR, nullptr);
}
void cleanup() { vkDestroyPipelineLayout(device, pipelineLayout, nullptr); vkDestroyRenderPass(device, renderPass, nullptr); for (auto imageView : swapChainImageViews) { vkDestroyImageView(device, imageView, nullptr); } vkDestroySwapchainKHR(device, swapChain, nullptr); vkDestroyDevice(device, nullptr); DestroyDebugReportCallbackEXT(instance, callback, nullptr); vkDestroySurfaceKHR(instance, surface, nullptr); vkDestroyInstance(instance, nullptr); glfwDestroyWindow(window); glfwTerminate(); }
void shutdown() { on_window_resize_listener = static_cast<size_t >(-1); if (vk_globals::device != nullptr) { vkDeviceWaitIdle(vk_globals::device); shutdown_swap_chain(); vkDestroyDevice(vk_globals::device, nullptr); vk_globals::device = nullptr; } if (vk_globals::surface != nullptr) { vkDestroySurfaceKHR(vk_globals::instance, vk_globals::surface, nullptr); } if (vk_globals::instance != nullptr) { vkDestroyInstance(vk_globals::instance, nullptr); } }
/**
 * Creates a window-system presentation surface for the given native
 * display/window pair and wraps it in a ref-counted ISurfaceSP.
 * Returns an empty ISurfaceSP on failure; if the wrapper cannot be
 * allocated, the raw VkSurfaceKHR is destroyed so nothing leaks.
 *
 * @ThreadSafe
 */
ISurfaceSP VKTS_APIENTRY wsiSurfaceCreate(const VkInstance instance, VKTS_NATIVE_DISPLAY nativeDisplay, VKTS_NATIVE_WINDOW nativeWindow)
{
    VkSurfaceKHR surface = _wsiSurfaceCreate(instance, nativeDisplay, nativeWindow);

    if (surface == VK_NULL_HANDLE)
    {
        return ISurfaceSP();
    }

    auto newInstance = new Surface(instance, nativeDisplay, nativeWindow, surface);

    // NOTE(review): plain `new` throws std::bad_alloc rather than returning
    // nullptr, so this branch only fires with a non-throwing allocator.
    // Kept as-is; it matches the project's defensive allocation style.
    if (!newInstance)
    {
        vkDestroySurfaceKHR(instance, surface, nullptr);

        return ISurfaceSP();
    }

    return ISurfaceSP(newInstance);
}
void _agpu_swap_chain::lostReferences() { if (graphicsQueue) graphicsQueue->release(); if (presentationQueue) presentationQueue->release(); for (auto fb : framebuffers) { if(fb) fb->release(); } for(auto semaphore : semaphores) { if(semaphore) vkDestroySemaphore(device->device, semaphore, nullptr); } if (handle) vkDestroySwapchainKHR(device->device, handle, nullptr); if(surface) vkDestroySurfaceKHR(device->vulkanInstance, surface, nullptr); }
void Window::_DeInitSurface() { vkDestroySurfaceKHR( _renderer->GetVulkanInstance(), _surface, nullptr ); }
bool initialize(android_app* app) { // Load Android vulkan and retrieve vulkan API function pointers if (!InitVulkan()) { LOGE("Vulkan is unavailable, install vulkan and re-start"); return false; } VkApplicationInfo appInfo = { .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, .pNext = nullptr, .apiVersion = VK_MAKE_VERSION(1, 0, 0), .applicationVersion = VK_MAKE_VERSION(1, 0, 0), .engineVersion = VK_MAKE_VERSION(1, 0, 0), .pApplicationName = "tutorial01_load_vulkan", .pEngineName = "tutorial", }; // prepare necessary extensions: Vulkan on Android need these to function std::vector<const char *> instanceExt, deviceExt; instanceExt.push_back("VK_KHR_surface"); instanceExt.push_back("VK_KHR_android_surface"); deviceExt.push_back("VK_KHR_swapchain"); // Create the Vulkan instance VkInstanceCreateInfo instanceCreateInfo { .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, .pNext = nullptr, .pApplicationInfo = &appInfo, .enabledExtensionCount = static_cast<uint32_t>(instanceExt.size()), .ppEnabledExtensionNames = instanceExt.data(), .enabledLayerCount = 0, .ppEnabledLayerNames = nullptr, }; CALL_VK(vkCreateInstance(&instanceCreateInfo, nullptr, &tutorialInstance)); // if we create a surface, we need the surface extension VkAndroidSurfaceCreateInfoKHR createInfo { .sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, .pNext = nullptr, .flags = 0, .window = app->window }; CALL_VK(vkCreateAndroidSurfaceKHR(tutorialInstance, &createInfo, nullptr, &tutorialSurface)); // Find one GPU to use: // On Android, every GPU device is equal -- supporting graphics/compute/present // for this sample, we use the very first GPU device found on the system uint32_t gpuCount = 0; CALL_VK(vkEnumeratePhysicalDevices(tutorialInstance, &gpuCount, nullptr)); VkPhysicalDevice tmpGpus[gpuCount]; CALL_VK(vkEnumeratePhysicalDevices(tutorialInstance, &gpuCount, tmpGpus)); tutorialGpu = tmpGpus[0]; // Pick up the first GPU Device // check for vulkan info on this GPU device VkPhysicalDeviceProperties 
gpuProperties; vkGetPhysicalDeviceProperties(tutorialGpu, &gpuProperties); LOGI("Vulkan Physical Device Name: %s", gpuProperties.deviceName); LOGI("Vulkan Physical Device Info: apiVersion: %x \n\t driverVersion: %x", gpuProperties.apiVersion, gpuProperties.driverVersion); LOGI("API Version Supported: %d.%d.%d", VK_VERSION_MAJOR(gpuProperties.apiVersion), VK_VERSION_MINOR(gpuProperties.apiVersion), VK_VERSION_PATCH(gpuProperties.apiVersion)); VkSurfaceCapabilitiesKHR surfaceCapabilities; vkGetPhysicalDeviceSurfaceCapabilitiesKHR(tutorialGpu, tutorialSurface, &surfaceCapabilities); LOGI("Vulkan Surface Capabilities:\n"); LOGI("\timage count: %u - %u\n", surfaceCapabilities.minImageCount, surfaceCapabilities.maxImageCount); LOGI("\tarray layers: %u\n", surfaceCapabilities.maxImageArrayLayers); LOGI("\timage size (now): %dx%d\n", surfaceCapabilities.currentExtent.width, surfaceCapabilities.currentExtent.height); LOGI("\timage size (extent): %dx%d - %dx%d\n", surfaceCapabilities.minImageExtent.width, surfaceCapabilities.minImageExtent.height, surfaceCapabilities.maxImageExtent.width, surfaceCapabilities.maxImageExtent.height); LOGI("\tusage: %x\n", surfaceCapabilities.supportedUsageFlags); LOGI("\tcurrent transform: %u\n", surfaceCapabilities.currentTransform); LOGI("\tallowed transforms: %x\n", surfaceCapabilities.supportedTransforms); LOGI("\tcomposite alpha flags: %u\n", surfaceCapabilities.currentTransform); // Create a logical device from GPU we picked float priorities[] = { 1.0f, }; VkDeviceQueueCreateInfo queueCreateInfo { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = nullptr, .flags = 0, .queueCount = 1, .queueFamilyIndex = 0, .pQueuePriorities = priorities, }; VkDeviceCreateInfo deviceCreateInfo { .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = nullptr, .queueCreateInfoCount = 1, .pQueueCreateInfos = &queueCreateInfo, .enabledLayerCount = 0, .ppEnabledLayerNames = nullptr, .enabledExtensionCount = static_cast<uint32_t>(deviceExt.size()), 
.ppEnabledExtensionNames = deviceExt.data(), .pEnabledFeatures = nullptr, }; CALL_VK(vkCreateDevice(tutorialGpu, &deviceCreateInfo, nullptr, &tutorialDevice)); initialized_ = true; return 0; } void terminate() { vkDestroySurfaceKHR(tutorialInstance, tutorialSurface, nullptr); vkDestroyDevice(tutorialDevice, nullptr); vkDestroyInstance(tutorialInstance, nullptr); initialized_ = false; } // Process the next main command. void handle_cmd(android_app* app, int32_t cmd) { switch (cmd) { case APP_CMD_INIT_WINDOW: // The window is being shown, get it ready. initialize(app); break; case APP_CMD_TERM_WINDOW: // The window is being hidden or closed, clean it up. terminate(); break; default: LOGI("event not handled: %d", cmd); } }
// Creates a Win32 presentation surface on out_swap_chain, finds graphics and
// present-capable queue families, selects a surface format, creates the
// logical device, and fills in the queue handles/indices on the out params.
// Returns VK_SUCCESS, or VK_ERROR_INITIALIZATION_FAILED when the out params
// are null or no suitable queue families exist.
// FIXES: VkWin32SurfaceCreateInfoKHR was stack-uninitialized, leaving
// pNext/flags indeterminate (undefined behavior); the failure path assigned
// nullptr to a VkSurfaceKHR, which is a non-dispatchable handle and must be
// reset with VK_NULL_HANDLE (nullptr does not even compile on 32-bit targets
// where the handle is a 64-bit integer).
VkResult create_surface(VkInstance vk_instance, VkPhysicalDevice gpu, VkDevice* out_device, DrawCommandBuffer* out_draw_command_buffer, SwapChain* out_swap_chain)
{
    if ((out_swap_chain == nullptr) || (out_draw_command_buffer == nullptr))
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // FIX: zero-initialize so pNext/flags are not left indeterminate.
    VkWin32SurfaceCreateInfoKHR surfaceCreateInfo = {};
    surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
    surfaceCreateInfo.hinstance = out_swap_chain->instance;
    surfaceCreateInfo.hwnd = out_swap_chain->window;
    VK_THROW(vkCreateWin32SurfaceKHR(vk_instance, &surfaceCreateInfo, NULL, &(out_swap_chain->surface)));

    // Query the queue families and which of them can present to this surface.
    uint32_t queue_count;
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_count, nullptr);
    assert(queue_count > 0);

    std::vector<VkBool32> support_presentable_swap_chain(queue_count);
    std::vector<VkQueueFamilyProperties> properties(queue_count);
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_count, properties.data());
    for (uint32_t qidx = 0; qidx < queue_count; ++qidx)
    {
        vkGetPhysicalDeviceSurfaceSupportKHR(gpu, qidx, out_swap_chain->surface, &(support_presentable_swap_chain[qidx]));
    }

    // Prefer a single family that supports both graphics and present.
    uint32_t graphics_queue = UINT32_MAX;
    uint32_t swap_chain_queue = UINT32_MAX;
    for (uint32_t qidx = 0; qidx < queue_count; ++qidx)
    {
        if (check_flag(properties[qidx].queueFlags, VK_QUEUE_GRAPHICS_BIT) && properties[qidx].queueCount > 0)
        {
            graphics_queue = qidx;
            if (support_presentable_swap_chain[qidx])
            {
                swap_chain_queue = qidx;
                break;
            }
        }
    }

    if (swap_chain_queue == UINT32_MAX)
    {
        // Can't find a graphics queue that also supports presentation —
        // fall back to any present-capable family (two distinct queues).
        for (uint32_t qidx = 0; qidx < queue_count; ++qidx)
        {
            if (support_presentable_swap_chain[qidx] && (properties[qidx].queueCount > 0))
            {
                swap_chain_queue = qidx;
                break;
            }
        }
    }

    // Generate error if could not find both a graphics and a present queue
    if ((graphics_queue == UINT32_MAX) || (swap_chain_queue == UINT32_MAX))
    {
        vkDestroySurfaceKHR(vk_instance, out_swap_chain->surface, nullptr);
        out_swap_chain->surface = VK_NULL_HANDLE; // FIX: was `nullptr`
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    uint32_t format_count;
    VK_THROW(vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, out_swap_chain->surface, &format_count, NULL));
    std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
    VK_THROW(vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, out_swap_chain->surface, &format_count, surface_formats.data()));

    // If the format list includes just one entry of VK_FORMAT_UNDEFINED,
    // the surface has no preferred format. Otherwise, at least one
    // supported format will be returned
    if (format_count == 1 && surface_formats[0].format == VK_FORMAT_UNDEFINED)
    {
        out_swap_chain->surface_format.format = VK_FORMAT_B8G8R8A8_UNORM;
        out_swap_chain->surface_format.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    }
    else
    {
        assert(format_count > 0);
        out_swap_chain->surface_format = surface_formats[0];
    }

    VK_THROW(create_device(gpu, graphics_queue, swap_chain_queue, out_device));

    // Hand back the queues and the family indices they came from.
    vkGetDeviceQueue(*out_device, graphics_queue, 0, &out_draw_command_buffer->draw_queue);
    vkGetDeviceQueue(*out_device, swap_chain_queue, 0, &out_swap_chain->present_queue);
    out_draw_command_buffer->queue_family_idx = graphics_queue;
    out_swap_chain->queue_family_idx = swap_chain_queue;
    out_swap_chain->gpu = gpu;
    return VK_SUCCESS;
}
// ---------------------------------------------------------------------------
// Vulkan surface-context helpers. Code unchanged; formatting restored and
// comments added. The final getVkFormat(TextureFormat) switch continues past
// the end of this span.
// ---------------------------------------------------------------------------

// Creates the VkSwapchainKHR for surfaceContext, records one SwapContext
// entry (image handle + format) per swapchain image, creates the per-image
// views and the acquire/present semaphores, and zeroes the depth attachment.
void createSwapChainAndImages(VulkanContext& context, VulkanSurfaceContext& surfaceContext) {
    // Pick an image count and format. According to section 30.5 of VK 1.1, maxImageCount of zero
    // apparently means "that there is no limit on the number of images, though there may be limits
    // related to the total amount of memory used by presentable images."
    uint32_t desiredImageCount = 2;
    const uint32_t maxImageCount = surfaceContext.surfaceCapabilities.maxImageCount;
    if (desiredImageCount < surfaceContext.surfaceCapabilities.minImageCount ||
            (maxImageCount != 0 && desiredImageCount > maxImageCount)) {
        utils::slog.e << "Swap chain does not support " << desiredImageCount << " images.\n";
        desiredImageCount = surfaceContext.surfaceCapabilities.minImageCount;
    }
    // Default to the first reported format, but prefer plain RGBA8 UNORM.
    surfaceContext.surfaceFormat = surfaceContext.surfaceFormats[0];
    for (const VkSurfaceFormatKHR& format : surfaceContext.surfaceFormats) {
        if (format.format == VK_FORMAT_R8G8B8A8_UNORM) {
            surfaceContext.surfaceFormat = format;
            break;
        }
    }
    // Prefer INHERIT composite alpha when supported, otherwise OPAQUE.
    const auto compositionCaps = surfaceContext.surfaceCapabilities.supportedCompositeAlpha;
    const auto compositeAlpha = (compositionCaps & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) ?
            VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;

    // Create the low-level swap chain.
    const auto size = surfaceContext.surfaceCapabilities.currentExtent;
    VkSwapchainCreateInfoKHR createInfo {
        .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
        .surface = surfaceContext.surface,
        .minImageCount = desiredImageCount,
        .imageFormat = surfaceContext.surfaceFormat.format,
        .imageColorSpace = surfaceContext.surfaceFormat.colorSpace,
        .imageExtent = size,
        .imageArrayLayers = 1,
        .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
        .preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
        .compositeAlpha = compositeAlpha,
        .presentMode = VK_PRESENT_MODE_FIFO_KHR,
        .clipped = VK_TRUE
    };
    VkSwapchainKHR swapchain;
    VkResult result = vkCreateSwapchainKHR(context.device, &createInfo, VKALLOC, &swapchain);
    // NOTE(review): this checks vkCreateSwapchainKHR but the message names
    // vkGetPhysicalDeviceSurfaceFormatsKHR — looks like a copy/paste slip.
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkGetPhysicalDeviceSurfaceFormatsKHR error.");
    surfaceContext.swapchain = swapchain;

    // Extract the VkImage handles from the swap chain.
    uint32_t imageCount;
    result = vkGetSwapchainImagesKHR(context.device, swapchain, &imageCount, nullptr);
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkGetSwapchainImagesKHR count error.");
    surfaceContext.swapContexts.resize(imageCount);
    std::vector<VkImage> images(imageCount);
    result = vkGetSwapchainImagesKHR(context.device, swapchain, &imageCount, images.data());
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkGetSwapchainImagesKHR error.");
    for (size_t i = 0; i < images.size(); ++i) {
        surfaceContext.swapContexts[i].attachment = {
            .image = images[i],
            .format = surfaceContext.surfaceFormat.format
        };
    }
    utils::slog.i << "vkCreateSwapchain" << ": " << size.width << "x" << size.height << ", "
            << surfaceContext.surfaceFormat.format << ", "
            << surfaceContext.surfaceFormat.colorSpace << ", "
            << imageCount << utils::io::endl;

    // Create image views.
    VkImageViewCreateInfo ivCreateInfo = {};
    ivCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivCreateInfo.format = surfaceContext.surfaceFormat.format;
    ivCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    ivCreateInfo.subresourceRange.levelCount = 1;
    ivCreateInfo.subresourceRange.layerCount = 1;
    for (size_t i = 0; i < images.size(); ++i) {
        ivCreateInfo.image = images[i];
        result = vkCreateImageView(context.device, &ivCreateInfo, VKALLOC,
                &surfaceContext.swapContexts[i].attachment.view);
        ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkCreateImageView error.");
    }
    createSemaphore(context.device, &surfaceContext.imageAvailable);
    createSemaphore(context.device, &surfaceContext.renderingFinished);
    surfaceContext.depth = {};
}

// Creates a device-local depth image + view matching the surface extent and
// records a layout transition to DEPTH_STENCIL_ATTACHMENT_OPTIMAL into
// context.cmdbuffer (asserted non-null, i.e. recording must be in progress).
void createDepthBuffer(VulkanContext& context, VulkanSurfaceContext& surfaceContext, VkFormat depthFormat) {
    assert(context.cmdbuffer);

    // Create an appropriately-sized device-only VkImage.
    const auto size = surfaceContext.surfaceCapabilities.currentExtent;
    VkImage depthImage;
    VkImageCreateInfo imageInfo {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .extent = { size.width, size.height, 1 },
        .format = depthFormat,
        .mipLevels = 1,
        .arrayLayers = 1,
        .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
        .samples = VK_SAMPLE_COUNT_1_BIT,
    };
    VkResult error = vkCreateImage(context.device, &imageInfo, VKALLOC, &depthImage);
    ASSERT_POSTCONDITION(!error, "Unable to create depth image.");

    // Allocate memory for the VkImage and bind it.
    VkMemoryRequirements memReqs;
    vkGetImageMemoryRequirements(context.device, depthImage, &memReqs);
    VkMemoryAllocateInfo allocInfo {
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        .allocationSize = memReqs.size,
        .memoryTypeIndex = selectMemoryType(context, memReqs.memoryTypeBits,
                VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
    };
    error = vkAllocateMemory(context.device, &allocInfo, nullptr, &surfaceContext.depth.memory);
    ASSERT_POSTCONDITION(!error, "Unable to allocate depth memory.");
    error = vkBindImageMemory(context.device, depthImage, surfaceContext.depth.memory, 0);
    ASSERT_POSTCONDITION(!error, "Unable to bind depth memory.");

    // Create a VkImageView so that we can attach depth to the framebuffer.
    VkImageView depthView;
    // NOTE(review): nested designators (.subresourceRange.aspectMask) are a
    // compiler extension rather than standard C++ — kept as written.
    VkImageViewCreateInfo viewInfo {
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .image = depthImage,
        .viewType = VK_IMAGE_VIEW_TYPE_2D,
        .format = depthFormat,
        .subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
        .subresourceRange.levelCount = 1,
        .subresourceRange.layerCount = 1,
    };
    error = vkCreateImageView(context.device, &viewInfo, VKALLOC, &depthView);
    ASSERT_POSTCONDITION(!error, "Unable to create depth view.");

    // Transition the depth image into an optimal layout.
    VkImageMemoryBarrier barrier {
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = depthImage,
        .subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
        .subresourceRange.levelCount = 1,
        .subresourceRange.layerCount = 1,
        .dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
    };
    vkCmdPipelineBarrier(context.cmdbuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);

    // Go ahead and set the depth attachment fields, which serves as a signal to VulkanDriver that
    // it is now ready.
    surfaceContext.depth.view = depthView;
    surfaceContext.depth.image = depthImage;
    surfaceContext.depth.format = depthFormat;
}

// Records and submits a one-off command buffer (the current swap context's)
// whose sole content is the depth-buffer creation + barrier from
// createDepthBuffer. Waits on / resets the context's fence around the work.
void transitionDepthBuffer(VulkanContext& context, VulkanSurfaceContext& sc, VkFormat depthFormat) {
    // Begin a new command buffer solely for the purpose of transitioning the image layout.
    SwapContext& swap = getSwapContext(context);
    VkResult result = vkWaitForFences(context.device, 1, &swap.fence, VK_FALSE, UINT64_MAX);
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkWaitForFences error.");
    result = vkResetFences(context.device, 1, &swap.fence);
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkResetFences error.");
    VkCommandBuffer cmdbuffer = swap.cmdbuffer;
    result = vkResetCommandBuffer(cmdbuffer, 0);
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkResetCommandBuffer error.");
    VkCommandBufferBeginInfo beginInfo = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
    };
    result = vkBeginCommandBuffer(cmdbuffer, &beginInfo);
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkBeginCommandBuffer error.");
    // Publish the recording buffer so createDepthBuffer records into it.
    context.cmdbuffer = cmdbuffer;

    // Create the depth buffer and issue a pipeline barrier command.
    createDepthBuffer(context, sc, depthFormat);

    // Flush the command buffer.
    result = vkEndCommandBuffer(context.cmdbuffer);
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkEndCommandBuffer error.");
    context.cmdbuffer = nullptr;
    VkSubmitInfo submitInfo {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .commandBufferCount = 1,
        .pCommandBuffers = &swap.cmdbuffer,
    };
    result = vkQueueSubmit(context.graphicsQueue, 1, &submitInfo, swap.fence);
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkQueueSubmit error.");
    swap.submitted = false;
}

// Allocates one primary command buffer and one fence per swapchain image
// into surfaceContext.swapContexts.
void createCommandBuffersAndFences(VulkanContext& context, VulkanSurfaceContext& surfaceContext) {
    // Allocate command buffers.
    VkCommandBufferAllocateInfo allocateInfo = {};
    allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    allocateInfo.commandPool = context.commandPool;
    allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    allocateInfo.commandBufferCount = (uint32_t) surfaceContext.swapContexts.size();
    std::vector<VkCommandBuffer> cmdbufs(allocateInfo.commandBufferCount);
    VkResult result = vkAllocateCommandBuffers(context.device, &allocateInfo, cmdbufs.data());
    ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkAllocateCommandBuffers error.");
    for (uint32_t i = 0; i < allocateInfo.commandBufferCount; ++i) {
        surfaceContext.swapContexts[i].cmdbuffer = cmdbufs[i];
    }

    // Create fences.
    VkFenceCreateInfo fenceCreateInfo = {};
    fenceCreateInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    // Created signaled so the first wait (see transitionDepthBuffer) passes.
    fenceCreateInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
    for (uint32_t i = 0; i < allocateInfo.commandBufferCount; i++) {
        result = vkCreateFence(context.device, &fenceCreateInfo, VKALLOC,
                &surfaceContext.swapContexts[i].fence);
        ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkCreateFence error.");
    }
}

// Destroys everything created for a surface context: per-image command
// buffers/fences/views, swapchain, semaphores, surface, and the depth
// buffer's view/image/memory. Clears context.currentSurface if it pointed
// at this context.
void destroySurfaceContext(VulkanContext& context, VulkanSurfaceContext& surfaceContext) {
    for (SwapContext& swapContext : surfaceContext.swapContexts) {
        vkFreeCommandBuffers(context.device, context.commandPool, 1, &swapContext.cmdbuffer);
        vkDestroyFence(context.device, swapContext.fence, VKALLOC);
        vkDestroyImageView(context.device, swapContext.attachment.view, VKALLOC);
        swapContext.fence = VK_NULL_HANDLE;
        swapContext.attachment.view = VK_NULL_HANDLE;
    }
    vkDestroySwapchainKHR(context.device, surfaceContext.swapchain, VKALLOC);
    vkDestroySemaphore(context.device, surfaceContext.imageAvailable, VKALLOC);
    vkDestroySemaphore(context.device, surfaceContext.renderingFinished, VKALLOC);
    vkDestroySurfaceKHR(context.instance, surfaceContext.surface, VKALLOC);
    vkDestroyImageView(context.device, surfaceContext.depth.view, VKALLOC);
    vkDestroyImage(context.device, surfaceContext.depth.image, VKALLOC);
    vkFreeMemory(context.device, surfaceContext.depth.memory, VKALLOC);
    if (context.currentSurface == &surfaceContext) {
        context.currentSurface = nullptr;
    }
}

// Returns the index of the first memory type allowed by `flags` (bitmask of
// candidate type indices, bit i = type i) whose propertyFlags satisfy
// `reqs`; asserts if no type qualifies.
uint32_t selectMemoryType(VulkanContext& context, uint32_t flags, VkFlags reqs) {
    for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++) {
        if (flags & 1) {
            if ((context.memoryProperties.memoryTypes[i].propertyFlags & reqs) == reqs) {
                return i;
            }
        }
        flags >>= 1;
    }
    ASSERT_POSTCONDITION(false, "Unable to find a memory type that meets requirements.");
    return (uint32_t) ~0ul;
}

// Maps a vertex attribute ElementType (plus a normalized flag) to the
// corresponding VkFormat; VK_FORMAT_UNDEFINED for unmapped combinations.
VkFormat getVkFormat(ElementType type, bool normalized) {
    using ElementType = ElementType;
    if (normalized) {
        switch (type) {
            // Single Component Types
            case ElementType::BYTE: return VK_FORMAT_R8_SNORM;
            case ElementType::UBYTE: return VK_FORMAT_R8_UNORM;
            case ElementType::SHORT: return VK_FORMAT_R16_SNORM;
            case ElementType::USHORT: return VK_FORMAT_R16_UNORM;
            // Two Component Types
            case ElementType::BYTE2: return VK_FORMAT_R8G8_SNORM;
            case ElementType::UBYTE2: return VK_FORMAT_R8G8_UNORM;
            case ElementType::SHORT2: return VK_FORMAT_R16G16_SNORM;
            case ElementType::USHORT2: return VK_FORMAT_R16G16_UNORM;
            // Three Component Types
            case ElementType::BYTE3: return VK_FORMAT_R8G8B8_SNORM;
            case ElementType::UBYTE3: return VK_FORMAT_R8G8B8_UNORM;
            case ElementType::SHORT3: return VK_FORMAT_R16G16B16_SNORM;
            case ElementType::USHORT3: return VK_FORMAT_R16G16B16_UNORM;
            // Four Component Types
            case ElementType::BYTE4: return VK_FORMAT_R8G8B8A8_SNORM;
            case ElementType::UBYTE4: return VK_FORMAT_R8G8B8A8_UNORM;
            case ElementType::SHORT4: return VK_FORMAT_R16G16B16A16_SNORM;
            case ElementType::USHORT4: return VK_FORMAT_R16G16B16A16_UNORM;
            default:
                ASSERT_POSTCONDITION(false, "Normalized format does not exist.");
                return VK_FORMAT_UNDEFINED;
        }
    }
    switch (type) {
        // Single Component Types
        case ElementType::BYTE: return VK_FORMAT_R8_SINT;
        case ElementType::UBYTE: return VK_FORMAT_R8_UINT;
        case ElementType::SHORT: return VK_FORMAT_R16_SINT;
        case ElementType::USHORT: return VK_FORMAT_R16_UINT;
        case ElementType::HALF: return VK_FORMAT_R16_SFLOAT;
        case ElementType::INT: return VK_FORMAT_R32_SINT;
        case ElementType::UINT: return VK_FORMAT_R32_UINT;
        case ElementType::FLOAT: return VK_FORMAT_R32_SFLOAT;
        // Two Component Types
        case ElementType::BYTE2: return VK_FORMAT_R8G8_SINT;
        case ElementType::UBYTE2: return VK_FORMAT_R8G8_UINT;
        case ElementType::SHORT2: return VK_FORMAT_R16G16_SINT;
        case ElementType::USHORT2: return VK_FORMAT_R16G16_UINT;
        case ElementType::HALF2: return VK_FORMAT_R16G16_SFLOAT;
        case ElementType::FLOAT2: return VK_FORMAT_R32G32_SFLOAT;
        // Three Component Types
        case ElementType::BYTE3: return VK_FORMAT_R8G8B8_SINT;
        case ElementType::UBYTE3: return VK_FORMAT_R8G8B8_UINT;
        case ElementType::SHORT3: return VK_FORMAT_R16G16B16_SINT;
        case ElementType::USHORT3: return VK_FORMAT_R16G16B16_UINT;
        case ElementType::HALF3: return VK_FORMAT_R16G16B16_SFLOAT;
        case ElementType::FLOAT3: return VK_FORMAT_R32G32B32_SFLOAT;
        // Four Component Types
        case ElementType::BYTE4: return VK_FORMAT_R8G8B8A8_SINT;
        case ElementType::UBYTE4: return VK_FORMAT_R8G8B8A8_UINT;
        case ElementType::SHORT4: return VK_FORMAT_R16G16B16A16_SINT;
        case ElementType::USHORT4: return VK_FORMAT_R16G16B16A16_UINT;
        case ElementType::HALF4: return VK_FORMAT_R16G16B16A16_SFLOAT;
        case ElementType::FLOAT4: return VK_FORMAT_R32G32B32A32_SFLOAT;
    }
    return VK_FORMAT_UNDEFINED;
}

// Maps a renderer TextureFormat to the corresponding VkFormat.
// (Continues beyond this span; the switch is not closed here.)
VkFormat getVkFormat(TextureFormat format) {
    using TextureFormat = TextureFormat;
    switch (format) {
        // 8 bits per element.
        case TextureFormat::R8: return VK_FORMAT_R8_UNORM;
        case TextureFormat::R8_SNORM: return VK_FORMAT_R8_SNORM;
        case TextureFormat::R8UI: return VK_FORMAT_R8_UINT;
        case TextureFormat::R8I: return VK_FORMAT_R8_SINT;
        case TextureFormat::STENCIL8: return VK_FORMAT_S8_UINT;
        // 16 bits per element.
        case TextureFormat::R16F: return VK_FORMAT_R16_SFLOAT;
        case TextureFormat::R16UI: return VK_FORMAT_R16_UINT;
        case TextureFormat::R16I: return VK_FORMAT_R16_SINT;
        case TextureFormat::RG8: return VK_FORMAT_R8G8_UNORM;
        case TextureFormat::RG8_SNORM: return VK_FORMAT_R8G8_SNORM;
        case TextureFormat::RG8UI: return VK_FORMAT_R8G8_UINT;
        case TextureFormat::RG8I: return VK_FORMAT_R8G8_SINT;
        case TextureFormat::RGB565: return VK_FORMAT_R5G6B5_UNORM_PACK16;
        case TextureFormat::RGB5_A1: return VK_FORMAT_R5G5B5A1_UNORM_PACK16;
        case TextureFormat::RGBA4: return VK_FORMAT_R4G4B4A4_UNORM_PACK16;
        case TextureFormat::DEPTH16: return VK_FORMAT_D16_UNORM;
        // 24 bits per element. In practice, very few GPU vendors support these. For simplicity
        // we just assume they are not supported, not bothering to query the device capabilities.
        // Note that VK_FORMAT_ enums for 24-bit formats exist, but are meant for vertex attributes.
        case TextureFormat::RGB8:
        case TextureFormat::SRGB8:
        case TextureFormat::RGB8_SNORM:
        case TextureFormat::RGB8UI:
        case TextureFormat::RGB8I:
        case TextureFormat::DEPTH24:
            return VK_FORMAT_UNDEFINED;
        // 32 bits per element.
        case TextureFormat::R32F: return VK_FORMAT_R32_SFLOAT;
        case TextureFormat::R32UI: return VK_FORMAT_R32_UINT;
        case TextureFormat::R32I: return VK_FORMAT_R32_SINT;
        case TextureFormat::RG16F: return VK_FORMAT_R16G16_SFLOAT;
        case TextureFormat::RG16UI: return VK_FORMAT_R16G16_UINT;
        case TextureFormat::RG16I: return VK_FORMAT_R16G16_SINT;
        case TextureFormat::R11F_G11F_B10F: return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
        case TextureFormat::RGB9_E5: return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
        case TextureFormat::RGBA8: return VK_FORMAT_R8G8B8A8_UNORM;
        case TextureFormat::SRGB8_A8: return VK_FORMAT_R8G8B8A8_SRGB;
        case TextureFormat::RGBA8_SNORM: return VK_FORMAT_R8G8B8A8_SNORM;
        case TextureFormat::RGBM: return VK_FORMAT_R8G8B8A8_UNORM;
        case TextureFormat::RGB10_A2: return VK_FORMAT_A2R10G10B10_UNORM_PACK32;
        case TextureFormat::RGBA8UI: return VK_FORMAT_R8G8B8A8_UINT;
        case TextureFormat::RGBA8I: return VK_FORMAT_R8G8B8A8_SINT;
        case TextureFormat::DEPTH32F: return VK_FORMAT_D32_SFLOAT;
        case TextureFormat::DEPTH24_STENCIL8: return VK_FORMAT_D24_UNORM_S8_UINT;
        case TextureFormat::DEPTH32F_STENCIL8: return VK_FORMAT_D32_SFLOAT_S8_UINT;
        // 48 bits per element. Note that many GPU vendors do not support these.
        case TextureFormat::RGB16F: return VK_FORMAT_R16G16B16_SFLOAT;
        case TextureFormat::RGB16UI: return VK_FORMAT_R16G16B16_UINT;
        case TextureFormat::RGB16I: return VK_FORMAT_R16G16B16_SINT;
        // 64 bits per element.
        case TextureFormat::RG32F: return VK_FORMAT_R32G32_SFLOAT;
        case TextureFormat::RG32UI: return VK_FORMAT_R32G32_UINT;
        case TextureFormat::RG32I: return VK_FORMAT_R32G32_SINT;
        case TextureFormat::RGBA16F: return VK_FORMAT_R16G16B16A16_SFLOAT;
        case TextureFormat::RGBA16UI: return VK_FORMAT_R16G16B16A16_UINT;
        case TextureFormat::RGBA16I: return VK_FORMAT_R16G16B16A16_SINT;
        // 96-bits per element.
case TextureFormat::RGB32F: return VK_FORMAT_R32G32B32_SFLOAT; case TextureFormat::RGB32UI: return VK_FORMAT_R32G32B32_UINT; case TextureFormat::RGB32I: return VK_FORMAT_R32G32B32_SINT; // 128-bits per element case TextureFormat::RGBA32F: return VK_FORMAT_R32G32B32A32_SFLOAT; case TextureFormat::RGBA32UI: return VK_FORMAT_R32G32B32A32_UINT; case TextureFormat::RGBA32I: return VK_FORMAT_R32G32B32A32_SINT; default: return VK_FORMAT_UNDEFINED; } } uint32_t getBytesPerPixel(TextureFormat format) { return details::FTexture::getFormatSize(format); } // See also FTexture::computeTextureDataSize, which takes a public-facing Texture format rather // than a driver-level Texture format, and can account for a specified byte alignment. uint32_t computeSize(TextureFormat format, uint32_t w, uint32_t h, uint32_t d) { const size_t bytesPerTexel = details::FTexture::getFormatSize(format); return bytesPerTexel * w * h * d; } SwapContext& getSwapContext(VulkanContext& context) { VulkanSurfaceContext& surface = *context.currentSurface; return surface.swapContexts[surface.currentSwapIndex]; } bool hasPendingWork(VulkanContext& context) { if (context.pendingWork.size() > 0) { return true; } if (context.currentSurface) { for (auto& swapContext : context.currentSurface->swapContexts) { if (swapContext.pendingWork.size() > 0) { return true; } } } return false; } VkCompareOp getCompareOp(SamplerCompareFunc func) { using Compare = driver::SamplerCompareFunc; switch (func) { case Compare::LE: return VK_COMPARE_OP_LESS_OR_EQUAL; case Compare::GE: return VK_COMPARE_OP_GREATER_OR_EQUAL; case Compare::L: return VK_COMPARE_OP_LESS; case Compare::G: return VK_COMPARE_OP_GREATER; case Compare::E: return VK_COMPARE_OP_EQUAL; case Compare::NE: return VK_COMPARE_OP_NOT_EQUAL; case Compare::A: return VK_COMPARE_OP_ALWAYS; case Compare::N: return VK_COMPARE_OP_NEVER; } } VkBlendFactor getBlendFactor(BlendFunction mode) { using BlendFunction = filament::driver::BlendFunction; switch (mode) { case 
BlendFunction::ZERO: return VK_BLEND_FACTOR_ZERO; case BlendFunction::ONE: return VK_BLEND_FACTOR_ONE; case BlendFunction::SRC_COLOR: return VK_BLEND_FACTOR_SRC_COLOR; case BlendFunction::ONE_MINUS_SRC_COLOR: return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; case BlendFunction::DST_COLOR: return VK_BLEND_FACTOR_DST_COLOR; case BlendFunction::ONE_MINUS_DST_COLOR: return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR; case BlendFunction::SRC_ALPHA: return VK_BLEND_FACTOR_SRC_ALPHA; case BlendFunction::ONE_MINUS_SRC_ALPHA: return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; case BlendFunction::DST_ALPHA: return VK_BLEND_FACTOR_DST_ALPHA; case BlendFunction::ONE_MINUS_DST_ALPHA: return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA; case BlendFunction::SRC_ALPHA_SATURATE: return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE; } } void waitForIdle(VulkanContext& context) { // If there's no valid GPU then we have nothing to do. if (!context.device) { return; } // If there's no surface, then there's no command buffer. if (!context.currentSurface) { return; } // First, wait for submitted command buffer(s) to finish. VkFence fences[2]; uint32_t nfences = 0; auto& surfaceContext = *context.currentSurface; for (auto& swapContext : surfaceContext.swapContexts) { assert(nfences < 2); if (swapContext.submitted && swapContext.fence) { fences[nfences++] = swapContext.fence; swapContext.submitted = false; } } if (nfences > 0) { vkWaitForFences(context.device, nfences, fences, VK_FALSE, ~0ull); } // If we don't have any pending work, we're done. if (!hasPendingWork(context)) { return; } // We cannot invoke arbitrary commands inside a render pass. assert(context.currentRenderPass.renderPass == VK_NULL_HANDLE); // Create a one-off command buffer to avoid the cost of swap chain acquisition and to avoid // the possibility of SURFACE_LOST. Note that Vulkan command buffers use the Allocate/Free // model instead of Create/Destroy and are therefore okay to create at a high frequency. 
VkCommandBuffer cmdbuffer; VkFence fence; VkCommandBufferBeginInfo beginInfo { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO }; VkCommandBufferAllocateInfo allocateInfo = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, .commandPool = context.commandPool, .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, .commandBufferCount = 1 }; VkFenceCreateInfo fenceCreateInfo { .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, }; vkAllocateCommandBuffers(context.device, &allocateInfo, &cmdbuffer); vkCreateFence(context.device, &fenceCreateInfo, VKALLOC, &fence); // Keep performing work until there's nothing queued up. This should never iterate more than // a couple times because the only work we queue up is for resource transition / reclamation. VkPipelineStageFlags waitDestStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; VkSubmitInfo submitInfo { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pWaitDstStageMask = &waitDestStageMask, .commandBufferCount = 1, .pCommandBuffers = &cmdbuffer, }; int cycles = 0; while (hasPendingWork(context)) { if (cycles++ > 2) { utils::slog.e << "Unexpected daisychaining of pending work." << utils::io::endl; break; } for (auto& swapContext : context.currentSurface->swapContexts) { vkBeginCommandBuffer(cmdbuffer, &beginInfo); performPendingWork(context, swapContext, cmdbuffer); vkEndCommandBuffer(cmdbuffer); vkQueueSubmit(context.graphicsQueue, 1, &submitInfo, fence); vkWaitForFences(context.device, 1, &fence, VK_FALSE, UINT64_MAX); vkResetFences(context.device, 1, &fence); vkResetCommandBuffer(cmdbuffer, 0); } } vkFreeCommandBuffers(context.device, context.commandPool, 1, &cmdbuffer); vkDestroyFence(context.device, fence, VKALLOC); } void acquireCommandBuffer(VulkanContext& context) { // Ask Vulkan for the next image in the swap chain and update the currentSwapIndex. 
VulkanSurfaceContext& surface = *context.currentSurface; VkResult result = vkAcquireNextImageKHR(context.device, surface.swapchain, UINT64_MAX, surface.imageAvailable, VK_NULL_HANDLE, &surface.currentSwapIndex); ASSERT_POSTCONDITION(result != VK_ERROR_OUT_OF_DATE_KHR, "Stale / resized swap chain not yet supported."); ASSERT_POSTCONDITION(result == VK_SUBOPTIMAL_KHR || result == VK_SUCCESS, "vkAcquireNextImageKHR error."); SwapContext& swap = getSwapContext(context); // Ensure that the previous submission of this command buffer has finished. result = vkWaitForFences(context.device, 1, &swap.fence, VK_FALSE, UINT64_MAX); ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkWaitForFences error."); // Restart the command buffer. result = vkResetFences(context.device, 1, &swap.fence); ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkResetFences error."); VkCommandBuffer cmdbuffer = swap.cmdbuffer; VkResult error = vkResetCommandBuffer(cmdbuffer, 0); ASSERT_POSTCONDITION(not error, "vkResetCommandBuffer error."); VkCommandBufferBeginInfo beginInfo { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, }; error = vkBeginCommandBuffer(cmdbuffer, &beginInfo); ASSERT_POSTCONDITION(not error, "vkBeginCommandBuffer error."); context.cmdbuffer = cmdbuffer; swap.submitted = false; } void releaseCommandBuffer(VulkanContext& context) { // Finalize the command buffer and set the cmdbuffer pointer to null. VkResult result = vkEndCommandBuffer(context.cmdbuffer); ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkEndCommandBuffer error."); context.cmdbuffer = nullptr; // Submit the command buffer. 
VkPipelineStageFlags waitDestStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; VulkanSurfaceContext& surfaceContext = *context.currentSurface; SwapContext& swapContext = getSwapContext(context); VkSubmitInfo submitInfo { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .waitSemaphoreCount = 1u, .pWaitSemaphores = &surfaceContext.imageAvailable, .pWaitDstStageMask = &waitDestStageMask, .commandBufferCount = 1, .pCommandBuffers = &swapContext.cmdbuffer, .signalSemaphoreCount = 1u, .pSignalSemaphores = &surfaceContext.renderingFinished, }; result = vkQueueSubmit(context.graphicsQueue, 1, &submitInfo, swapContext.fence); ASSERT_POSTCONDITION(result == VK_SUCCESS, "vkQueueSubmit error."); swapContext.submitted = true; } void performPendingWork(VulkanContext& context, SwapContext& swapContext, VkCommandBuffer cmdbuf) { // First, execute pending tasks that are specific to this swap context. Copy the tasks into a // local queue first, which allows newly added tasks to be deferred until the next frame. decltype(swapContext.pendingWork) tasks; tasks.swap(swapContext.pendingWork); for (auto& callback : tasks) { callback(cmdbuf); } // Next, execute the global pending work. Again, we copy the work queue into a local queue // to allow tasks to re-add themselves. tasks.clear(); tasks.swap(context.pendingWork); for (auto& callback : tasks) { callback(cmdbuf); } } // Flushes the command buffer and waits for it to finish executing. Useful for diagnosing // sychronization issues. void flushCommandBuffer(VulkanContext& context) { VulkanSurfaceContext& surface = *context.currentSurface; const SwapContext& sc = surface.swapContexts[surface.currentSwapIndex]; // Submit the command buffer. 
VkResult error = vkEndCommandBuffer(context.cmdbuffer); ASSERT_POSTCONDITION(!error, "vkEndCommandBuffer error."); VkPipelineStageFlags waitDestStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; VkSubmitInfo submitInfo { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .pWaitDstStageMask = &waitDestStageMask, .commandBufferCount = 1, .pCommandBuffers = &context.cmdbuffer, }; error = vkQueueSubmit(context.graphicsQueue, 1, &submitInfo, sc.fence); ASSERT_POSTCONDITION(!error, "vkQueueSubmit error."); // Restart the command buffer. error = vkWaitForFences(context.device, 1, &sc.fence, VK_FALSE, UINT64_MAX); ASSERT_POSTCONDITION(!error, "vkWaitForFences error."); error = vkResetFences(context.device, 1, &sc.fence); ASSERT_POSTCONDITION(!error, "vkResetFences error."); error = vkResetCommandBuffer(context.cmdbuffer, 0); ASSERT_POSTCONDITION(!error, "vkResetCommandBuffer error."); VkCommandBufferBeginInfo beginInfo { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, }; error = vkBeginCommandBuffer(context.cmdbuffer, &beginInfo); ASSERT_POSTCONDITION(!error, "vkBeginCommandBuffer error."); } VkFormat findSupportedFormat(VulkanContext& context, const std::vector<VkFormat>& candidates, VkImageTiling tiling, VkFormatFeatureFlags features) { for (VkFormat format : candidates) { VkFormatProperties props; vkGetPhysicalDeviceFormatProperties(context.physicalDevice, format, &props); if (tiling == VK_IMAGE_TILING_LINEAR && (props.linearTilingFeatures & features) == features) { return format; } else if (tiling == VK_IMAGE_TILING_OPTIMAL && (props.optimalTilingFeatures & features) == features) { return format; } } return VK_FORMAT_UNDEFINED; } } // namespace filament } // namespace driver
void VulkanContext::ReinitSurface(int width, int height) { if (surface_ != VK_NULL_HANDLE) { ILOG("Destroying Vulkan surface (%d, %d)", width_, height_); vkDestroySurfaceKHR(instance_, surface_, nullptr); surface_ = VK_NULL_HANDLE; } ILOG("Creating Vulkan surface (%d, %d)", width, height); switch (winsys_) { #ifdef _WIN32 case WINDOWSYSTEM_WIN32: { HINSTANCE connection = (HINSTANCE)winsysData1_; HWND window = (HWND)winsysData2_; RECT rc; GetClientRect(window, &rc); width = rc.right - rc.left; height = rc.bottom - rc.top; VkWin32SurfaceCreateInfoKHR win32{ VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR }; win32.flags = 0; win32.hwnd = window; win32.hinstance = connection; VkResult res = vkCreateWin32SurfaceKHR(instance_, &win32, nullptr, &surface_); assert(res == VK_SUCCESS); break; } #endif #if defined(__ANDROID__) case WINDOWSYSTEM_ANDROID: { ANativeWindow *wnd = (ANativeWindow *)winsysData1_; VkAndroidSurfaceCreateInfoKHR android{ VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR }; android.flags = 0; android.window = wnd; VkResult res = vkCreateAndroidSurfaceKHR(instance_, &android, nullptr, &surface_); assert(res == VK_SUCCESS); break; } #endif #if defined(VK_USE_PLATFORM_XLIB_KHR) case WINDOWSYSTEM_XLIB: { VkXlibSurfaceCreateInfoKHR xlib = { VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR }; xlib.flags = 0; xlib.dpy = (Display *)winsysData1_; xlib.window = (Window)winsysData2_; VkResult res = vkCreateXlibSurfaceKHR(instance_, &xlib, nullptr, &surface_); assert(res == VK_SUCCESS); break; } #endif #if defined(VK_USE_PLATFORM_XCB_KHR) case WINDOWSYSTEM_XCB: { VkXCBSurfaceCreateInfoKHR xcb = { VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR }; xcb.flags = 0; xcb.connection = (Connection *)winsysData1_; xcb.window = (Window)(uintptr_t)winsysData2_; VkResult res = vkCreateXcbSurfaceKHR(instance_, &xcb, nullptr, &surface_); assert(res == VK_SUCCESS); break; } #endif #if defined(VK_USE_PLATFORM_WAYLAND_KHR) case WINDOWSYSTEM_WAYLAND: { VkWaylandSurfaceCreateInfoKHR 
wayland = { VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR }; wayland.flags = 0; wayland.display = (wl_display *)winsysData1_; wayland.surface = (wl_surface *)winsysData2_; VkResult res = vkCreateWaylandSurfaceKHR(instance_, &wayland, nullptr, &surface_); assert(res == VK_SUCCESS); break; } #endif default: _assert_msg_(G3D, false, "Vulkan support for chosen window system not implemented"); break; } width_ = width; height_ = height; }
// Destroys the window-system surface via the renderer's Vulkan instance.
// Assumes `surface` was created against that same instance — TODO confirm at the creation site.
void destroyPlatformSpecificSurface() {
    vkDestroySurfaceKHR(renderer->getVulkanInstance(), surface, nullptr);
}
// Destroys the Vulkan surface handle, then releases the heap storage holding it.
// NOTE(review): `surface` is freed with free(), so it is presumably malloc'd by whoever
// created this object — confirm, and note that `instance` is intentionally NOT freed here
// (ownership appears to lie elsewhere).
VK_Surface::~VK_Surface() {
    vkDestroySurfaceKHR(*instance, *surface, allocs);
    free((void*)surface);
}
/**
 * Good ol' main function.
 *
 * Demo entry point: brings up SDL2 and Vulkan (instance, debug callback, device,
 * swapchain, depth buffer, render pass, framebuffers, vertex buffer, pipeline,
 * per-frame sync objects), runs the render/event loop with frame-time statistics,
 * then tears everything down in reverse order.
 */
int main(int argc, char* argv[]) {
    static int windowWidth = 800;
    static int windowHeight = 600;
    static const char * applicationName = "SdlVulkanDemo_03_double_buffering";
    static const char * engineName = applicationName;

    bool boolResult;
    VkResult result;

    /*
     * SDL2 Initialization
     */
    SDL_Window *mySdlWindow;
    SDL_SysWMinfo mySdlSysWmInfo;
    boolResult = vkdemos::utils::sdl2Initialization(applicationName, windowWidth, windowHeight, mySdlWindow, mySdlSysWmInfo);
    assert(boolResult);

    /*
     * Vulkan initialization.
     */
    std::vector<const char *> layersNamesToEnable;
    // NOTE(review): this is the legacy LunarG meta-layer name; on current SDKs the
    // equivalent is VK_LAYER_KHRONOS_validation — confirm against the target SDK.
    layersNamesToEnable.push_back("VK_LAYER_LUNARG_standard_validation");

    std::vector<const char *> extensionsNamesToEnable;
    extensionsNamesToEnable.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
    extensionsNamesToEnable.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
    extensionsNamesToEnable.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); // TODO: add support for other windowing systems

    VkInstance myInstance;
    boolResult = vkdemos::createVkInstance(layersNamesToEnable, extensionsNamesToEnable, applicationName, engineName, myInstance);
    assert(boolResult);

    // Register a debug report callback for validation/diagnostic messages.
    VkDebugReportCallbackEXT myDebugReportCallback;
    vkdemos::createDebugReportCallback(myInstance,
        VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | VK_DEBUG_REPORT_DEBUG_BIT_EXT,
        vkdemos::debugCallback,
        myDebugReportCallback
    );

    VkPhysicalDevice myPhysicalDevice;
    boolResult = vkdemos::chooseVkPhysicalDevice(myInstance, 0, myPhysicalDevice);
    assert(boolResult);

    VkSurfaceKHR mySurface;
    boolResult = vkdemos::createVkSurface(myInstance, mySdlSysWmInfo, mySurface);
    assert(boolResult);

    VkDevice myDevice;
    VkQueue myQueue;
    uint32_t myQueueFamilyIndex;
    boolResult = vkdemos::createVkDeviceAndVkQueue(myPhysicalDevice, mySurface, layersNamesToEnable, myDevice, myQueue, myQueueFamilyIndex);
    assert(boolResult);

    // FRAME_LAG images requested: the demo double/triple-buffers frames.
    VkSwapchainKHR mySwapchain;
    VkFormat mySurfaceFormat;
    boolResult = vkdemos::createVkSwapchain(myPhysicalDevice, myDevice, mySurface, windowWidth, windowHeight, FRAME_LAG, VK_NULL_HANDLE, mySwapchain, mySurfaceFormat);
    assert(boolResult);

    std::vector<VkImage> mySwapchainImagesVector;
    std::vector<VkImageView> mySwapchainImageViewsVector;
    boolResult = vkdemos::getSwapchainImagesAndViews(myDevice, mySwapchain, mySurfaceFormat, mySwapchainImagesVector, mySwapchainImageViewsVector);
    assert(boolResult);

    VkCommandPool myCommandPool;
    boolResult = vkdemos::createCommandPool(myDevice, myQueueFamilyIndex, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, myCommandPool);
    assert(boolResult);

    VkCommandBuffer myCmdBufferInitialization;
    boolResult = vkdemos::allocateCommandBuffer(myDevice, myCommandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, myCmdBufferInitialization);
    assert(boolResult);

    /*
     * Initializations from Demo 02 (Triangle).
     */
    VkPhysicalDeviceMemoryProperties myMemoryProperties;
    vkGetPhysicalDeviceMemoryProperties(myPhysicalDevice, &myMemoryProperties);

    // Create the Depth Buffer's Image and View.
    const VkFormat myDepthBufferFormat = VK_FORMAT_D16_UNORM;

    VkImage myDepthImage;
    VkImageView myDepthImageView;
    VkDeviceMemory myDepthMemory;
    boolResult = vkdemos::createAndAllocateImage(myDevice,
        myMemoryProperties,
        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
        0,
        myDepthBufferFormat,
        windowWidth,
        windowHeight,
        myDepthImage,
        myDepthMemory,
        &myDepthImageView,
        VK_IMAGE_ASPECT_DEPTH_BIT
    );
    assert(boolResult);

    // Create the renderpass.
    VkRenderPass myRenderPass;
    boolResult = demo02CreateRenderPass(myDevice, mySurfaceFormat, myDepthBufferFormat, myRenderPass);
    assert(boolResult);

    // Create the Framebuffers, based on the number of swapchain images.
    // (Each framebuffer pairs one swapchain color view with the shared depth view.)
    std::vector<VkFramebuffer> myFramebuffersVector;
    myFramebuffersVector.reserve(mySwapchainImageViewsVector.size());

    for(const auto view : mySwapchainImageViewsVector) {
        VkFramebuffer fb;
        boolResult = vkdemos::utils::createFramebuffer(myDevice, myRenderPass, {view, myDepthImageView}, windowWidth, windowHeight, fb);
        assert(boolResult);
        myFramebuffersVector.push_back(fb);
    }

    // Create a buffer to use as the vertex buffer.
    const size_t vertexBufferSize = sizeof(TriangleDemoVertex)*NUM_DEMO_VERTICES;
    VkBuffer myVertexBuffer;
    VkDeviceMemory myVertexBufferMemory;
    boolResult = vkdemos::createAndAllocateBuffer(myDevice,
        myMemoryProperties,
        VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
        vertexBufferSize,
        myVertexBuffer,
        myVertexBufferMemory
    );
    assert(boolResult);

    // Map vertex buffer and insert data
    {
        void *mappedBuffer;
        result = vkMapMemory(myDevice, myVertexBufferMemory, 0, VK_WHOLE_SIZE, 0, &mappedBuffer);
        assert(result == VK_SUCCESS);
        memcpy(mappedBuffer, vertices, vertexBufferSize);
        vkUnmapMemory(myDevice, myVertexBufferMemory);
    }

    // Create the pipeline. (No descriptor sets or push constants in this demo,
    // hence the empty pipeline layout.)
    const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .setLayoutCount = 0,
        .pSetLayouts = nullptr,
        .pushConstantRangeCount = 0,
        .pPushConstantRanges = nullptr,
    };

    VkPipelineLayout myPipelineLayout;
    result = vkCreatePipelineLayout(myDevice, &pipelineLayoutCreateInfo, nullptr, &myPipelineLayout);
    assert(result == VK_SUCCESS);

    // Create Pipeline.
    VkPipeline myGraphicsPipeline;
    boolResult = demo02CreatePipeline(myDevice, myRenderPass, myPipelineLayout, VERTEX_SHADER_FILENAME, FRAGMENT_SHADER_FILENAME, VERTEX_INPUT_BINDING, myGraphicsPipeline);
    assert(boolResult);

    /*
     * In the PerFrameData struct we group all the objects that are used in a single frame, and that
     * must remain valid for the duration of that frame.
     */
    PerFrameData perFrameDataVector[FRAME_LAG];

    for(int i = 0; i < FRAME_LAG; i++) {
        boolResult = vkdemos::allocateCommandBuffer(myDevice, myCommandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, perFrameDataVector[i].presentCmdBuffer);
        assert(boolResult);

        result = vkdemos::utils::createFence(myDevice, perFrameDataVector[i].presentFence);
        assert(result == VK_SUCCESS);

        result = vkdemos::utils::createSemaphore(myDevice, perFrameDataVector[i].imageAcquiredSemaphore);
        assert(result == VK_SUCCESS);

        result = vkdemos::utils::createSemaphore(myDevice, perFrameDataVector[i].renderingCompletedSemaphore);
        assert(result == VK_SUCCESS);

        perFrameDataVector[i].fenceInitialized = false;
    }

    /*
     * Generation and submission of the initialization commands' command buffer.
     */
    // We fill the initialization command buffer with... the initialization commands.
    boolResult = demo02FillInitializationCommandBuffer(myCmdBufferInitialization, myDepthImage);
    assert(boolResult);

    // We now submit the command buffer to the queue we created before, and we wait
    // for its completion.
    VkSubmitInfo submitInfo = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = nullptr,
        .waitSemaphoreCount = 0,
        .pWaitSemaphores = nullptr,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &myCmdBufferInitialization,
        .signalSemaphoreCount = 0,
        .pSignalSemaphores = nullptr
    };
    result = vkQueueSubmit(myQueue, 1, &submitInfo, VK_NULL_HANDLE);
    assert(result == VK_SUCCESS);

    // Wait for the queue to complete its work.
    result = vkQueueWaitIdle(myQueue);
    assert(result == VK_SUCCESS);

    /*
     * Event loop
     */
    SDL_Event sdlEvent;
    bool quit = false;

    // Just some variables for frame statistics
    long frameNumber = 0;
    long frameMaxTime = LONG_MIN;
    long frameMinTime = LONG_MAX;
    long frameAvgTimeSum = 0;
    long frameAvgTimeSumSquare = 0;
    constexpr long FRAMES_PER_STAT = 120; // How many frames to wait before printing frame time statistics.

    // The main event/render loop.
    while(!quit) {
        // Process events for this frame
        while(SDL_PollEvent(&sdlEvent)) {
            if (sdlEvent.type == SDL_QUIT) {
                quit = true;
            }
            if (sdlEvent.type == SDL_KEYDOWN && sdlEvent.key.keysym.sym == SDLK_ESCAPE) {
                quit = true;
            }
        }

        // Rendering code
        if(!quit) {
            // Render a single frame
            auto renderStartTime = std::chrono::high_resolution_clock::now();
            quit = !demo03RenderSingleFrame(myDevice, myQueue, mySwapchain, myFramebuffersVector, myRenderPass, myGraphicsPipeline, myVertexBuffer, VERTEX_INPUT_BINDING, perFrameDataVector[frameNumber % FRAME_LAG], windowWidth, windowHeight);
            auto renderStopTime = std::chrono::high_resolution_clock::now();

            // Compute frame time statistics
            auto elapsedTimeUs = std::chrono::duration_cast<std::chrono::microseconds>(renderStopTime - renderStartTime).count();

            frameMaxTime = std::max(frameMaxTime, elapsedTimeUs);
            frameMinTime = std::min(frameMinTime, elapsedTimeUs);
            frameAvgTimeSum += elapsedTimeUs;
            frameAvgTimeSumSquare += elapsedTimeUs*elapsedTimeUs;

            // Print statistics if necessary
            // NOTE(review): this also fires at frameNumber == 0 (one sample, divided by
            // FRAMES_PER_STAT), and the integer E[x^2] - E[x]^2 can come out slightly
            // negative, making sqrt produce NaN — consider guarding both.
            if(frameNumber % FRAMES_PER_STAT == 0) {
                auto average = frameAvgTimeSum/FRAMES_PER_STAT;
                auto stddev = std::sqrt(frameAvgTimeSumSquare/FRAMES_PER_STAT - average*average);
                std::cout << "Frame time: average " << std::setw(6) << average
                          << " us, maximum " << std::setw(6) << frameMaxTime
                          << " us, minimum " << std::setw(6) << frameMinTime
                          << " us, stddev " << (long)stddev
                          << " (" << std::fixed << std::setprecision(2) << (stddev/average * 100.0f) << "%)"
                          << std::endl;

                frameMaxTime = LONG_MIN;
                frameMinTime = LONG_MAX;
                frameAvgTimeSum = 0;
                frameAvgTimeSumSquare = 0;
            }

            frameNumber++;
        }
    }

    /*
     * Deinitialization
     */
    // We wait for pending operations to complete before starting to destroy stuff.
    result = vkQueueWaitIdle(myQueue);
    assert(result == VK_SUCCESS);

    // Destroy the objects in the perFrameDataVector array.
    for(int i = 0; i < FRAME_LAG; i++) {
        vkDestroyFence(myDevice, perFrameDataVector[i].presentFence, nullptr);
        vkDestroySemaphore(myDevice, perFrameDataVector[i].imageAcquiredSemaphore, nullptr);
        vkDestroySemaphore(myDevice, perFrameDataVector[i].renderingCompletedSemaphore, nullptr);
    }

    /*
     * For more information on the following commands, refer to Demo 02.
     */
    vkDestroyPipeline(myDevice, myGraphicsPipeline, nullptr);
    vkDestroyPipelineLayout(myDevice, myPipelineLayout, nullptr);
    vkDestroyBuffer(myDevice, myVertexBuffer, nullptr);
    vkFreeMemory(myDevice, myVertexBufferMemory, nullptr);

    for(auto framebuffer : myFramebuffersVector)
        vkDestroyFramebuffer(myDevice, framebuffer, nullptr);

    vkDestroyRenderPass(myDevice, myRenderPass, nullptr);
    vkDestroyImageView(myDevice, myDepthImageView, nullptr);
    vkDestroyImage(myDevice, myDepthImage, nullptr);
    vkFreeMemory(myDevice, myDepthMemory, nullptr);

    /*
     * For more information on the following commands, refer to Demo 01.
     */
    vkDestroyCommandPool(myDevice, myCommandPool, nullptr);

    for(auto imgView : mySwapchainImageViewsVector)
        vkDestroyImageView(myDevice, imgView, nullptr);

    vkDestroySwapchainKHR(myDevice, mySwapchain, nullptr);
    vkDestroyDevice(myDevice, nullptr);
    vkDestroySurfaceKHR(myInstance, mySurface, nullptr);
    vkdemos::destroyDebugReportCallback(myInstance, myDebugReportCallback);
    vkDestroyInstance(myInstance, nullptr);

    SDL_DestroyWindow(mySdlWindow);
    SDL_Quit();
    return 0;
}
// Releases the Vulkan objects owned by this device wrapper: the logical device,
// then the presentation surface (which belongs to the instance, so device order
// relative to the surface does not matter).
//
// Fix: the allocator argument is a `const VkAllocationCallbacks*`, so pass
// nullptr; VK_NULL_HANDLE is intended for Vulkan object handles, not allocator
// callback pointers (it only happens to compile because both expand to 0/nullptr).
void VulkanDevice::Unload(VulkanInstance * vulkanInstance) {
	vkDestroyDevice(device, nullptr);
	vkDestroySurfaceKHR(vulkanInstance->GetInstance(), surface, nullptr);
}
// Destroys the surface referenced by `surfaceInfo`, then resets the whole struct
// to a default-constructed state so stale handles cannot be reused accidentally.
void DestroySurfaceInfo(VkInstance vkInstance, SurfaceInfo* surfaceInfo) {
    vkDestroySurfaceKHR(vkInstance, surfaceInfo->surface, nullptr);
    *surfaceInfo = SurfaceInfo{};
}