//------------------------------------------------------------------------------ // method for GPUInterface //------------------------------------------------------------------------------ unsigned long long RendererVk::TimerResult(nv_helpers::Profiler::TimerIdx idxBegin, nv_helpers::Profiler::TimerIdx idxEnd) { if(m_bValid == false) return 0; uint64_t end = 0; uint64_t begin = 0; vkGetQueryPoolResults(nvk.m_device, m_timePool, idxEnd, 1, sizeof(uint64_t), &end, 0, VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT); vkGetQueryPoolResults(nvk.m_device, m_timePool, idxBegin, 1, sizeof(uint64_t), &begin, 0, VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT); return uint64_t(double(end - begin) * m_timeStampFrequency); }
//============================================================================== OcclusionQueryResult OcclusionQueryImpl::getResult() const { ANKI_ASSERT(m_handle); U64 out = 0; VkResult res; ANKI_VK_CHECKF( res = vkGetQueryPoolResults(getDevice(), m_handle, 0, 1, sizeof(out), &out, sizeof(out), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT)); OcclusionQueryResult qout = OcclusionQueryResult::NOT_AVAILABLE; if(res == VK_SUCCESS) { qout = (out) ? OcclusionQueryResult::VISIBLE : OcclusionQueryResult::NOT_VISIBLE; } else if(res == VK_NOT_READY) { qout = OcclusionQueryResult::NOT_AVAILABLE; } else { ANKI_ASSERT(0); } return qout; }
// Opens a profiler section named `name` and records its begin timestamp into
// the query pool on `cmd`. Returns the section id callers pass to endSection.
nvh::Profiler::SectionID ProfilerVK::beginSection(const char* name, VkCommandBuffer cmd)
{
  // Callback the base profiler invokes later to resolve this section's GPU time.
  nvh::Profiler::gpuTimeProvider_fn fnProvider = [&](SectionID i, uint32_t queryFrame, double& gpuTime) {
    uint32_t idxBegin = getTimerIdx(i, queryFrame, true);

    // Read the begin/end timestamp pair in one call.
    // BUGFIX: the stride was 0, which makes vkGetQueryPoolResults write BOTH
    // query results to times[0] and leaves times[1] uninitialized. The stride
    // must be sizeof(uint64_t) so consecutive queries land in consecutive
    // array slots.
    uint64_t times[2];
    VkResult result = vkGetQueryPoolResults(m_device, m_queryPool, idxBegin, 2, sizeof(times), times,
                                            sizeof(uint64_t), VK_QUERY_RESULT_64_BIT);
    if(result == VK_SUCCESS)
    {
      // Convert the tick delta via the device timestamp frequency factor.
      gpuTime = (double(times[1] - times[0]) * double(m_frequency)) / double(1000);
      return true;
    }
    else
    {
      return false;
    }
  };

  SectionID slot = Profiler::beginSection(name, "VK ", fnProvider);

  // Grow the pool when this section pushed the timer count past capacity.
  if(getRequiredTimers() > m_queryPoolSize)
  {
    resize();
  }

  uint32_t idx = getTimerIdx(slot, getSubFrame(), true);
  // clear begin and end
  vkCmdResetQueryPool(cmd, m_queryPool, idx, 2);  // not ideal to do this per query
  vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, m_queryPool, idx);
  return slot;
}
// Reads back up to query_count occlusion-query results from the GPU, clears
// the corresponding pending ring-buffer entries, and accumulates the scaled
// sample counts into the per-type result counters.
void PerfQuery::ReadbackQueries(u32 query_count)
{
  // Should be at maximum query_count queries pending.
  ASSERT(query_count <= m_query_count &&
         (m_query_readback_pos + query_count) <= PERF_QUERY_BUFFER_SIZE);

  // Read back from the GPU.
  // NOTE(review): flags == 0 requests default-width results without waiting,
  // so a VK_NOT_READY outcome only logs — presumably callers invoke this after
  // the associated fences signalled; confirm against the call sites.
  VkResult res =
      vkGetQueryPoolResults(g_vulkan_context->GetDevice(), m_query_pool, m_query_readback_pos,
                            query_count, query_count * sizeof(PerfQueryDataType),
                            m_query_result_buffer.data(), sizeof(PerfQueryDataType), 0);
  if (res != VK_SUCCESS)
    LOG_VULKAN_ERROR(res, "vkGetQueryPoolResults failed: ");

  // Remove pending queries.
  for (u32 i = 0; i < query_count; i++)
  {
    // Ring-buffer index of this pending query.
    u32 index = (m_query_readback_pos + i) % PERF_QUERY_BUFFER_SIZE;
    ActiveQuery& entry = m_query_buffer[index];

    // Should have a fence associated with it (waiting for a result).
    DEBUG_ASSERT(entry.fence_counter != 0);
    entry.fence_counter = 0;
    entry.has_value = false;

    // NOTE: Reported pixel metrics should be referenced to native resolution
    // (scaling is done in 64-bit; the interleaved divisions keep the
    // intermediate product from overflowing).
    m_results[entry.query_type] +=
        static_cast<u32>(static_cast<u64>(m_query_result_buffer[i]) * EFB_WIDTH /
                         g_renderer->GetTargetWidth() * EFB_HEIGHT / g_renderer->GetTargetHeight());
  }

  // Advance the ring tail past the consumed entries.
  m_query_readback_pos = (m_query_readback_pos + query_count) % PERF_QUERY_BUFFER_SIZE;
  m_query_count -= query_count;
}
// Thin wrapper over vkGetQueryPoolResults. VK_NOT_READY is an expected,
// non-error outcome (results simply aren't available yet).
VkResult QueryPool::results(uint32_t first, uint32_t count, size_t size, void *data, size_t stride)
{
    const VkResult status = vkGetQueryPoolResults(device(), handle(), first, count, size, data, stride, 0);
    EXPECT(status == VK_SUCCESS || status == VK_NOT_READY);

    return status;
}
// Drains completed GPU timestamp queries from the ring [m_tail, m_head),
// forwards each as a GpuTime event to the profiler queue, and resets the
// consumed pool slots on cmdbuf so they can be reused.
void Collect( VkCommandBuffer cmdbuf )
{
    ZoneScopedC( Color::Red4 );

    // Nothing pending.
    if( m_tail == m_head ) return;

#ifdef TRACY_ON_DEMAND
    // No client connected: discard every pending query and reset the ring.
    if( !s_profiler.IsConnected() )
    {
        vkCmdResetQueryPool( cmdbuf, m_query, 0, QueryCount );
        m_head = m_tail = 0;
        return;
    }
#endif

    // Number of queries to read this round. m_oldCnt holds the count of a
    // previous attempt that came back VK_NOT_READY; retry exactly that many.
    unsigned int cnt;
    if( m_oldCnt != 0 )
    {
        cnt = m_oldCnt;
        m_oldCnt = 0;
    }
    else
    {
        // The ring may wrap: only read up to the end of the buffer per call.
        cnt = m_head < m_tail ? QueryCount - m_tail : m_head - m_tail;
    }

    int64_t res[QueryCount];
    // Non-blocking readback; if any result is still unavailable, remember the
    // count and retry on a later Collect().
    if( vkGetQueryPoolResults( m_device, m_query, m_tail, cnt, sizeof( res ), res, sizeof( *res ), VK_QUERY_RESULT_64_BIT ) == VK_NOT_READY )
    {
        m_oldCnt = cnt;
        return;
    }

    // Emit one GpuTime event per collected timestamp.
    Magic magic;
    auto& token = s_token.ptr;
    auto& tail = token->get_tail_index();
    for( unsigned int idx=0; idx<cnt; idx++ )
    {
        auto item = token->enqueue_begin<tracy::moodycamel::CanAlloc>( magic );
        MemWrite( &item->hdr.type, QueueType::GpuTime );
        MemWrite( &item->gpuTime.gpuTime, res[idx] );
        MemWrite( &item->gpuTime.queryId, uint16_t( m_tail + idx ) );
        MemWrite( &item->gpuTime.context, m_context );
        // Publish the item to the consumer.
        tail.store( magic + 1, std::memory_order_release );
    }

    // Make the consumed slots reusable and advance the ring tail.
    vkCmdResetQueryPool( cmdbuf, m_query, m_tail, cnt );
    m_tail += cnt;
    if( m_tail == QueryCount ) m_tail = 0;
}
// Retrieves the results of the pipeline statistics query submitted to the command buffer void getQueryResults() { uint32_t count = static_cast<uint32_t>(pipelineStats.size()); vkGetQueryPoolResults( device, queryPool, 0, 1, count * sizeof(uint64_t), pipelineStats.data(), sizeof(uint64_t), VK_QUERY_RESULT_64_BIT); }
// Retrieves the results of the pipeline statistics query submitted to the command buffer void getQueryResults() { // We use vkGetQueryResults to copy the results into a host visible buffer vkGetQueryPoolResults( device, queryPool, 0, 1, sizeof(pipelineStats), pipelineStats, sizeof(uint64_t), VK_QUERY_RESULT_64_BIT); }
// Non-blocking readback of this query's 64-bit value. Returns true and fills
// `result` only when the GPU has produced the value (VK_SUCCESS).
bool VulkanQuery::getResult(UINT64& result) const
{
	// Note: A potentially better approach to get results is to make the query pool a VulkanResource, which we attach
	// to a command buffer upon use. Then when CB finishes executing we perform vkGetQueryPoolResults on all queries
	// in the pool at once.
	VkDevice device = mOwner->getDevice().getLogical();

	const VkResult status = vkGetQueryPoolResults(device, mPool, 0, 1, sizeof(result), &result, sizeof(result),
		VK_QUERY_RESULT_64_BIT);

	// Anything other than "done" or "not yet" indicates misuse.
	assert(status == VK_SUCCESS || status == VK_NOT_READY);

	return status == VK_SUCCESS;
}
// Retrieves the results of the occlusion queries submitted to the command buffer void getQueryResults() { VkResult err; err = vkGetQueryPoolResults( device, queryPool, 0, 2, sizeof(passedSamples), passedSamples, sizeof(uint64_t), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); assert(!err); }
// Retrieves the results of the occlusion queries submitted to the command buffer void getQueryResults() { // We use vkGetQueryResults to copy the results into a host visible buffer vkGetQueryPoolResults( device, queryPool, 0, 2, sizeof(passedSamples), passedSamples, sizeof(uint64_t), // Store results a 64 bit values and wait until the results have been finished // If you don't want to wait, you can use VK_QUERY_RESULT_WITH_AVAILABILITY_BIT // which also returns the state of the result (ready) in the result VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); }
// Creates the Tracy Vulkan profiling context: allocates a timestamp query
// pool, takes one calibration timestamp on the GPU paired with a CPU
// timestamp, and enqueues a GpuNewContext event describing this context.
VkCtx( VkPhysicalDevice physdev, VkDevice device, VkQueue queue, VkCommandBuffer cmdbuf )
    : m_device( device )
    , m_queue( queue )
    , m_context( s_gpuCtxCounter.fetch_add( 1, std::memory_order_relaxed ) )
    , m_head( 0 )
    , m_tail( 0 )
    , m_oldCnt( 0 )
{
    // Context ids are transmitted as a single byte; 255 would overflow it.
    assert( m_context != 255 );

    // Nanoseconds per timestamp tick, as reported by the device.
    VkPhysicalDeviceProperties prop;
    vkGetPhysicalDeviceProperties( physdev, &prop );
    const float period = prop.limits.timestampPeriod;

    VkQueryPoolCreateInfo poolInfo = {};
    poolInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    poolInfo.queryCount = QueryCount;
    poolInfo.queryType = VK_QUERY_TYPE_TIMESTAMP;
    vkCreateQueryPool( device, &poolInfo, nullptr, &m_query );

    VkCommandBufferBeginInfo beginInfo = {};
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;

    VkSubmitInfo submitInfo = {};
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &cmdbuf;

    // Reset the whole pool before first use. Each tiny submit below waits
    // idle — slow, but acceptable for one-time initialization.
    vkBeginCommandBuffer( cmdbuf, &beginInfo );
    vkCmdResetQueryPool( cmdbuf, m_query, 0, QueryCount );
    vkEndCommandBuffer( cmdbuf );
    vkQueueSubmit( queue, 1, &submitInfo, VK_NULL_HANDLE );
    vkQueueWaitIdle( queue );

    // Write a single calibration timestamp on the GPU...
    vkBeginCommandBuffer( cmdbuf, &beginInfo );
    vkCmdWriteTimestamp( cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, m_query, 0 );
    vkEndCommandBuffer( cmdbuf );
    vkQueueSubmit( queue, 1, &submitInfo, VK_NULL_HANDLE );
    vkQueueWaitIdle( queue );

    // ...and pair it with a CPU timestamp taken once the queue is idle, so
    // the profiler can correlate the two clock domains.
    int64_t tcpu = Profiler::GetTime();
    int64_t tgpu;
    vkGetQueryPoolResults( device, m_query, 0, 1, sizeof( tgpu ), &tgpu, sizeof( tgpu ), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT );

    // Free the calibration slot for normal use.
    vkBeginCommandBuffer( cmdbuf, &beginInfo );
    vkCmdResetQueryPool( cmdbuf, m_query, 0, 1 );
    vkEndCommandBuffer( cmdbuf );
    vkQueueSubmit( queue, 1, &submitInfo, VK_NULL_HANDLE );
    vkQueueWaitIdle( queue );

    // Announce the new GPU context to the profiler queue.
    Magic magic;
    auto& token = s_token.ptr;
    auto& tail = token->get_tail_index();
    auto item = token->enqueue_begin<tracy::moodycamel::CanAlloc>( magic );
    MemWrite( &item->hdr.type, QueueType::GpuNewContext );
    MemWrite( &item->gpuNewContext.cpuTime, tcpu );
    MemWrite( &item->gpuNewContext.gpuTime, tgpu );
    memset( &item->gpuNewContext.thread, 0, sizeof( item->gpuNewContext.thread ) );
    MemWrite( &item->gpuNewContext.period, period );
    MemWrite( &item->gpuNewContext.context, m_context );
    MemWrite( &item->gpuNewContext.accuracyBits, uint8_t( 0 ) );
#ifdef TRACY_ON_DEMAND
    // Retain the item so a client that connects later still gets the context.
    s_profiler.DeferItem( *item );
#endif
    // Publish the item to the consumer.
    tail.store( magic + 1, std::memory_order_release );
}
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Draw Cube"; process_command_line_args(info, argc, argv); init_global_layer_properties(info); info.instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); #ifdef _WIN32 info.instance_extension_names.push_back( VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #else info.instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #endif info.device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, false); init_renderpass(info, DEPTH_PRESENT); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, DEPTH_PRESENT); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); init_descriptor_pool(info, false); init_descriptor_set(info, false); init_pipeline_cache(info); init_pipeline(info, DEPTH_PRESENT); /* VULKAN_KEY_START */ VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; res 
= vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); /* Allocate a uniform buffer that will take query results. */ VkBuffer query_result_buf; VkDeviceMemory query_result_mem; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; buf_info.size = 4 * sizeof(uint64_t); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &query_result_buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, query_result_buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &query_result_mem); assert(res == VK_SUCCESS); res = vkBindBufferMemory(info.device, query_result_buf, query_result_mem, 0); assert(res == VK_SUCCESS); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_info; query_pool_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_info.pNext = NULL; query_pool_info.queryType = VK_QUERY_TYPE_OCCLUSION; query_pool_info.flags = 0; query_pool_info.queryCount = 2; query_pool_info.pipelineStatistics = 0; res = 
vkCreateQueryPool(info.device, &query_pool_info, NULL, &query_pool); assert(res == VK_SUCCESS); vkCmdResetQueryPool(info.cmd, query_pool, 0 /*startQuery*/, 2 /*queryCount*/); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); VkViewport viewport; viewport.height = (float)info.height; viewport.width = (float)info.width; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); VkRect2D scissor; scissor.extent.width = info.width; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); vkCmdBeginQuery(info.cmd, query_pool, 0 /*slot*/, 0 /*flags*/); vkCmdEndQuery(info.cmd, query_pool, 0 /*slot*/); vkCmdBeginQuery(info.cmd, query_pool, 1 /*slot*/, 0 /*flags*/); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); vkCmdEndQuery(info.cmd, query_pool, 1 /*slot*/); vkCmdCopyQueryPoolResults( info.cmd, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, query_result_buf, 0 /*dstOffset*/, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); VkImageMemoryBarrier prePresentBarrier 
= {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = 0; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); uint64_t samples_passed[4]; 
samples_passed[0] = 0; samples_passed[1] = 0; res = vkGetQueryPoolResults( info.device, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, sizeof(samples_passed) /*dataSize*/, samples_passed, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); assert(res == VK_SUCCESS); std::cout << "vkGetQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed[1] << "\n"; /* Read back query result from buffer */ uint64_t *samples_passed_ptr; res = vkMapMemory(info.device, query_result_mem, 0, mem_reqs.size, 0, (void **)&samples_passed_ptr); assert(res == VK_SUCCESS); std::cout << "vkCmdCopyQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed_ptr[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed_ptr[1] << "\n"; vkUnmapMemory(info.device, query_result_mem); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "occlusion_query"); vkDestroyBuffer(info.device, query_result_buf, NULL); vkFreeMemory(info.device, query_result_mem, NULL); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyQueryPool(info.device, query_pool, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); 
destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }