int sample_main() {
    VkResult U_ASSERT_ONLY res;
    struct sample_info info = {};
    char sample_title[] = "Command Buffer Sample";

    init_global_layer_properties(info);
    init_instance_extension_names(info);
    init_device_extension_names(info);
    init_instance(info, sample_title);
    init_enumerate_device(info);
    init_window_size(info, 500, 500);
    init_connection(info);
    init_window(info);
    init_swapchain_extension(info);
    init_device(info);

    /* VULKAN_KEY_START */

    /* Create a command pool to allocate our command buffer from */
    VkCommandPoolCreateInfo cmd_pool_info = {};
    cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    cmd_pool_info.pNext = NULL;
    cmd_pool_info.queueFamilyIndex = info.graphics_queue_family_index;
    cmd_pool_info.flags = 0;

    res = vkCreateCommandPool(info.device, &cmd_pool_info, NULL, &info.cmd_pool);
    assert(res == VK_SUCCESS);

    /* Create the command buffer from the command pool */
    VkCommandBufferAllocateInfo cmd = {};
    cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    cmd.pNext = NULL;
    cmd.commandPool = info.cmd_pool;
    cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    cmd.commandBufferCount = 1;

    res = vkAllocateCommandBuffers(info.device, &cmd, &info.cmd);
    assert(res == VK_SUCCESS);

    /* VULKAN_KEY_END */

    VkCommandBuffer cmd_bufs[1] = {info.cmd};
    vkFreeCommandBuffers(info.device, info.cmd_pool, 1, cmd_bufs);
    vkDestroyCommandPool(info.device, info.cmd_pool, NULL);
    destroy_window(info);
    destroy_device(info);
    destroy_instance(info);
    return 0;
}
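/*
 * Not part of the original sample: the command buffer above is allocated and
 * then freed without ever being recorded. A minimal sketch of how recording
 * would typically be bracketed with core Vulkan 1.0 calls, assuming a
 * previously allocated primary command buffer is passed in:
 */
static void record_empty_command_buffer_sketch(VkCommandBuffer cmd) {
    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.pNext = NULL;
    begin_info.flags = 0;               /* e.g. VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT */
    begin_info.pInheritanceInfo = NULL; /* only needed for secondary command buffers */

    VkResult res = vkBeginCommandBuffer(cmd, &begin_info);
    assert(res == VK_SUCCESS);
    /* ... vkCmd* recording calls would go here ... */
    res = vkEndCommandBuffer(cmd);
    assert(res == VK_SUCCESS);
    (void)res;
}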
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Uniform Buffer Sample"; init_global_layer_properties(info); init_instance(info, sample_title); init_enumerate_device(info); init_queue_family_index(info); init_device(info); init_window_size(info, 50, 50); info.Projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.1f, 100.0f); info.View = glm::lookAt( glm::vec3(0, 3, 10), // Camera is at (0,3,10), in World Space glm::vec3(0, 0, 0), // and looks at the origin glm::vec3(0, -1, 0) // Head is up (set to 0,-1,0 to look upside-down) ); info.Model = glm::mat4(1.0f); // Vulkan clip space has inverted Y and half Z. info.Clip = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 1.0f); info.MVP = info.Clip * info.Projection * info.View * info.Model; /* VULKAN_KEY_START */ VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = sizeof(info.MVP); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &info.uniform_data.buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, info.uniform_data.buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &alloc_info.memoryTypeIndex); assert(pass && "No mappable, coherent memory"); res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.uniform_data.mem)); assert(res == VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, info.uniform_data.mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &info.MVP, sizeof(info.MVP)); vkUnmapMemory(info.device, info.uniform_data.mem); res = vkBindBufferMemory(info.device, info.uniform_data.buf, info.uniform_data.mem, 0); assert(res == VK_SUCCESS); info.uniform_data.buffer_info.buffer = info.uniform_data.buf; info.uniform_data.buffer_info.offset = 0; info.uniform_data.buffer_info.range = sizeof(info.MVP); /* VULKAN_KEY_END */ vkDestroyBuffer(info.device, info.uniform_data.buf, NULL); vkFreeMemory(info.device, info.uniform_data.mem, NULL); destroy_device(info); destroy_instance(info); return 0; }
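/*
 * Not shown in the sample above: memory_type_from_properties() picks a memory
 * type that is both allowed by the buffer's memoryTypeBits and has the
 * requested property flags. A minimal sketch of such a helper, assuming
 * sample_info caches the result of vkGetPhysicalDeviceMemoryProperties() in
 * info.memory_properties, as the samples' utility code does:
 */
bool memory_type_from_properties(struct sample_info &info, uint32_t typeBits,
                                 VkFlags requirements_mask, uint32_t *typeIndex) {
    /* Search each allowed memory type for one with all required properties */
    for (uint32_t i = 0; i < info.memory_properties.memoryTypeCount; i++) {
        if ((typeBits & 1) == 1) {
            if ((info.memory_properties.memoryTypes[i].propertyFlags &
                 requirements_mask) == requirements_mask) {
                *typeIndex = i;
                return true;
            }
        }
        typeBits >>= 1;
    }
    /* No memory type matched both the allowed bits and the properties */
    return false;
}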
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Swapchain Initialization Sample"; /* * Set up swapchain: * - Get supported uses for all queues * - Try to find a queue that supports both graphics and present * - If no queue supports both, find a present queue and make sure we have a * graphics queue * - Get a list of supported formats and use the first one * - Get surface properties and present modes and use them to create a swap * chain * - Create swap chain buffers * - For each buffer, create a color attachment view and set its layout to * color attachment */ init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_connection(info); init_window_size(info, 50, 50); init_window(info); /* VULKAN_KEY_START */ // Construct the surface description: #ifdef _WIN32 VkWin32SurfaceCreateInfoKHR createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR; createInfo.pNext = NULL; createInfo.hinstance = info.connection; createInfo.hwnd = info.window; res = vkCreateWin32SurfaceKHR(info.inst, &createInfo, NULL, &info.surface); #else // _WIN32 VkXcbSurfaceCreateInfoKHR createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR; createInfo.pNext = NULL; createInfo.connection = info.connection; createInfo.window = info.window; res = vkCreateXcbSurfaceKHR(info.inst, &createInfo, NULL, &info.surface); #endif // _WIN32 assert(res == VK_SUCCESS); // Iterate over each queue to learn whether it supports presenting: VkBool32 *supportsPresent = (VkBool32 *)malloc(info.queue_count * sizeof(VkBool32)); for (uint32_t i = 0; i < info.queue_count; i++) { vkGetPhysicalDeviceSurfaceSupportKHR(info.gpus[0], i, info.surface, &supportsPresent[i]); } // Search for a graphics queue and a present queue in the array of queue // families, try to find one that supports both uint32_t graphicsQueueNodeIndex = UINT32_MAX; for (uint32_t i = 0; i < info.queue_count; i++) { if ((info.queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) { if (supportsPresent[i] == VK_TRUE) { graphicsQueueNodeIndex = i; break; } } } free(supportsPresent); // Generate error if could not find a queue that supports both a graphics // and present if (graphicsQueueNodeIndex == UINT32_MAX) { std::cout << "Could not find a queue that supports both graphics and " "present\n"; exit(-1); } info.graphics_queue_family_index = graphicsQueueNodeIndex; init_device(info); // Get the list of VkFormats that are supported: uint32_t formatCount; res = vkGetPhysicalDeviceSurfaceFormatsKHR(info.gpus[0], info.surface, &formatCount, NULL); assert(res == VK_SUCCESS); VkSurfaceFormatKHR *surfFormats = (VkSurfaceFormatKHR *)malloc(formatCount * sizeof(VkSurfaceFormatKHR)); res = vkGetPhysicalDeviceSurfaceFormatsKHR(info.gpus[0], info.surface, &formatCount, surfFormats); assert(res == VK_SUCCESS); // If the format list includes just one entry of VK_FORMAT_UNDEFINED, // the surface has no preferred format. Otherwise, at least one // supported format will be returned. 
if (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED) { info.format = VK_FORMAT_B8G8R8A8_UNORM; } else { assert(formatCount >= 1); info.format = surfFormats[0].format; } VkSurfaceCapabilitiesKHR surfCapabilities; res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(info.gpus[0], info.surface, &surfCapabilities); assert(res == VK_SUCCESS); uint32_t presentModeCount; res = vkGetPhysicalDeviceSurfacePresentModesKHR(info.gpus[0], info.surface, &presentModeCount, NULL); assert(res == VK_SUCCESS); VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR)); res = vkGetPhysicalDeviceSurfacePresentModesKHR( info.gpus[0], info.surface, &presentModeCount, presentModes); assert(res == VK_SUCCESS); VkExtent2D swapChainExtent; // width and height are either both -1, or both not -1. if (surfCapabilities.currentExtent.width == (uint32_t)-1) { // If the surface size is undefined, the size is set to // the size of the images requested. swapChainExtent.width = info.width; swapChainExtent.height = info.height; } else { // If the surface size is defined, the swap chain size must match swapChainExtent = surfCapabilities.currentExtent; } // If mailbox mode is available, use it, as is the lowest-latency non- // tearing mode. If not, try IMMEDIATE which will usually be available, // and is fastest (though it tears). If not, fall back to FIFO which is // always available. VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR; for (size_t i = 0; i < presentModeCount; i++) { if (presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) { swapchainPresentMode = VK_PRESENT_MODE_MAILBOX_KHR; break; } if ((swapchainPresentMode != VK_PRESENT_MODE_MAILBOX_KHR) && (presentModes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)) { swapchainPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; } } // Determine the number of VkImage's to use in the swap chain (we desire to // own only 1 image at a time, besides the images being displayed and // queued for display): uint32_t desiredNumberOfSwapChainImages = surfCapabilities.minImageCount + 1; if ((surfCapabilities.maxImageCount > 0) && (desiredNumberOfSwapChainImages > surfCapabilities.maxImageCount)) { // Application must settle for fewer images than desired: desiredNumberOfSwapChainImages = surfCapabilities.maxImageCount; } VkSurfaceTransformFlagBitsKHR preTransform; if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) { preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; } else { preTransform = surfCapabilities.currentTransform; } VkSwapchainCreateInfoKHR swap_chain = {}; swap_chain.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swap_chain.pNext = NULL; swap_chain.surface = info.surface; swap_chain.minImageCount = desiredNumberOfSwapChainImages; swap_chain.imageFormat = info.format; swap_chain.imageExtent.width = swapChainExtent.width; swap_chain.imageExtent.height = swapChainExtent.height; swap_chain.preTransform = preTransform; swap_chain.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; swap_chain.imageArrayLayers = 1; swap_chain.presentMode = swapchainPresentMode; swap_chain.oldSwapchain = NULL; swap_chain.clipped = true; swap_chain.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; swap_chain.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; swap_chain.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; swap_chain.queueFamilyIndexCount = 0; swap_chain.pQueueFamilyIndices = NULL; res = vkCreateSwapchainKHR(info.device, &swap_chain, NULL, &info.swap_chain); assert(res == VK_SUCCESS); res = 
vkGetSwapchainImagesKHR(info.device, info.swap_chain, &info.swapchainImageCount, NULL); assert(res == VK_SUCCESS); VkImage *swapchainImages = (VkImage *)malloc(info.swapchainImageCount * sizeof(VkImage)); assert(swapchainImages); res = vkGetSwapchainImagesKHR(info.device, info.swap_chain, &info.swapchainImageCount, swapchainImages); assert(res == VK_SUCCESS); info.buffers.resize(info.swapchainImageCount); // Going to need a command buffer to send the memory barriers in // set_image_layout but we couldn't have created one before we knew // what our graphics_queue_family_index is, but now that we have it, // create the command buffer init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); vkGetDeviceQueue(info.device, info.graphics_queue_family_index, 0, &info.queue); for (uint32_t i = 0; i < info.swapchainImageCount; i++) { VkImageViewCreateInfo color_image_view = {}; color_image_view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; color_image_view.pNext = NULL; color_image_view.format = info.format; color_image_view.components.r = VK_COMPONENT_SWIZZLE_R; color_image_view.components.g = VK_COMPONENT_SWIZZLE_G; color_image_view.components.b = VK_COMPONENT_SWIZZLE_B; color_image_view.components.a = VK_COMPONENT_SWIZZLE_A; color_image_view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_image_view.subresourceRange.baseMipLevel = 0; color_image_view.subresourceRange.levelCount = 1; color_image_view.subresourceRange.baseArrayLayer = 0; color_image_view.subresourceRange.layerCount = 1; color_image_view.viewType = VK_IMAGE_VIEW_TYPE_2D; color_image_view.flags = 0; info.buffers[i].image = swapchainImages[i]; set_image_layout(info, info.buffers[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); color_image_view.image = info.buffers[i].image; res = vkCreateImageView(info.device, &color_image_view, NULL, &info.buffers[i].view); assert(res == VK_SUCCESS); } free(swapchainImages); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ /* Clean Up */ VkCommandBuffer cmd_bufs[1] = {info.cmd}; vkFreeCommandBuffers(info.device, info.cmd_pool, 1, cmd_bufs); vkDestroyCommandPool(info.device, info.cmd_pool, NULL); for (uint32_t i = 0; i < info.swapchainImageCount; i++) { vkDestroyImageView(info.device, info.buffers[i].view, NULL); } vkDestroySwapchainKHR(info.device, info.swap_chain, NULL); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
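/*
 * Not part of the sample: set_image_layout() above records an image memory
 * barrier into info.cmd to move each swapchain image out of
 * VK_IMAGE_LAYOUT_UNDEFINED. A simplified sketch of such a helper; the real
 * utility derives access masks and stages from the old/new layouts, while this
 * sketch only handles the UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL case used here:
 */
static void set_image_layout_sketch(VkCommandBuffer cmd, VkImage image,
                                    VkImageAspectFlags aspectMask,
                                    VkImageLayout old_layout, VkImageLayout new_layout) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = 0; /* nothing to wait for on an UNDEFINED image */
    barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.oldLayout = old_layout;
    barrier.newLayout = new_layout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange.aspectMask = aspectMask;
    barrier.subresourceRange.baseMipLevel = 0;
    barrier.subresourceRange.levelCount = 1;
    barrier.subresourceRange.baseArrayLayer = 0;
    barrier.subresourceRange.layerCount = 1;
    vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0,
                         0, NULL, 0, NULL, 1, &barrier);
}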
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "SPIR-V Specialization"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); /* VULKAN_KEY_START */ // Pass in nullptr for fragment shader so we can setup specialization init_shaders(info, vertShaderText, nullptr); // This structure maps constant ids to data locations. // NOTE: Padding bool to 32-bits for simplicity const VkSpecializationMapEntry entries[] = // id, offset, size {{5, 0, sizeof(uint32_t)}, {7, 1 * sizeof(uint32_t), sizeof(uint32_t)}, {8, 2 * sizeof(uint32_t), sizeof(uint32_t)}, {9, 3 * sizeof(uint32_t), sizeof(uint32_t)}}; // Initialize the values we want our mini-ubershader to use const bool drawUserColor = true; const float userColor[] = {0.0f, 0.0f, 1.0f}; // Populate our data entry uint32_t data[4] = {}; data[0] = drawUserColor ? 1 : 0; ((float *)data)[1] = userColor[0]; ((float *)data)[2] = userColor[1]; ((float *)data)[3] = userColor[2]; // Set up the info describing our spec map and data const VkSpecializationInfo specInfo = { 4, // mapEntryCount entries, // pMapEntries 4 * sizeof(float), // dataSize data, // pData }; // Provide the specialization data to fragment stage info.shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; info.shaderStages[1].pNext = NULL; info.shaderStages[1].pSpecializationInfo = &specInfo; info.shaderStages[1].flags = 0; info.shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT; info.shaderStages[1].pName = "main"; VkShaderModuleCreateInfo moduleCreateInfo; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.flags = 0; if (use_SPIRV_asm) { // Use the hand edited SPIR-V assembly spv_context spvContext = spvContextCreate(SPV_ENV_VULKAN_1_0); spv_binary fragmentBinary = {}; spv_diagnostic fragmentDiag = {}; spv_result_t fragmentResult = spvTextToBinary(spvContext, fragmentSPIRV_specialized.c_str(), fragmentSPIRV_specialized.length(), &fragmentBinary, &fragmentDiag); if (fragmentDiag) { printf("Diagnostic info from fragment shader:\n"); spvDiagnosticPrint(fragmentDiag); } assert(fragmentResult == SPV_SUCCESS); moduleCreateInfo.codeSize = fragmentBinary->wordCount * sizeof(unsigned int); moduleCreateInfo.pCode = fragmentBinary->code; spvDiagnosticDestroy(fragmentDiag); spvContextDestroy(spvContext); } else { // Convert GLSL to SPIR-V init_glslang(); std::vector<unsigned int> fragSpv; bool U_ASSERT_ONLY retVal = GLSLtoSPV(VK_SHADER_STAGE_FRAGMENT_BIT, fragShaderText, fragSpv); assert(retVal); finalize_glslang(); moduleCreateInfo.codeSize = fragSpv.size() * sizeof(unsigned int); moduleCreateInfo.pCode = fragSpv.data(); } res = vkCreateShaderModule(info.device, &moduleCreateInfo, NULL, &info.shaderStages[1].module); assert(res == VK_SUCCESS); /* VULKAN_KEY_END */ init_framebuffers(info, depthPresent); init_vertex_buffer(info, 
g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "spirv_specialization"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
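/*
 * For reference (not copied from this sample's shader source): the
 * specialization map above only makes sense against a fragment shader that
 * declares matching constant_ids. A GLSL sketch of what such a mini-ubershader
 * could look like, assuming id 5 carries the flag and ids 7-9 the user color:
 */
const char *fragShaderText_specialization_sketch =
    "#version 450\n"
    "layout (constant_id = 5) const bool drawUserColor = false;\n"
    "layout (constant_id = 7) const float userColorR = 0.0;\n"
    "layout (constant_id = 8) const float userColorG = 0.0;\n"
    "layout (constant_id = 9) const float userColorB = 0.0;\n"
    "layout (set = 0, binding = 1) uniform sampler2D tex;\n"
    "layout (location = 0) in vec2 texcoord;\n"
    "layout (location = 0) out vec4 outColor;\n"
    "void main() {\n"
    "   if (drawUserColor)\n"
    "       outColor = vec4(userColorR, userColorG, userColorB, 1.0);\n"
    "   else\n"
    "       outColor = texture(tex, texcoord);\n"
    "}\n";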
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Separate Image Sampler"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); /* VULKAN_KEY_START */ // Sample from a green texture to easily see that we've pulled correct texel // value // Create our separate image struct texture_object texObj; const char *textureName = "green.ppm"; init_image(info, texObj, textureName); info.textures.push_back(texObj); info.texture_data.image_info.sampler = 0; info.texture_data.image_info.imageView = info.textures[0].view; info.texture_data.image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; // Create our separate sampler VkSampler separateSampler = {}; init_sampler(info, separateSampler); VkDescriptorImageInfo samplerInfo = {}; samplerInfo.sampler = separateSampler; // Set up one descriptor set static const unsigned descriptor_set_count = 1; static const unsigned resource_count = 3; static const unsigned resource_type_count = 3; // Create binding and layout for the following, matching contents of shader // binding 0 = uniform buffer (MVP) // binding 1 = texture2D // binding 2 = sampler VkDescriptorSetLayoutBinding resource_binding[resource_count] = {}; resource_binding[0].binding = 0; resource_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; resource_binding[0].descriptorCount = 1; resource_binding[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; resource_binding[0].pImmutableSamplers = NULL; resource_binding[1].binding = 1; resource_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; resource_binding[1].descriptorCount = 1; resource_binding[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; resource_binding[1].pImmutableSamplers = NULL; resource_binding[2].binding = 2; resource_binding[2].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; resource_binding[2].descriptorCount = 1; resource_binding[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; resource_binding[2].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo resource_layout_info[1] = {}; resource_layout_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; resource_layout_info[0].pNext = NULL; resource_layout_info[0].bindingCount = resource_count; resource_layout_info[0].pBindings = resource_binding; VkDescriptorSetLayout descriptor_layouts[1] = {}; res = vkCreateDescriptorSetLayout(info.device, resource_layout_info, NULL, &descriptor_layouts[0]); assert(res == VK_SUCCESS); // Create pipeline layout VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo[1] = {}; pipelineLayoutCreateInfo[0].sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipelineLayoutCreateInfo[0].pNext = NULL; pipelineLayoutCreateInfo[0].pushConstantRangeCount = 0; pipelineLayoutCreateInfo[0].pPushConstantRanges = NULL; 
pipelineLayoutCreateInfo[0].setLayoutCount = descriptor_set_count; pipelineLayoutCreateInfo[0].pSetLayouts = descriptor_layouts; res = vkCreatePipelineLayout(info.device, pipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); // Create a single pool to contain data for our descriptor set VkDescriptorPoolSize pool_sizes[resource_type_count] = {}; pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; pool_sizes[0].descriptorCount = 1; pool_sizes[1].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; pool_sizes[1].descriptorCount = 1; pool_sizes[2].type = VK_DESCRIPTOR_TYPE_SAMPLER; pool_sizes[2].descriptorCount = 1; VkDescriptorPoolCreateInfo pool_info[1] = {}; pool_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info[0].pNext = NULL; pool_info[0].maxSets = descriptor_set_count; pool_info[0].poolSizeCount = resource_type_count; pool_info[0].pPoolSizes = pool_sizes; VkDescriptorPool descriptor_pool[1] = {}; res = vkCreateDescriptorPool(info.device, pool_info, NULL, descriptor_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo alloc_info[1]; alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info[0].pNext = NULL; alloc_info[0].descriptorPool = descriptor_pool[0]; alloc_info[0].descriptorSetCount = descriptor_set_count; alloc_info[0].pSetLayouts = descriptor_layouts; // Populate descriptor sets VkDescriptorSet descriptor_sets[descriptor_set_count] = {}; res = vkAllocateDescriptorSets(info.device, alloc_info, descriptor_sets); assert(res == VK_SUCCESS); VkWriteDescriptorSet descriptor_writes[resource_count]; // Populate with info about our uniform buffer for MVP descriptor_writes[0] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].pNext = NULL; descriptor_writes[0].dstSet = descriptor_sets[0]; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = &info.uniform_data.buffer_info; // populated by init_uniform_buffer() descriptor_writes[0].dstArrayElement = 0; descriptor_writes[0].dstBinding = 0; // Populate with info about our image descriptor_writes[1] = {}; descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[1].pNext = NULL; descriptor_writes[1].dstSet = descriptor_sets[0]; descriptor_writes[1].descriptorCount = 1; descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; descriptor_writes[1].pImageInfo = &info.texture_data.image_info; // populated by init_texture() descriptor_writes[1].dstArrayElement = 0; descriptor_writes[1].dstBinding = 1; // Populate with info about our sampler descriptor_writes[2] = {}; descriptor_writes[2].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[2].pNext = NULL; descriptor_writes[2].dstSet = descriptor_sets[0]; descriptor_writes[2].descriptorCount = 1; descriptor_writes[2].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_writes[2].pImageInfo = &samplerInfo; descriptor_writes[2].dstArrayElement = 0; descriptor_writes[2].dstBinding = 2; vkUpdateDescriptorSets(info.device, resource_count, descriptor_writes, 0, NULL); /* VULKAN_KEY_END */ init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, 
&rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, descriptor_sets, 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "separate_image_sampler"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); vkDestroySampler(info.device, separateSampler, NULL); vkDestroyImageView(info.device, info.textures[0].view, NULL); vkDestroyImage(info.device, info.textures[0].image, NULL); vkFreeMemory(info.device, info.textures[0].mem, NULL); // instead of destroy_descriptor_pool(info); vkDestroyDescriptorPool(info.device, descriptor_pool[0], NULL); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); // instead of destroy_descriptor_and_pipeline_layouts(info); for (int i = 0; i < descriptor_set_count; i++) vkDestroyDescriptorSetLayout(info.device, descriptor_layouts[i], NULL); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
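/*
 * For reference (not taken from this file): the descriptor layout above
 * (binding 0 = uniform buffer for the vertex stage, binding 1 = sampled image,
 * binding 2 = sampler) corresponds to a fragment shader that combines the
 * separate image and sampler objects only at sampling time. A GLSL sketch of
 * that usage:
 */
const char *separate_sampler_frag_sketch =
    "#version 450\n"
    "layout (set = 0, binding = 1) uniform texture2D tex;\n"
    "layout (set = 0, binding = 2) uniform sampler samp;\n"
    "layout (location = 0) in vec2 texcoord;\n"
    "layout (location = 0) out vec4 outColor;\n"
    "void main() {\n"
    "   // Combine the separate image and sampler for the lookup\n"
    "   outColor = texture(sampler2D(tex, samp), texcoord);\n"
    "}\n";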
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Input Attachment Sample"; const bool depthPresent = false; const bool vertexPresent = false; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); VkFormatProperties props; vkGetPhysicalDeviceFormatProperties(info.gpus[0], VK_FORMAT_R8G8B8A8_UNORM, &props); if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) { std::cout << "VK_FORMAT_R8G8B8A8_UNORM format unsupported for input " "attachment\n"; exit(-1); } init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); /* VULKAN_KEY_START */ // Create a framebuffer with 2 attachments, one the color attachment // the shaders render into, and the other an input attachment which // will be cleared to yellow, and then used by the shaders to color // the drawn triangle. Final result should be a yellow triangle // Create the image that will be used as the input attachment // The image for the color attachment is the presentable image already // created in init_swapchain() VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = info.format; image_create_info.extent.width = info.width; image_create_info.extent.height = info.height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = NUM_SAMPLES; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_create_info.usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; VkImage input_image; VkDeviceMemory input_memory; res = vkCreateImage(info.device, &image_create_info, NULL, &input_image); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetImageMemoryRequirements(info.device, input_image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, 0, &mem_alloc.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &mem_alloc, NULL, &input_memory); assert(res == VK_SUCCESS); res = vkBindImageMemory(info.device, input_image, input_memory, 0); assert(res == VK_SUCCESS); // Set the image layout to TRANSFER_DST_OPTIMAL to be ready for clear set_image_layout(info, input_image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); VkImageSubresourceRange srRange = {}; srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; srRange.baseMipLevel = 0; srRange.levelCount = VK_REMAINING_MIP_LEVELS; srRange.baseArrayLayer = 0; 
srRange.layerCount = VK_REMAINING_ARRAY_LAYERS; VkClearColorValue clear_color; clear_color.float32[0] = 1.0f; clear_color.float32[1] = 1.0f; clear_color.float32[2] = 0.0f; clear_color.float32[3] = 0.0f; // Clear the input attachment image to yellow vkCmdClearColorImage(info.cmd, input_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &srRange); // Set the image layout to SHADER_READONLY_OPTIMAL for use by the shaders set_image_layout(info, input_image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT); VkImageViewCreateInfo view_info = {}; view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view_info.pNext = NULL; view_info.image = VK_NULL_HANDLE; view_info.viewType = VK_IMAGE_VIEW_TYPE_2D; view_info.format = info.format; view_info.components.r = VK_COMPONENT_SWIZZLE_R; view_info.components.g = VK_COMPONENT_SWIZZLE_G; view_info.components.b = VK_COMPONENT_SWIZZLE_B; view_info.components.a = VK_COMPONENT_SWIZZLE_A; view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; view_info.subresourceRange.baseMipLevel = 0; view_info.subresourceRange.levelCount = 1; view_info.subresourceRange.baseArrayLayer = 0; view_info.subresourceRange.layerCount = 1; VkImageView input_attachment_view; view_info.image = input_image; res = vkCreateImageView(info.device, &view_info, NULL, &input_attachment_view); assert(res == VK_SUCCESS); VkDescriptorImageInfo input_image_info = {}; input_image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; input_image_info.imageView = input_attachment_view; input_image_info.sampler = VK_NULL_HANDLE; VkDescriptorSetLayoutBinding layout_bindings[1]; layout_bindings[0].binding = 0; layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; layout_bindings[0].descriptorCount = 1; layout_bindings[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; layout_bindings[0].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo descriptor_layout = {}; descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; descriptor_layout.pNext = NULL; descriptor_layout.bindingCount = 1; descriptor_layout.pBindings = layout_bindings; info.desc_layout.resize(NUM_DESCRIPTOR_SETS); res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data()); assert(res == VK_SUCCESS); VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS; pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data(); res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); // First attachment is the color attachment - clear at the beginning of the // renderpass and transition layout to PRESENT_SRC_KHR at the end of // renderpass VkAttachmentDescription attachments[2]; attachments[0].format = info.format; attachments[0].samples = VK_SAMPLE_COUNT_1_BIT; attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; 
attachments[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; attachments[0].flags = 0; // Second attachment is input attachment. Once cleared it should have // width*height yellow pixels. Doing a subpassLoad in the fragment shader // should give the shader the color at the fragments x,y location // from the input attachment attachments[1].format = info.format; attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[1].initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; attachments[1].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; attachments[1].flags = 0; VkAttachmentReference color_reference = {}; color_reference.attachment = 0; color_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference input_reference = {}; input_reference.attachment = 1; input_reference.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 1; subpass.pInputAttachments = &input_reference; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_reference; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = NULL; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.pNext = NULL; rp_info.attachmentCount = 2; rp_info.pAttachments = attachments; rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; rp_info.dependencyCount = 0; rp_info.pDependencies = NULL; res = vkCreateRenderPass(info.device, &rp_info, NULL, &info.render_pass); assert(!res); init_shaders(info, vertShaderText, fragShaderText); VkImageView fb_attachments[2]; fb_attachments[1] = input_attachment_view; VkFramebufferCreateInfo fbc_info = {}; fbc_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fbc_info.pNext = NULL; fbc_info.renderPass = info.render_pass; fbc_info.attachmentCount = 2; fbc_info.pAttachments = fb_attachments; fbc_info.width = info.width; fbc_info.height = info.height; fbc_info.layers = 1; uint32_t i; info.framebuffers = (VkFramebuffer *)malloc(info.swapchainImageCount * sizeof(VkFramebuffer)); for (i = 0; i < info.swapchainImageCount; i++) { fb_attachments[0] = info.buffers[i].view; res = vkCreateFramebuffer(info.device, &fbc_info, NULL, &info.framebuffers[i]); assert(res == VK_SUCCESS); } VkDescriptorPoolSize type_count[1]; type_count[0].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; type_count[0].descriptorCount = 1; VkDescriptorPoolCreateInfo descriptor_pool = {}; descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool.pNext = NULL; descriptor_pool.maxSets = 1; descriptor_pool.poolSizeCount = 1; descriptor_pool.pPoolSizes = type_count; res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo desc_alloc_info[1]; desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; desc_alloc_info[0].pNext = NULL; desc_alloc_info[0].descriptorPool = info.desc_pool; desc_alloc_info[0].descriptorSetCount = 1; desc_alloc_info[0].pSetLayouts = info.desc_layout.data(); info.desc_set.resize(1); res = vkAllocateDescriptorSets(info.device, 
desc_alloc_info, info.desc_set.data()); assert(res == VK_SUCCESS); VkWriteDescriptorSet writes[1]; // Write descriptor set with one write describing input attachment writes[0] = {}; writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[0].dstSet = info.desc_set[0]; writes[0].dstBinding = 0; writes[0].descriptorCount = 1; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; writes[0].pImageInfo = &input_image_info; writes[0].pBufferInfo = nullptr; writes[0].pTexelBufferView = nullptr; writes[0].dstArrayElement = 0; vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL); init_pipeline_cache(info); init_pipeline(info, depthPresent, vertexPresent); // Color attachment clear to gray VkClearValue clear_values; clear_values.color.float32[0] = 0.2f; clear_values.color.float32[1] = 0.2f; clear_values.color.float32[2] = 0.2f; clear_values.color.float32[3] = 0.2f; VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo; imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; imageAcquiredSemaphoreCreateInfo.pNext = NULL; imageAcquiredSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &info.imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 1; rp_begin.pClearValues = &clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); /* VULKAN_KEY_END */ const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); execute_queue_cmdbuf(info, cmd_bufs, drawFence); do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, drawFence, NULL); execute_present_image(info); wait_seconds(1); if (info.save_images) write_ppm(info, "input_attachment"); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); vkDestroyImageView(info.device, input_attachment_view, NULL); vkDestroyImage(info.device, input_image, NULL); vkFreeMemory(info.device, input_memory, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_swap_chain(info); 
destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
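/*
 * For reference (not taken from this file): the single INPUT_ATTACHMENT
 * descriptor at binding 0 above is consumed in the fragment shader via
 * subpassLoad(). A GLSL sketch of that usage, assuming input attachment
 * index 0 maps to the second render pass attachment as set up here:
 */
const char *input_attachment_frag_sketch =
    "#version 450\n"
    "layout (input_attachment_index = 0, set = 0, binding = 0) uniform subpassInput inputAttachment;\n"
    "layout (location = 0) out vec4 outColor;\n"
    "void main() {\n"
    "   // Read the cleared (yellow) value at this fragment's location\n"
    "   outColor = subpassLoad(inputAttachment);\n"
    "}\n";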
int main(int argc, char *argv[]) {
    VkResult U_ASSERT_ONLY res;
    bool U_ASSERT_ONLY pass;
    struct sample_info info = {};
    char sample_title[] = "Draw Cube";
    process_command_line_args(info, argc, argv);
    init_global_layer_properties(info);
    info.instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
#ifdef _WIN32
    info.instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#else
    info.instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
#endif
    info.device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
    init_instance(info, sample_title);
    init_enumerate_device(info);
    init_window_size(info, 500, 500);
    init_connection(info);
    init_window(info);
    init_swapchain_extension(info);
    init_device(info);
    init_command_pool(info);
    init_command_buffer(info);
    execute_begin_command_buffer(info);
    init_device_queue(info);
    init_swap_chain(info);
    init_depth_buffer(info);
    init_uniform_buffer(info);
    init_descriptor_and_pipeline_layouts(info, false);
    init_renderpass(info, DEPTH_PRESENT);
    init_shaders(info, vertShaderText, fragShaderText);
    init_framebuffers(info, DEPTH_PRESENT);
    init_vertex_buffer(info, g_vb_solid_face_colors_Data,
                       sizeof(g_vb_solid_face_colors_Data),
                       sizeof(g_vb_solid_face_colors_Data[0]), false);
    init_descriptor_pool(info, false);
    init_descriptor_set(info, false);
    init_pipeline_cache(info);
    init_pipeline(info, DEPTH_PRESENT);

    /* VULKAN_KEY_START */

    VkClearValue clear_values[2];
    clear_values[0].color.float32[0] = 0.2f;
    clear_values[0].color.float32[1] = 0.2f;
    clear_values[0].color.float32[2] = 0.2f;
    clear_values[0].color.float32[3] = 0.2f;
    clear_values[1].depthStencil.depth = 1.0f;
    clear_values[1].depthStencil.stencil = 0;

    VkSemaphore presentCompleteSemaphore;
    VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo;
    presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    presentCompleteSemaphoreCreateInfo.pNext = NULL;
    /* VkSemaphoreCreateInfo::flags is reserved and must be 0 */
    presentCompleteSemaphoreCreateInfo.flags = 0;

    res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL,
                            &presentCompleteSemaphore);
    assert(res == VK_SUCCESS);

    // Get the index of the next available swapchain image:
    res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX,
                                presentCompleteSemaphore, NULL, &info.current_buffer);
    // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR
    // return codes
    assert(res == VK_SUCCESS);

    /* Allocate a uniform buffer that will take query results. */
    VkBuffer query_result_buf;
    VkDeviceMemory query_result_mem;
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    buf_info.size = 4 * sizeof(uint64_t);
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    res = vkCreateBuffer(info.device, &buf_info, NULL, &query_result_buf);
    assert(res == VK_SUCCESS);

    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(info.device, query_result_buf, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = mem_reqs.size;
    pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits,
                                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                       &alloc_info.memoryTypeIndex);
    assert(pass);

    res = vkAllocateMemory(info.device, &alloc_info, NULL, &query_result_mem);
    assert(res == VK_SUCCESS);
    res = vkBindBufferMemory(info.device, query_result_buf, query_result_mem, 0);
    assert(res == VK_SUCCESS);

    VkQueryPool query_pool;
    VkQueryPoolCreateInfo query_pool_info;
    query_pool_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    query_pool_info.pNext = NULL;
    query_pool_info.queryType = VK_QUERY_TYPE_OCCLUSION;
    query_pool_info.flags = 0;
    query_pool_info.queryCount = 2;
    query_pool_info.pipelineStatistics = 0;

    res = vkCreateQueryPool(info.device, &query_pool_info, NULL, &query_pool);
    assert(res == VK_SUCCESS);

    vkCmdResetQueryPool(info.cmd, query_pool, 0 /*startQuery*/, 2 /*queryCount*/);

    VkRenderPassBeginInfo rp_begin;
    rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rp_begin.pNext = NULL;
    rp_begin.renderPass = info.render_pass;
    rp_begin.framebuffer = info.framebuffers[info.current_buffer];
    rp_begin.renderArea.offset.x = 0;
    rp_begin.renderArea.offset.y = 0;
    rp_begin.renderArea.extent.width = info.width;
    rp_begin.renderArea.extent.height = info.height;
    rp_begin.clearValueCount = 2;
    rp_begin.pClearValues = clear_values;

    vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
    vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline);
    vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS,
                            info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS,
                            info.desc_set.data(), 0, NULL);

    const VkDeviceSize offsets[1] = {0};
    vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets);

    VkViewport viewport;
    viewport.height = (float)info.height;
    viewport.width = (float)info.width;
    viewport.minDepth = (float)0.0f;
    viewport.maxDepth = (float)1.0f;
    viewport.x = 0;
    viewport.y = 0;
    vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport);

    VkRect2D scissor;
    scissor.extent.width = info.width;
    scissor.extent.height = info.height;
    scissor.offset.x = 0;
    scissor.offset.y = 0;
    vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor);

    vkCmdBeginQuery(info.cmd, query_pool, 0 /*slot*/, 0 /*flags*/);
    vkCmdEndQuery(info.cmd, query_pool, 0 /*slot*/);

    vkCmdBeginQuery(info.cmd, query_pool, 1 /*slot*/, 0 /*flags*/);
    vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0);
    /* A query begun inside a render pass must also be ended inside it,
       so end query 1 before ending the render pass */
    vkCmdEndQuery(info.cmd, query_pool, 1 /*slot*/);
    vkCmdEndRenderPass(info.cmd);

    vkCmdCopyQueryPoolResults(
        info.cmd, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/,
        query_result_buf, 0 /*dstOffset*/, sizeof(uint64_t) /*stride*/,
        VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);

    VkImageMemoryBarrier prePresentBarrier = {};
    prePresentBarrier.sType =
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = 0; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); uint64_t samples_passed[4]; samples_passed[0] = 0; samples_passed[1] = 0; res = vkGetQueryPoolResults( info.device, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/, sizeof(samples_passed) /*dataSize*/, samples_passed, sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); assert(res == VK_SUCCESS); std::cout << "vkGetQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed[1] << "\n"; /* Read back query result from buffer */ uint64_t *samples_passed_ptr; res = vkMapMemory(info.device, query_result_mem, 0, mem_reqs.size, 0, (void **)&samples_passed_ptr); assert(res == VK_SUCCESS); std::cout << "vkCmdCopyQueryPoolResults data" << "\n"; std::cout << "samples_passed[0] = " << samples_passed_ptr[0] << "\n"; std::cout << "samples_passed[1] = " << samples_passed_ptr[1] << "\n"; vkUnmapMemory(info.device, query_result_mem); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if 
(info.save_images) write_ppm(info, "occlusion_query"); vkDestroyBuffer(info.device, query_result_buf, NULL); vkFreeMemory(info.device, query_result_mem, NULL); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyQueryPool(info.device, query_pool, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
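/*
 * Not part of the sample: the readback above blocks with
 * VK_QUERY_RESULT_WAIT_BIT. An alternative is to poll with
 * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, which appends one availability word
 * per query. A sketch of such a non-blocking readback, assuming a pool with
 * two 64-bit occlusion queries like the one created above, queried before the
 * pool is destroyed:
 */
static void read_occlusion_results_sketch(VkDevice device, VkQueryPool query_pool) {
    /* Two values per query: [result, availability] */
    uint64_t data[4] = {};
    VkResult res = vkGetQueryPoolResults(
        device, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/,
        sizeof(data), data, 2 * sizeof(uint64_t) /*stride per query*/,
        VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT);
    /* res is VK_NOT_READY if any requested query was still unavailable;
       data[1] and data[3] are nonzero once data[0] / data[2] are valid. */
    (void)res;
}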
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Draw Textured Cube"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, depthPresent); /* VULKAN_KEY_START */ VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = 
VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "drawtexturedcube"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
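/* A minimal sketch of how the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR
 * return codes left as a TODO above could be handled. recreate_swap_chain()
 * is a hypothetical helper, not part of the sample framework. */
static VkResult acquire_next_image(struct sample_info &info, VkSemaphore sem, uint32_t *image_index) {
    VkResult res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, sem, VK_NULL_HANDLE, image_index);
    if (res == VK_ERROR_OUT_OF_DATE_KHR) {
        /* The surface no longer matches the swapchain (e.g. a resize); the
         * swapchain must be rebuilt before another image can be acquired. */
        vkDeviceWaitIdle(info.device);
        recreate_swap_chain(info); /* hypothetical helper */
        res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, sem, VK_NULL_HANDLE, image_index);
    }
    /* VK_SUBOPTIMAL_KHR still delivers a usable image; a common policy is to
     * draw the frame and recreate the swapchain afterwards. */
    assert(res == VK_SUCCESS || res == VK_SUBOPTIMAL_KHR);
    return res;
}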
int sample_main() { struct sample_info info = {}; init_global_layer_properties(info); /* VULKAN_KEY_START */ /* Common validation layers * Loader will chain them together in the order given, * though order doesn't really matter for these validation * layers. */ info.instance_layer_names.push_back("VK_LAYER_GOOGLE_threading"); info.instance_layer_names.push_back("VK_LAYER_LUNARG_device_limits"); info.instance_layer_names.push_back("VK_LAYER_LUNARG_draw_state"); info.instance_layer_names.push_back("VK_LAYER_LUNARG_image"); info.instance_layer_names.push_back("VK_LAYER_LUNARG_mem_tracker"); info.instance_layer_names.push_back("VK_LAYER_LUNARG_object_tracker"); info.instance_layer_names.push_back("VK_LAYER_LUNARG_param_checker"); info.instance_layer_names.push_back("VK_LAYER_LUNARG_swapchain"); info.instance_layer_names.push_back("VK_LAYER_GOOGLE_unique_objects"); if (!demo_check_layers(info.instance_layer_properties, info.instance_layer_names)) { exit(1); } /* Enable debug callback extension */ info.instance_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); VkApplicationInfo app_info = {}; app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; app_info.pNext = NULL; app_info.pApplicationName = "vulkansamples_enable_validation_with_callback"; app_info.applicationVersion = 1; app_info.pEngineName = "Vulkan Samples"; app_info.engineVersion = 1; app_info.apiVersion = VK_API_VERSION; VkInstanceCreateInfo inst_info = {}; inst_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; inst_info.pNext = NULL; inst_info.flags = 0; inst_info.pApplicationInfo = &app_info; inst_info.enabledLayerCount = info.instance_layer_names.size(); inst_info.ppEnabledLayerNames = info.instance_layer_names.size() ? info.instance_layer_names.data() : NULL; inst_info.enabledExtensionCount = info.instance_extension_names.size(); inst_info.ppEnabledExtensionNames = info.instance_extension_names.data(); VkResult res = vkCreateInstance(&inst_info, NULL, &info.inst); assert(res == VK_SUCCESS); init_enumerate_device(info); init_device_layer_properties(info); /* * Common validation layers * Loader will chain them together in the order given, * though order doesn't really matter for these validation * layers. * Instance layers and Device layers are independent so * must enable validation layers for both to see everything. 
*/ info.device_layer_names.push_back("VK_LAYER_GOOGLE_threading"); info.device_layer_names.push_back("VK_LAYER_LUNARG_device_limits"); info.device_layer_names.push_back("VK_LAYER_LUNARG_draw_state"); info.device_layer_names.push_back("VK_LAYER_LUNARG_image"); info.device_layer_names.push_back("VK_LAYER_LUNARG_mem_tracker"); info.device_layer_names.push_back("VK_LAYER_LUNARG_object_tracker"); info.device_layer_names.push_back("VK_LAYER_LUNARG_param_checker"); info.device_layer_names.push_back("VK_LAYER_LUNARG_swapchain"); info.device_layer_names.push_back("VK_LAYER_GOOGLE_unique_objects"); if (!demo_check_layers(info.device_layer_properties, info.device_layer_names)) { exit(1); } float queue_priorities[1] = {0.0}; VkDeviceQueueCreateInfo queue_info = {}; vkGetPhysicalDeviceQueueFamilyProperties(info.gpus[0], &info.queue_count, NULL); assert(info.queue_count >= 1); info.queue_props.resize(info.queue_count); vkGetPhysicalDeviceQueueFamilyProperties(info.gpus[0], &info.queue_count, info.queue_props.data()); assert(info.queue_count >= 1); bool found = false; for (unsigned int i = 0; i < info.queue_count; i++) { if (info.queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { queue_info.queueFamilyIndex = i; found = true; break; } } assert(found); assert(info.queue_count >= 1); queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info.pNext = NULL; queue_info.queueCount = 1; queue_info.pQueuePriorities = queue_priorities; VkDeviceCreateInfo device_info = {}; device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_info.pNext = NULL; device_info.queueCreateInfoCount = 1; device_info.pQueueCreateInfos = &queue_info; device_info.enabledLayerCount = info.device_layer_names.size(); device_info.ppEnabledLayerNames = device_info.enabledLayerCount ? info.device_layer_names.data() : NULL; device_info.enabledExtensionCount = info.device_extension_names.size(); device_info.ppEnabledExtensionNames = device_info.enabledExtensionCount ? info.device_extension_names.data() : NULL; device_info.pEnabledFeatures = NULL; res = vkCreateDevice(info.gpus[0], &device_info, NULL, &info.device); assert(res == VK_SUCCESS); VkDebugReportCallbackEXT debug_report_callback; info.dbgCreateDebugReportCallback = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr( info.inst, "vkCreateDebugReportCallbackEXT"); if (!info.dbgCreateDebugReportCallback) { std::cout << "GetInstanceProcAddr: Unable to find " "vkCreateDebugReportCallbackEXT function." << std::endl; exit(1); } info.dbgDestroyDebugReportCallback = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr( info.inst, "vkDestroyDebugReportCallbackEXT"); if (!info.dbgDestroyDebugReportCallback) { std::cout << "GetInstanceProcAddr: Unable to find " "vkDestroyDebugReportCallbackEXT function." 
<< std::endl; exit(1); } VkDebugReportCallbackCreateInfoEXT create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT; create_info.pNext = NULL; create_info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT; create_info.pfnCallback = dbgFunc; create_info.pUserData = NULL; res = info.dbgCreateDebugReportCallback(info.inst, &create_info, NULL, &debug_report_callback); switch (res) { case VK_SUCCESS: break; case VK_ERROR_OUT_OF_HOST_MEMORY: std::cout << "dbgCreateDebugReportCallback: out of host memory\n" << std::endl; exit(1); break; default: std::cout << "dbgCreateDebugReportCallback: unknown failure\n" << std::endl; exit(1); break; } /* Create a command pool */ VkCommandPoolCreateInfo cmd_pool_info = {}; cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_info.pNext = NULL; cmd_pool_info.queueFamilyIndex = info.graphics_queue_family_index; cmd_pool_info.flags = 0; res = vkCreateCommandPool(info.device, &cmd_pool_info, NULL, &info.cmd_pool); assert(res == VK_SUCCESS); /* * Destroying the device before destroying the command pool above * will trigger a validation error. */ std::cout << "calling vkDestroyDevice before destroying command pool\n"; std::cout << "this should result in an error\n"; vkDestroyDevice(info.device, NULL); /* Clean up callback */ info.dbgDestroyDebugReportCallback(info.inst, debug_report_callback, NULL); /* VULKAN_KEY_END */ vkDestroyInstance(info.inst, NULL); return 0; }
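/* The sample above registers dbgFunc as its debug report callback, but the
 * callback body is not shown in this listing. A minimal sketch matching the
 * PFN_vkDebugReportCallbackEXT signature from VK_EXT_debug_report (the
 * message formatting is only illustrative): */
VKAPI_ATTR VkBool32 VKAPI_CALL dbgFunc(VkDebugReportFlagsEXT msgFlags, VkDebugReportObjectTypeEXT objType,
                                       uint64_t srcObject, size_t location, int32_t msgCode,
                                       const char *pLayerPrefix, const char *pMsg, void *pUserData) {
    if (msgFlags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        std::cout << "ERROR: [" << pLayerPrefix << "] Code " << msgCode << " : " << pMsg << std::endl;
    } else if (msgFlags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        std::cout << "WARNING: [" << pLayerPrefix << "] Code " << msgCode << " : " << pMsg << std::endl;
    }
    /* Returning VK_FALSE tells the layer not to abort the call that triggered the report. */
    return VK_FALSE;
}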
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Renderpass Sample"; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_connection(info); init_window_size(info, 50, 50); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); /* VULKAN_KEY_START */ /* Need attachments for render target and depth buffer */ VkAttachmentDescription attachments[2]; attachments[0].format = info.format; attachments[0].samples = NUM_SAMPLES; attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments[0].flags = 0; attachments[1].format = info.depth.format; attachments[1].samples = NUM_SAMPLES; attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[1].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments[1].flags = 0; VkAttachmentReference color_reference = {}; color_reference.attachment = 0; color_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference depth_reference = {}; depth_reference.attachment = 1; depth_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_reference; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = &depth_reference; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.pNext = NULL; rp_info.attachmentCount = 2; rp_info.pAttachments = attachments; rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; rp_info.dependencyCount = 0; rp_info.pDependencies = NULL; res = vkCreateRenderPass(info.device, &rp_info, NULL, &info.render_pass); assert(res == VK_SUCCESS); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ vkDestroyRenderPass(info.device, info.render_pass, NULL); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
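/* The render pass above passes dependencyCount = 0 and relies on the implicit
 * external dependencies. For illustration, a sketch of an explicit dependency
 * that could be supplied instead, synchronizing color attachment writes with
 * whatever executed before the render pass: */
VkSubpassDependency dependency = {};
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;  /* everything before the render pass */
dependency.dstSubpass = 0;                    /* the single subpass above */
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.srcAccessMask = 0;
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dependency.dependencyFlags = 0;
/* It would be hooked up with: rp_info.dependencyCount = 1; rp_info.pDependencies = &dependency; */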
// clang-format on int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "SPIR-V Assembly"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); /* VULKAN_KEY_START */ // Init the assembler context spv_context spvContext = spvContextCreate(); // Convert the vertex assembly into binary format spv_binary vertexBinary = {}; spv_diagnostic vertexDiag = {}; spv_result_t vertexResult = spvTextToBinary(spvContext, vertexSPIRV.c_str(), vertexSPIRV.length(), &vertexBinary, &vertexDiag); if (vertexDiag) { printf("Diagnostic info from vertex shader:\n"); spvDiagnosticPrint(vertexDiag); } assert(vertexResult == SPV_SUCCESS); // Convert the fragment assembly into binary format spv_binary fragmentBinary = {}; spv_diagnostic fragmentDiag = {}; spv_result_t fragmentResult = spvTextToBinary(spvContext, fragmentSPIRV.c_str(), fragmentSPIRV.length(), &fragmentBinary, &fragmentDiag); if (fragmentDiag) { printf("Diagnostic info from fragment shader:\n"); spvDiagnosticPrint(fragmentDiag); } assert(fragmentResult == SPV_SUCCESS); info.shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; info.shaderStages[0].pNext = NULL; info.shaderStages[0].pSpecializationInfo = NULL; info.shaderStages[0].flags = 0; info.shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; info.shaderStages[0].pName = "main"; VkShaderModuleCreateInfo moduleCreateInfo; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.flags = 0; // Use wordCount and code pointers from the spv_binary moduleCreateInfo.codeSize = vertexBinary->wordCount * sizeof(unsigned int); moduleCreateInfo.pCode = vertexBinary->code; res = vkCreateShaderModule(info.device, &moduleCreateInfo, NULL, &info.shaderStages[0].module); assert(res == VK_SUCCESS); info.shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; info.shaderStages[1].pNext = NULL; info.shaderStages[1].pSpecializationInfo = NULL; info.shaderStages[1].flags = 0; info.shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT; info.shaderStages[1].pName = "main"; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.flags = 0; // Use wordCount and code pointers from the spv_binary moduleCreateInfo.codeSize = fragmentBinary->wordCount * sizeof(unsigned int); moduleCreateInfo.pCode = fragmentBinary->code; res = vkCreateShaderModule(info.device, &moduleCreateInfo, NULL, &info.shaderStages[1].module); assert(res == VK_SUCCESS); // Clean up the diagnostics spvDiagnosticDestroy(vertexDiag); spvDiagnosticDestroy(fragmentDiag); // Clean up the assembler context spvContextDestroy(spvContext); /* VULKAN_KEY_END */ init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, 
true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "spirv_assembly"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
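/* Optionally, the word streams produced by spvTextToBinary above can be
 * sanity-checked before creating shader modules; 0x07230203 is the SPIR-V
 * magic number, and spv_binary exposes the stream via code/wordCount as used
 * in the sample: */
static bool is_valid_spirv(const spv_binary binary) {
    return binary != nullptr && binary->wordCount > 0 && binary->code[0] == 0x07230203u;
}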
int sample_main(int argc, char *argv[]) { VkExtensionProperties *vk_props = NULL; uint32_t instance_extension_count; VkResult res; /* VULKAN_KEY_START */ struct sample_info info = {}; init_global_layer_properties(info); /* * It's possible, though very rare, that the number of * instance extensions could change. For example, installing something * could include new extensions that the loader would pick up * between the initial query for the count and the * request for VkExtensionProperties. If that happens, * the number of VkExtensionProperties could exceed the count * previously given. To alert the app to this change, * vkEnumerateInstanceExtensionProperties will return a VK_INCOMPLETE * status. * The count parameter will be updated with the number of * entries actually loaded into the data pointer. */ do { res = vkEnumerateInstanceExtensionProperties( NULL, &instance_extension_count, NULL); if (res) break; if (instance_extension_count == 0) { break; } vk_props = (VkExtensionProperties *)realloc( vk_props, instance_extension_count * sizeof(VkExtensionProperties)); res = vkEnumerateInstanceExtensionProperties( NULL, &instance_extension_count, vk_props); } while (res == VK_INCOMPLETE); bool found_extension = false; for (uint32_t i = 0; i < instance_extension_count; i++) { if (!strcmp(vk_props[i].extensionName, VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { found_extension = true; } } if (!found_extension) { std::cout << "Something went very wrong, cannot find " "VK_EXT_debug_report extension" << std::endl; exit(1); } const char *extension_names[1] = {VK_EXT_DEBUG_REPORT_EXTENSION_NAME}; // initialize the VkApplicationInfo structure VkApplicationInfo app_info = {}; app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; app_info.pNext = NULL; app_info.pApplicationName = APP_SHORT_NAME; app_info.applicationVersion = 1; app_info.pEngineName = APP_SHORT_NAME; app_info.engineVersion = 1; app_info.apiVersion = VK_API_VERSION_1_0; // initialize the VkInstanceCreateInfo structure VkInstanceCreateInfo inst_info = {}; inst_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; inst_info.pNext = NULL; inst_info.flags = 0; inst_info.pApplicationInfo = &app_info; inst_info.enabledExtensionCount = 1; inst_info.ppEnabledExtensionNames = extension_names; inst_info.enabledLayerCount = 0; inst_info.ppEnabledLayerNames = NULL; VkInstance inst; res = vkCreateInstance(&inst_info, NULL, &inst); if (res == VK_ERROR_INCOMPATIBLE_DRIVER) { std::cout << "cannot find a compatible Vulkan ICD\n"; exit(-1); } else if (res) { std::cout << "unknown error\n"; exit(-1); } PFN_vkCreateDebugReportCallbackEXT dbgCreateDebugReportCallback; PFN_vkDestroyDebugReportCallbackEXT dbgDestroyDebugReportCallback; VkDebugReportCallbackEXT debug_report_callback; dbgCreateDebugReportCallback = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr( inst, "vkCreateDebugReportCallbackEXT"); if (!dbgCreateDebugReportCallback) { std::cout << "GetInstanceProcAddr: Unable to find " "vkCreateDebugReportCallbackEXT function." << std::endl; exit(1); } dbgDestroyDebugReportCallback = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr( inst, "vkDestroyDebugReportCallbackEXT"); if (!dbgDestroyDebugReportCallback) { std::cout << "GetInstanceProcAddr: Unable to find " "vkDestroyDebugReportCallbackEXT function."
<< std::endl; exit(1); } VkDebugReportCallbackCreateInfoEXT create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT; create_info.pNext = NULL; create_info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT; create_info.pfnCallback = dbgFunc; create_info.pUserData = NULL; res = dbgCreateDebugReportCallback(inst, &create_info, NULL, &debug_report_callback); switch (res) { case VK_SUCCESS: break; case VK_ERROR_OUT_OF_HOST_MEMORY: std::cout << "dbgCreateDebugReportCallback: out of host memory\n" << std::endl; exit(1); break; default: std::cout << "dbgCreateDebugReportCallback: unknown failure\n" << std::endl; exit(1); break; } /* Clean up callback */ dbgDestroyDebugReportCallback(inst, debug_report_callback, NULL); vkDestroyInstance(inst, NULL); /* VULKAN_KEY_END */ return 0; }
int sample_main(int argc, char *argv[]) { struct sample_info info = {}; init_global_layer_properties(info); init_instance(info, "vulkansamples_device"); init_enumerate_device(info); /* VULKAN_KEY_START */ VkDeviceQueueCreateInfo queue_info = {}; vkGetPhysicalDeviceQueueFamilyProperties(info.gpus[0], &info.queue_count, NULL); assert(info.queue_count >= 1); info.queue_props.resize(info.queue_count); vkGetPhysicalDeviceQueueFamilyProperties(info.gpus[0], &info.queue_count, info.queue_props.data()); assert(info.queue_count >= 1); bool found = false; for (unsigned int i = 0; i < info.queue_count; i++) { if (info.queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { queue_info.queueFamilyIndex = i; found = true; break; } } assert(found); assert(info.queue_count >= 1); float queue_priorities[1] = {0.0}; queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; queue_info.pNext = NULL; queue_info.queueCount = 1; queue_info.pQueuePriorities = queue_priorities; VkDeviceCreateInfo device_info = {}; device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_info.pNext = NULL; device_info.queueCreateInfoCount = 1; device_info.pQueueCreateInfos = &queue_info; device_info.enabledExtensionCount = 0; device_info.ppEnabledExtensionNames = NULL; device_info.enabledLayerCount = 0; device_info.ppEnabledLayerNames = NULL; device_info.pEnabledFeatures = NULL; VkDevice device; VkResult U_ASSERT_ONLY res = vkCreateDevice(info.gpus[0], &device_info, NULL, &device); assert(res == VK_SUCCESS); vkDestroyDevice(device, NULL); /* VULKAN_KEY_END */ destroy_instance(info); return 0; }
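/* After vkCreateDevice succeeds, the queue requested through queue_info can
 * be retrieved before the device is destroyed; a minimal sketch (queue index
 * 0 of the graphics-capable family found above): */
VkQueue graphics_queue;
vkGetDeviceQueue(device, queue_info.queueFamilyIndex, 0, &graphics_queue);
/* vkGetDeviceQueue returns void; the handle stays valid for the lifetime of the device. */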
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Events"; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); /* VULKAN_KEY_START */ // Start with a trivial command buffer and make sure fence wait doesn't time out info.viewport.height = 10.0; info.viewport.width = 10.0; info.viewport.minDepth = (float)0.0f; info.viewport.maxDepth = (float)1.0f; info.viewport.x = 0; info.viewport.y = 0; vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &info.viewport); execute_end_command_buffer(info); VkFence fence; VkFenceCreateInfo fenceInfo; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &fence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; res = vkQueueSubmit(info.graphics_queue, 1, submit_info, fence); assert(res == VK_SUCCESS); // Make sure timeout is long enough for a simple command buffer without // waiting for an event int timeouts = -1; do { res = vkWaitForFences(info.device, 1, &fence, VK_TRUE, FENCE_TIMEOUT); timeouts++; } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); if (timeouts != 0) { std::cout << "Unsuitable timeout value, exiting\n"; exit(-1); } vkResetCommandBuffer(info.cmd, 0); // Now create an event and wait for it on the GPU VkEvent event; VkEventCreateInfo eventInfo = {}; eventInfo.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; eventInfo.pNext = NULL; eventInfo.flags = 0; vkCreateEvent(info.device, &eventInfo, NULL, &event); execute_begin_command_buffer(info); vkCmdWaitEvents(info.cmd, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, nullptr, 0, nullptr,0, nullptr); execute_end_command_buffer(info); vkResetFences(info.device, 1, &fence); // Note that stepping through this code in the debugger is a bad idea because the // GPU can TDR waiting for the event. Execute the code from vkQueueSubmit through // vkSetEvent without breakpoints pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; res = vkQueueSubmit(info.graphics_queue, 1, submit_info, fence); assert(res == VK_SUCCESS); // We should timeout waiting for the fence because the GPU should be waiting // on the event res = vkWaitForFences(info.device, 1, &fence, VK_TRUE, FENCE_TIMEOUT); if (res != VK_TIMEOUT) { std::cout << "Didn't get expected timeout in vkWaitForFences, exiting\n"; exit(-1); } // Set the event from the CPU and wait for the fence. 
This should succeed // since we set the event vkSetEvent(info.device, event); do { res = vkWaitForFences(info.device, 1, &fence, VK_TRUE, FENCE_TIMEOUT); } while ( res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkResetCommandBuffer(info.cmd, 0); vkResetFences(info.device, 1, &fence); vkResetEvent(info.device,event); // Now set the event from the GPU and wait on the CPU execute_begin_command_buffer(info); vkCmdSetEvent(info.cmd, event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT); execute_end_command_buffer(info); // Look for the event on the CPU. It should be RESET since we haven't sent // the command buffer yet. res = vkGetEventStatus(info.device, event); assert(res == VK_EVENT_RESET); // Send the command buffer and loop waiting for the event res = vkQueueSubmit(info.graphics_queue, 1, submit_info, fence); assert(res == VK_SUCCESS); int polls = 0; do { res = vkGetEventStatus(info.device, event); polls++; } while (res != VK_EVENT_SET); printf ("%d polls to find the event set\n", polls); do { res = vkWaitForFences(info.device, 1, &fence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyEvent(info.device, event, NULL); vkDestroyFence(info.device, fence, NULL); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_instance(info); return 0; }
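/* The status loop above spins on vkGetEventStatus until it returns
 * VK_EVENT_SET. A bounded variant, assuming a caller-chosen max_polls budget,
 * might look like this: */
static bool wait_for_event_set(VkDevice device, VkEvent event, int max_polls) {
    for (int i = 0; i < max_polls; i++) {
        if (vkGetEventStatus(device, event) == VK_EVENT_SET) return true;
    }
    return false; /* still VK_EVENT_RESET (or an error) after max_polls queries */
}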
int sample_main() { VkResult U_ASSERT_ONLY res; char sample_title[] = "MT Cmd Buffer Sample"; const bool depthPresent = false; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &info.presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = 0; pPipelineLayoutCreateInfo.pSetLayouts = NULL; res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); init_renderpass( info, depthPresent, false); // Can't clear in renderpass load because we re-use pipeline init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); /* The binding and attributes should be the same for all 3 vertex buffers, * so init here */ info.vi_binding.binding = 0; info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; info.vi_binding.stride = sizeof(triData[0]); info.vi_attribs[0].binding = 0; info.vi_attribs[0].location = 0; info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[0].offset = 0; info.vi_attribs[1].binding = 0; info.vi_attribs[1].location = 1; info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[1].offset = 16; init_pipeline_cache(info); init_pipeline(info, depthPresent); VkImageSubresourceRange srRange = {}; srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; srRange.baseMipLevel = 0; srRange.levelCount = VK_REMAINING_MIP_LEVELS; srRange.baseArrayLayer = 0; srRange.layerCount = VK_REMAINING_ARRAY_LAYERS; VkClearColorValue clear_color[1]; clear_color[0].float32[0] = 0.2f; clear_color[0].float32[1] = 0.2f; clear_color[0].float32[2] = 0.2f; clear_color[0].float32[3] = 0.2f; /* We need to do the clear here instead of as a load op since all 3 threads * share the same pipeline / renderpass */ set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); vkCmdClearColorImage(info.cmd, info.buffers[info.current_buffer].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); res = 
vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFence clearFence; init_fence(info, clearFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &info.presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = NULL; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, clearFence); assert(!res); do { res = vkWaitForFences(info.device, 1, &clearFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, clearFence, NULL); /* VULKAN_KEY_START */ /* Use the fourth slot in the command buffer array for the presentation */ /* barrier using the command buffer in info */ threadCmdBufs[3] = info.cmd; sample_platform_thread vk_threads[3]; for (size_t i = 0; i < 3; i++) { sample_platform_thread_create(&vk_threads[i], &per_thread_code, (void *)i); } VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = 0; cmd_buf_info.pInheritanceInfo = NULL; res = vkBeginCommandBuffer(threadCmdBufs[3], &cmd_buf_info); assert(res == VK_SUCCESS); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(threadCmdBufs[3], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(threadCmdBufs[3]); assert(res == VK_SUCCESS); pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 4; /* 3 from threads + prePresentBarrier */ submit_info[0].pCommandBuffers = threadCmdBufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Wait for all of the threads to finish */ for (int i = 0; i < 3; i++) { sample_platform_thread_join(vk_threads[i], NULL); } VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); /* Queue the command buffer for execution */ res = 
vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(!res); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); execute_present_image(info); wait_seconds(1); /* VULKAN_KEY_END */ vkDestroyBuffer(info.device, vertex_buffer[0].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[1].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[2].buf, NULL); vkFreeMemory(info.device, vertex_buffer[0].mem, NULL); vkFreeMemory(info.device, vertex_buffer[1].mem, NULL); vkFreeMemory(info.device, vertex_buffer[2].mem, NULL); for (int i = 0; i < 3; i++) { vkFreeCommandBuffers(info.device, threadCmdPools[i], 1, &threadCmdBufs[i]); vkDestroyCommandPool(info.device, threadCmdPools[i], NULL); } vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_window(info); destroy_device(info); destroy_instance(info); return 0; }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Depth Buffer Sample"; /* * Make a depth buffer: * - Create an Image to be the depth buffer * - Find memory requirements * - Allocate and bind memory * - Set the image layout * - Create an attachment view */ init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); /* VULKAN_KEY_START */ VkImageCreateInfo image_info = {}; const VkFormat depth_format = VK_FORMAT_D16_UNORM; VkFormatProperties props; vkGetPhysicalDeviceFormatProperties(info.gpus[0], depth_format, &props); if (props.linearTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { image_info.tiling = VK_IMAGE_TILING_LINEAR; } else if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { image_info.tiling = VK_IMAGE_TILING_OPTIMAL; } else { /* Try other depth formats? */ std::cout << "VK_FORMAT_D16_UNORM Unsupported.\n"; exit(-1); } image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_info.pNext = NULL; image_info.imageType = VK_IMAGE_TYPE_2D; image_info.format = depth_format; image_info.extent.width = info.width; image_info.extent.height = info.height; image_info.extent.depth = 1; image_info.mipLevels = 1; image_info.arrayLayers = 1; image_info.samples = NUM_SAMPLES; image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; image_info.queueFamilyIndexCount = 0; image_info.pQueueFamilyIndices = NULL; image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; VkImageViewCreateInfo view_info = {}; view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view_info.pNext = NULL; view_info.image = VK_NULL_HANDLE; view_info.format = depth_format; view_info.components.r = VK_COMPONENT_SWIZZLE_R; view_info.components.g = VK_COMPONENT_SWIZZLE_G; view_info.components.b = VK_COMPONENT_SWIZZLE_B; view_info.components.a = VK_COMPONENT_SWIZZLE_A; view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; view_info.subresourceRange.baseMipLevel = 0; view_info.subresourceRange.levelCount = 1; view_info.subresourceRange.baseArrayLayer = 0; view_info.subresourceRange.layerCount = 1; view_info.viewType = VK_IMAGE_VIEW_TYPE_2D; view_info.flags = 0; VkMemoryRequirements mem_reqs; info.depth.format = depth_format; /* Create image */ res = vkCreateImage(info.device, &image_info, NULL, &info.depth.image); assert(res == VK_SUCCESS); vkGetImageMemoryRequirements(info.device, info.depth.image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; /* Use the memory properties to determine the type of memory required */ pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, 0, /* No Requirements */ &mem_alloc.memoryTypeIndex); assert(pass); /* Allocate memory */ res = vkAllocateMemory(info.device, &mem_alloc, NULL, &info.depth.mem); assert(res == VK_SUCCESS); /* Bind memory */ res = vkBindImageMemory(info.device, info.depth.image, info.depth.mem, 0); 
assert(res == VK_SUCCESS); /* Set the image layout to depth stencil optimal */ set_image_layout(info, info.depth.image, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); /* Create image view */ view_info.image = info.depth.image; res = vkCreateImageView(info.device, &view_info, NULL, &info.depth.view); assert(res == VK_SUCCESS); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ /* Clean Up */ vkDestroyImageView(info.device, info.depth.view, NULL); vkDestroyImage(info.device, info.depth.image, NULL); vkFreeMemory(info.device, info.depth.mem, NULL); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
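/* memory_type_from_properties() belongs to the sample framework and is not
 * shown in this listing. A minimal sketch of what such a helper typically
 * does, assuming info.memory_properties was previously filled in by
 * vkGetPhysicalDeviceMemoryProperties: */
static bool memory_type_from_properties(struct sample_info &info, uint32_t typeBits,
                                        VkFlags requirements_mask, uint32_t *typeIndex) {
    /* Search the memory types for the first index with the required properties */
    for (uint32_t i = 0; i < info.memory_properties.memoryTypeCount; i++) {
        if ((typeBits & 1) == 1) {
            if ((info.memory_properties.memoryTypes[i].propertyFlags & requirements_mask) == requirements_mask) {
                *typeIndex = i;
                return true;
            }
        }
        typeBits >>= 1;
    }
    /* No memory type matched */
    return false;
}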
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Texture Initialization Sample"; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_connection(info); init_window_size(info, 50, 50); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); /* VULKAN_KEY_START */ /* * Set up textures: * - Create a linear tiled image * - Map it and write the texture data into it * - If linear images cannot be used as textures, create an optimally * tiled image and blit from the linearly tiled image to the optimally * tiled image * - * - * - */ struct texture_object texObj; std::string filename = get_base_data_dir(); filename.append("lunarg.ppm"); if (!read_ppm(filename.c_str(), texObj.tex_width, texObj.tex_height, 0, NULL)) { std::cout << "Could not read texture file lunarg.ppm\n"; exit(-1); } VkFormatProperties formatProps; vkGetPhysicalDeviceFormatProperties(info.gpus[0], VK_FORMAT_R8G8B8A8_UNORM, &formatProps); /* See if we can use a linear tiled image for a texture, if not, we will * need a staging image for the texture data */ bool needStaging = (!(formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) ? true : false; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = texObj.tex_width; image_create_info.extent.height = texObj.tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = NUM_SAMPLES; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = needStaging ? VK_IMAGE_USAGE_TRANSFER_SRC_BIT : VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; VkImage mappableImage; VkDeviceMemory mappableMemory; VkMemoryRequirements mem_reqs; /* Create a mappable image. It will be the texture if linear images are ok * to be textures or it will be the staging image if they are not. 
*/ res = vkCreateImage(info.device, &image_create_info, NULL, &mappableImage); assert(res == VK_SUCCESS); vkGetImageMemoryRequirements(info.device, mappableImage, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; /* Find the memory type that is host mappable */ pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &mem_alloc.memoryTypeIndex); assert(pass); /* allocate memory */ res = vkAllocateMemory(info.device, &mem_alloc, NULL, &(mappableMemory)); assert(res == VK_SUCCESS); /* bind memory */ res = vkBindImageMemory(info.device, mappableImage, mappableMemory, 0); assert(res == VK_SUCCESS); VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout; void *data; /* Get the subresource layout so we know what the row pitch is */ vkGetImageSubresourceLayout(info.device, mappableImage, &subres, &layout); res = vkMapMemory(info.device, mappableMemory, 0, mem_reqs.size, 0, &data); assert(res == VK_SUCCESS); /* Read the ppm file into the mappable image's memory */ if (!read_ppm(filename.c_str(), texObj.tex_width, texObj.tex_height, layout.rowPitch, (unsigned char *)data)) { std::cout << "Could not load texture file lunarg.ppm\n"; exit(-1); } vkUnmapMemory(info.device, mappableMemory); if (!needStaging) { /* If we can use the linear tiled image as a texture, just do it */ texObj.image = mappableImage; texObj.mem = mappableMemory; texObj.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; set_image_layout(info, texObj.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, texObj.imageLayout); } else { /* The mappable image cannot be our texture, so create an optimally * tiled image and blit to it */ image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; res = vkCreateImage(info.device, &image_create_info, NULL, &texObj.image); assert(res == VK_SUCCESS); vkGetImageMemoryRequirements(info.device, texObj.image, &mem_reqs); mem_alloc.allocationSize = mem_reqs.size; /* Find memory type - don't specify any mapping requirements */ pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, 0, &mem_alloc.memoryTypeIndex); assert(pass); /* allocate memory */ res = vkAllocateMemory(info.device, &mem_alloc, NULL, &texObj.mem); assert(res == VK_SUCCESS); /* bind memory */ res = vkBindImageMemory(info.device, texObj.image, texObj.mem, 0); assert(res == VK_SUCCESS); /* Since we're going to blit from the mappable image, set its layout to * SOURCE_OPTIMAL */ /* Side effect is that this will create info.cmd */ set_image_layout(info, mappableImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); /* Since we're going to blit to the texture image, set its layout to * DESTINATION_OPTIMAL */ set_image_layout(info, texObj.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); VkImageCopy copy_region; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.srcOffset.x = 0; copy_region.srcOffset.y = 0; copy_region.srcOffset.z = 0; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.mipLevel = 0; 
copy_region.dstSubresource.baseArrayLayer = 0; copy_region.dstSubresource.layerCount = 1; copy_region.dstOffset.x = 0; copy_region.dstOffset.y = 0; copy_region.dstOffset.z = 0; copy_region.extent.width = texObj.tex_width; copy_region.extent.height = texObj.tex_height; copy_region.extent.depth = 1; /* Put the copy command into the command buffer */ vkCmdCopyImage(info.cmd, mappableImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, texObj.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region); /* Set the layout for the texture image from DESTINATION_OPTIMAL to * SHADER_READ_ONLY */ texObj.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; set_image_layout(info, texObj.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, texObj.imageLayout); } execute_end_command_buffer(info); execute_queue_command_buffer(info); VkSamplerCreateInfo samplerCreateInfo = {}; samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; samplerCreateInfo.magFilter = VK_FILTER_NEAREST; samplerCreateInfo.minFilter = VK_FILTER_NEAREST; samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; samplerCreateInfo.mipLodBias = 0.0; samplerCreateInfo.anisotropyEnable = VK_FALSE; samplerCreateInfo.maxAnisotropy = 0; samplerCreateInfo.compareEnable = VK_FALSE; samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER; samplerCreateInfo.minLod = 0.0; samplerCreateInfo.maxLod = 0.0; samplerCreateInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; /* create sampler */ res = vkCreateSampler(info.device, &samplerCreateInfo, NULL, &texObj.sampler); assert(res == VK_SUCCESS); VkImageViewCreateInfo view_info = {}; view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; view_info.pNext = NULL; view_info.image = VK_NULL_HANDLE; view_info.viewType = VK_IMAGE_VIEW_TYPE_2D; view_info.format = VK_FORMAT_R8G8B8A8_UNORM; view_info.components.r = VK_COMPONENT_SWIZZLE_R; view_info.components.g = VK_COMPONENT_SWIZZLE_G; view_info.components.b = VK_COMPONENT_SWIZZLE_B; view_info.components.a = VK_COMPONENT_SWIZZLE_A; view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; view_info.subresourceRange.baseMipLevel = 0; view_info.subresourceRange.levelCount = 1; view_info.subresourceRange.baseArrayLayer = 0; view_info.subresourceRange.layerCount = 1; /* create image view */ view_info.image = texObj.image; res = vkCreateImageView(info.device, &view_info, NULL, &texObj.view); assert(res == VK_SUCCESS); info.textures.push_back(texObj); /* VULKAN_KEY_END */ /* Clean Up */ vkDestroySampler(info.device, texObj.sampler, NULL); vkDestroyImageView(info.device, texObj.view, NULL); vkDestroyImage(info.device, texObj.image, NULL); vkFreeMemory(info.device, texObj.mem, NULL); if (needStaging) { /* Release the resources for the staging image */ vkFreeMemory(info.device, mappableMemory, NULL); vkDestroyImage(info.device, mappableImage, NULL); } destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
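/* read_ppm() above is handed layout.rowPitch so each row of texels lands at
 * the driver-reported pitch of the linear image. The general pattern for any
 * tightly packed RGBA8 source (src, tex_width, tex_height are placeholders): */
static void copy_rgba8_respecting_pitch(const uint8_t *src, uint8_t *mapped,
                                        uint32_t tex_width, uint32_t tex_height, VkDeviceSize rowPitch) {
    for (uint32_t y = 0; y < tex_height; y++) {
        /* Source rows are tightly packed; rows in the linear image may be padded out to rowPitch bytes. */
        memcpy(mapped + y * rowPitch, src + y * tex_width * 4, tex_width * 4);
    }
}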
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Pipeline Cache"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info, "blue.ppm"); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); /* VULKAN_KEY_START */ // Check disk for existing cache data size_t startCacheSize = 0; void *startCacheData = nullptr; std::string directoryName = get_file_directory(); std::string readFileName = directoryName + "pipeline_cache_data.bin"; FILE *pReadFile = fopen(readFileName.c_str(), "rb"); if (pReadFile) { // Determine cache size fseek(pReadFile, 0, SEEK_END); startCacheSize = ftell(pReadFile); rewind(pReadFile); // Allocate memory to hold the initial cache data startCacheData = (char *)malloc(sizeof(char) * startCacheSize); if (startCacheData == nullptr) { fputs("Memory error", stderr); exit(EXIT_FAILURE); } // Read the data into our buffer size_t result = fread(startCacheData, 1, startCacheSize, pReadFile); if (result != startCacheSize) { fputs("Reading error", stderr); free(startCacheData); exit(EXIT_FAILURE); } // Clean up and print results fclose(pReadFile); printf(" Pipeline cache HIT!\n"); printf(" cacheData loaded from %s\n", readFileName.c_str()); } else { // No cache found on disk printf(" Pipeline cache miss!\n"); } if (startCacheData != nullptr) { // clang-format off // // Check for cache validity // // TODO: Update this as the spec evolves. The fields are not defined by the header. 
// // The code below supports SDK 0.10 Vulkan spec, which contains the following table: // // Offset Size Meaning // ------ ------------ ------------------------------------------------------------------ // 0 4 a device ID equal to VkPhysicalDeviceProperties::DeviceId written // as a stream of bytes, with the least significant byte first // // 4 VK_UUID_SIZE a pipeline cache ID equal to VkPhysicalDeviceProperties::pipelineCacheUUID // // // The code must be updated for latest Vulkan spec, which contains the following table: // // Offset Size Meaning // ------ ------------ ------------------------------------------------------------------ // 0 4 length in bytes of the entire pipeline cache header written as a // stream of bytes, with the least significant byte first // 4 4 a VkPipelineCacheHeaderVersion value written as a stream of bytes, // with the least significant byte first // 8 4 a vendor ID equal to VkPhysicalDeviceProperties::vendorID written // as a stream of bytes, with the least significant byte first // 12 4 a device ID equal to VkPhysicalDeviceProperties::deviceID written // as a stream of bytes, with the least significant byte first // 16 VK_UUID_SIZE a pipeline cache ID equal to VkPhysicalDeviceProperties::pipelineCacheUUID // // clang-format on uint32_t headerLength = 0; uint32_t cacheHeaderVersion = 0; uint32_t vendorID = 0; uint32_t deviceID = 0; uint8_t pipelineCacheUUID[VK_UUID_SIZE] = {}; memcpy(&headerLength, (uint8_t *)startCacheData + 0, 4); memcpy(&cacheHeaderVersion, (uint8_t *)startCacheData + 4, 4); memcpy(&vendorID, (uint8_t *)startCacheData + 8, 4); memcpy(&deviceID, (uint8_t *)startCacheData + 12, 4); memcpy(pipelineCacheUUID, (uint8_t *)startCacheData + 16, VK_UUID_SIZE); // Check each field and report bad values before freeing existing cache bool badCache = false; if (headerLength <= 0) { badCache = true; printf(" Bad header length in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", headerLength); } if (cacheHeaderVersion != VK_PIPELINE_CACHE_HEADER_VERSION_ONE) { badCache = true; printf(" Unsupported cache header version in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", cacheHeaderVersion); } if (vendorID != info.gpu_props.vendorID) { badCache = true; printf(" Vendor ID mismatch in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", vendorID); printf(" Driver expects: 0x%.8x\n", info.gpu_props.vendorID); } if (deviceID != info.gpu_props.deviceID) { badCache = true; printf(" Device ID mismatch in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", deviceID); printf(" Driver expects: 0x%.8x\n", info.gpu_props.deviceID); } if (memcmp(pipelineCacheUUID, info.gpu_props.pipelineCacheUUID, sizeof(pipelineCacheUUID)) != 0) { badCache = true; printf(" UUID mismatch in %s.\n", readFileName.c_str()); printf(" Cache contains: "); print_UUID(pipelineCacheUUID); printf("\n"); printf(" Driver expects: "); print_UUID(info.gpu_props.pipelineCacheUUID); printf("\n"); } if (badCache) { // Don't submit initial cache data if any version info is incorrect free(startCacheData); startCacheSize = 0; startCacheData = nullptr; // And clear out the old cache file for use in next run printf(" Deleting cache entry %s to repopulate.\n", readFileName.c_str()); if (remove(readFileName.c_str()) != 0) { fputs("Reading error", stderr); exit(EXIT_FAILURE); } } } // Feed the initial cache data into pipeline creation VkPipelineCacheCreateInfo pipelineCache; pipelineCache.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; 
pipelineCache.pNext = NULL; pipelineCache.initialDataSize = startCacheSize; pipelineCache.pInitialData = startCacheData; pipelineCache.flags = 0; res = vkCreatePipelineCache(info.device, &pipelineCache, nullptr, &info.pipelineCache); assert(res == VK_SUCCESS); // Free our initialData now that pipeline has been created free(startCacheData); // Time (roughly) taken to create the graphics pipeline timestamp_t start = get_milliseconds(); init_pipeline(info, depthPresent); timestamp_t elapsed = get_milliseconds() - start; printf(" vkCreateGraphicsPipeline time: %0.f ms\n", (double)elapsed); // Begin standard draw stuff init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "pipeline_cache"); // End standard draw stuff if (startCacheData) { // TODO: Create another pipeline, preferably different from the first // one and merge it here. Then store the merged one. } // Store away the cache that we've populated. This could conceivably happen // earlier, depends on when the pipeline cache stops being populated // internally. 
size_t endCacheSize = 0; void *endCacheData = nullptr; // Call with nullptr to get cache size res = vkGetPipelineCacheData(info.device, info.pipelineCache, &endCacheSize, nullptr); assert(res == VK_SUCCESS); // Allocate memory to hold the populated cache data endCacheData = (char *)malloc(sizeof(char) * endCacheSize); if (!endCacheData) { fputs("Memory error", stderr); exit(EXIT_FAILURE); } // Call again with pointer to buffer res = vkGetPipelineCacheData(info.device, info.pipelineCache, &endCacheSize, endCacheData); assert(res == VK_SUCCESS); // Write the file to disk, overwriting whatever was there FILE *pWriteFile; std::string writeFileName = directoryName + "pipeline_cache_data.bin"; pWriteFile = fopen(writeFileName.c_str(), "wb"); if (pWriteFile) { fwrite(endCacheData, sizeof(char), endCacheSize, pWriteFile); fclose(pWriteFile); printf(" cacheData written to %s\n", writeFileName.c_str()); } else { // Something bad happened printf(" Unable to write cache data to disk!\n"); } /* VULKAN_KEY_END */ vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
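/*
 * Editor's note: the validity check above, expressed as a reusable sketch. This is not
 * part of the original sample; the helper name is_pipeline_cache_header_valid is
 * hypothetical. It assumes the Vulkan 1.0 cache header layout parsed above (length,
 * version, vendorID, deviceID, pipelineCacheUUID) and a little-endian host, as the
 * sample's memcpy-based parsing does.
 */
#include <vulkan/vulkan.h>
#include <cstdint>
#include <cstring>

static bool is_pipeline_cache_header_valid(const void *data, size_t size, const VkPhysicalDeviceProperties &props) {
    if (size < 16 + VK_UUID_SIZE) return false;  // too small to contain a version-one header
    const uint8_t *bytes = static_cast<const uint8_t *>(data);
    uint32_t headerLength = 0, headerVersion = 0, vendorID = 0, deviceID = 0;
    memcpy(&headerLength, bytes + 0, 4);   // total header length in bytes
    memcpy(&headerVersion, bytes + 4, 4);  // VkPipelineCacheHeaderVersion
    memcpy(&vendorID, bytes + 8, 4);
    memcpy(&deviceID, bytes + 12, 4);
    return headerLength != 0 &&
           headerVersion == (uint32_t)VK_PIPELINE_CACHE_HEADER_VERSION_ONE &&
           vendorID == props.vendorID &&
           deviceID == props.deviceID &&
           memcmp(bytes + 16, props.pipelineCacheUUID, VK_UUID_SIZE) == 0;
}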
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Vertex Buffer Sample"; const bool depthPresent = true; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_renderpass(info, depthPresent); init_framebuffers(info, depthPresent); /* VULKAN_KEY_START */ /* * Set up a vertex buffer: * - Create a buffer * - Map it and write the vertex data into it * - Bind it using vkCmdBindVertexBuffers * - Later, at pipeline creation, * - fill in the vertex input part of the pipeline with relevant data */ VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; buf_info.size = sizeof(g_vb_solid_face_colors_Data); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &info.vertex_buffer.buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, info.vertex_buffer.buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &alloc_info.memoryTypeIndex); assert(pass && "No mappable, coherent memory"); res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.vertex_buffer.mem)); assert(res == VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, info.vertex_buffer.mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data)); vkUnmapMemory(info.device, info.vertex_buffer.mem); res = vkBindBufferMemory(info.device, info.vertex_buffer.buf, info.vertex_buffer.mem, 0); assert(res == VK_SUCCESS); /* We won't use these here, but we will need this info when creating the * pipeline */ info.vi_binding.binding = 0; info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; info.vi_binding.stride = sizeof(g_vb_solid_face_colors_Data[0]); info.vi_attribs[0].binding = 0; info.vi_attribs[0].location = 0; info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[0].offset = 0; info.vi_attribs[1].binding = 0; info.vi_attribs[1].location = 1; info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[1].offset = 16; const VkDeviceSize offsets[1] = {0}; /* We cannot bind the vertex buffer until we begin a renderpass */ VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore imageAcquiredSemaphore; VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo; imageAcquiredSemaphoreCreateInfo.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; imageAcquiredSemaphoreCreateInfo.pNext = NULL; imageAcquiredSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin = {}; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindVertexBuffers(info.cmd, 0, /* Start Binding */ 1, /* Binding Count */ &info.vertex_buffer.buf, /* pBuffers */ offsets); /* pOffsets */ vkCmdEndRenderPass(info.cmd); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ vkDestroySemaphore(info.device, imageAcquiredSemaphore, NULL); vkDestroyBuffer(info.device, info.vertex_buffer.buf, NULL); vkFreeMemory(info.device, info.vertex_buffer.mem, NULL); destroy_framebuffers(info); destroy_renderpass(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
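/*
 * Editor's note: a minimal sketch (not from the original sample) of how the binding and
 * attribute descriptions recorded in info.vi_binding / info.vi_attribs above would later
 * be consumed at pipeline-creation time. The helper name make_vertex_input_state is
 * hypothetical; the layout assumed is the sample's interleaved position (vec4) + color
 * (vec4) vertex format.
 */
#include <vulkan/vulkan.h>

static VkPipelineVertexInputStateCreateInfo make_vertex_input_state(const VkVertexInputBindingDescription *binding,
                                                                    const VkVertexInputAttributeDescription *attribs,
                                                                    uint32_t attribCount) {
    VkPipelineVertexInputStateCreateInfo vi = {};
    vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    vi.vertexBindingDescriptionCount = 1;        // one interleaved buffer binding
    vi.pVertexBindingDescriptions = binding;     // e.g. &info.vi_binding (must outlive pipeline creation)
    vi.vertexAttributeDescriptionCount = attribCount;
    vi.pVertexAttributeDescriptions = attribs;   // e.g. info.vi_attribs
    return vi;                                   // plug into VkGraphicsPipelineCreateInfo::pVertexInputState
}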
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Texel Buffer Sample"; float texels[] = {1.0, 0.0, 1.0}; const bool depthPresent = false; const bool vertexPresent = false; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); if (info.gpu_props.limits.maxTexelBufferElements < 4) { std::cout << "maxTexelBufferElements too small\n"; exit(-1); } VkFormatProperties props; vkGetPhysicalDeviceFormatProperties(info.gpus[0], VK_FORMAT_R32_SFLOAT, &props); if (!(props.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) { std::cout << "R32_SFLOAT format unsupported for texel buffer\n"; exit(-1); } init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; buf_info.size = sizeof(texels); buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; VkBuffer texelBuf; res = vkCreateBuffer(info.device, &buf_info, NULL, &texelBuf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, texelBuf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &alloc_info.memoryTypeIndex); assert(pass && "No mappable, coherent memory"); VkDeviceMemory texelMem; res = vkAllocateMemory(info.device, &alloc_info, NULL, &texelMem); assert(res == VK_SUCCESS); uint8_t *pData; res = vkMapMemory(info.device, texelMem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &texels, sizeof(texels)); vkUnmapMemory(info.device, texelMem); res = vkBindBufferMemory(info.device, texelBuf, texelMem, 0); assert(res == VK_SUCCESS); VkBufferView texel_view; VkBufferViewCreateInfo view_info = {}; view_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; view_info.pNext = NULL; view_info.buffer = texelBuf; view_info.format = VK_FORMAT_R32_SFLOAT; view_info.offset = 0; view_info.range = sizeof(texels); vkCreateBufferView(info.device, &view_info, NULL, &texel_view); VkDescriptorBufferInfo texel_buffer_info = {}; texel_buffer_info.buffer = texelBuf; texel_buffer_info.offset = 0; texel_buffer_info.range = sizeof(texels); // init_descriptor_and_pipeline_layouts(info, false); VkDescriptorSetLayoutBinding layout_bindings[1]; layout_bindings[0].binding = 0; layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; layout_bindings[0].descriptorCount = 1; layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_bindings[0].pImmutableSamplers = NULL; /* Next take layout bindings and use them to create a descriptor set layout */ VkDescriptorSetLayoutCreateInfo descriptor_layout = {}; descriptor_layout.sType = 
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; descriptor_layout.pNext = NULL; descriptor_layout.bindingCount = 1; descriptor_layout.pBindings = layout_bindings; info.desc_layout.resize(NUM_DESCRIPTOR_SETS); res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data()); assert(res == VK_SUCCESS); /* Now use the descriptor layout to create a pipeline layout */ VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS; pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data(); res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); VkDescriptorPoolSize type_count[1]; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; type_count[0].descriptorCount = 1; VkDescriptorPoolCreateInfo descriptor_pool = {}; descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool.pNext = NULL; descriptor_pool.maxSets = 1; descriptor_pool.poolSizeCount = 1; descriptor_pool.pPoolSizes = type_count; res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo desc_alloc_info[1]; desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; desc_alloc_info[0].pNext = NULL; desc_alloc_info[0].descriptorPool = info.desc_pool; desc_alloc_info[0].descriptorSetCount = NUM_DESCRIPTOR_SETS; desc_alloc_info[0].pSetLayouts = info.desc_layout.data(); /* Allocate descriptor set with UNIFORM_BUFFER_DYNAMIC */ info.desc_set.resize(NUM_DESCRIPTOR_SETS); res = vkAllocateDescriptorSets(info.device, desc_alloc_info, info.desc_set.data()); assert(res == VK_SUCCESS); VkWriteDescriptorSet writes[1]; writes[0] = {}; writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[0].dstSet = info.desc_set[0]; writes[0].dstBinding = 0; writes[0].descriptorCount = 1; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; writes[0].pBufferInfo = &texel_buffer_info; writes[0].pTexelBufferView = &texel_view; writes[0].dstArrayElement = 0; vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL); init_pipeline_cache(info); init_pipeline(info, depthPresent, vertexPresent); /* VULKAN_KEY_START */ VkClearValue clear_values[1]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo; imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; imageAcquiredSemaphoreCreateInfo.pNext = NULL; imageAcquiredSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &info.imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; 
rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 1; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); execute_queue_cmdbuf(info, cmd_bufs, drawFence); do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, drawFence, NULL); execute_present_image(info); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "texel_buffer"); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); vkDestroyBufferView(info.device, texel_view, NULL); vkDestroyBuffer(info.device, texelBuf, NULL); vkFreeMemory(info.device, texelMem, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
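/*
 * Editor's note: a small sketch (not part of the original sample) that wraps the two
 * capability checks the texel buffer sample performs by hand: the device limit on texel
 * buffer elements and uniform-texel-buffer support for the chosen format. The helper
 * name can_use_uniform_texel_buffer is hypothetical.
 */
#include <vulkan/vulkan.h>

static bool can_use_uniform_texel_buffer(VkPhysicalDevice gpu, VkFormat format, uint32_t neededElements) {
    VkPhysicalDeviceProperties props;
    vkGetPhysicalDeviceProperties(gpu, &props);
    if (props.limits.maxTexelBufferElements < neededElements) return false;

    VkFormatProperties fmtProps;
    vkGetPhysicalDeviceFormatProperties(gpu, format, &fmtProps);
    // A buffer view used as a uniform texel buffer requires this format feature bit.
    return (fmtProps.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != 0;
}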
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Copy/Blit Image"; VkImageCreateInfo image_info; VkImage bltSrcImage; VkImage bltDstImage; VkMemoryRequirements memReq; VkMemoryAllocateInfo memAllocInfo; VkDeviceMemory dmem; unsigned char *pImgMem; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 640, 640); init_connection(info); init_window(info); init_swapchain_extension(info); VkSurfaceCapabilitiesKHR surfCapabilities; res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(info.gpus[0], info.surface, &surfCapabilities); if (!(surfCapabilities.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { std::cout << "Surface cannot be destination of blit - abort \n"; exit(-1); } init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); /* VULKAN_KEY_START */ VkFormatProperties formatProps; vkGetPhysicalDeviceFormatProperties(info.gpus[0], info.format, &formatProps); assert( (formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT) && "Format cannot be used as transfer source"); VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); // Create an image, map it, and write some values to the image image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_info.pNext = NULL; image_info.imageType = VK_IMAGE_TYPE_2D; image_info.format = info.format; image_info.extent.width = info.width; image_info.extent.height = info.height; image_info.extent.depth = 1; image_info.mipLevels = 1; image_info.arrayLayers = 1; image_info.samples = NUM_SAMPLES; image_info.queueFamilyIndexCount = 0; image_info.pQueueFamilyIndices = NULL; image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_info.flags = 0; image_info.tiling = VK_IMAGE_TILING_LINEAR; image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; res = vkCreateImage(info.device, &image_info, NULL, &bltSrcImage); memAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAllocInfo.pNext = NULL; vkGetImageMemoryRequirements(info.device, bltSrcImage, &memReq); bool pass = memory_type_from_properties(info, memReq.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex); assert(pass); memAllocInfo.allocationSize = memReq.size; res = vkAllocateMemory(info.device, &memAllocInfo, NULL, &dmem); res = vkBindImageMemory(info.device, bltSrcImage, dmem, 0); set_image_layout(info, bltSrcImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL); res =
vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence cmdFence; init_fence(info, cmdFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; submit_info.pNext = NULL; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &presentCompleteSemaphore; submit_info.pWaitDstStageMask = &pipe_stage_flags; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &info.cmd; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, cmdFence); assert(res == VK_SUCCESS); /* Make sure command buffer is finished before mapping */ do { res = vkWaitForFences(info.device, 1, &cmdFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, cmdFence, NULL); res = vkMapMemory(info.device, dmem, 0, memReq.size, 0, (void **)&pImgMem); // Checkerboard of 8x8 pixel squares for (int row = 0; row < info.height; row++) { for (int col = 0; col < info.width; col++) { unsigned char rgb = (((row & 0x8) == 0) ^ ((col & 0x8) == 0)) * 255; pImgMem[0] = rgb; pImgMem[1] = rgb; pImgMem[2] = rgb; pImgMem[3] = 255; pImgMem += 4; } } // Flush the mapped memory and then unmap it. Assume it isn't coherent since // we didn't really confirm VkMappedMemoryRange memRange; memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; memRange.pNext = NULL; memRange.memory = dmem; memRange.offset = 0; memRange.size = memReq.size; res = vkFlushMappedMemoryRanges(info.device, 1, &memRange); vkUnmapMemory(info.device, dmem); vkResetCommandBuffer(info.cmd, 0); execute_begin_command_buffer(info); set_image_layout(info, bltSrcImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); bltDstImage = info.buffers[info.current_buffer].image; // init_swap_chain will create the images as color attachment optimal // but we want transfer dst optimal set_image_layout(info, bltDstImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); // Do a 32x32 blit to all of the dst image - should get big squares VkImageBlit region; region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.srcSubresource.mipLevel = 0; region.srcSubresource.baseArrayLayer = 0; region.srcSubresource.layerCount = 1; region.srcOffsets[0].x = 0; region.srcOffsets[0].y = 0; region.srcOffsets[0].z = 0; region.srcOffsets[1].x = 32; region.srcOffsets[1].y = 32; region.srcOffsets[1].z = 1; region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.dstSubresource.mipLevel = 0; region.dstSubresource.baseArrayLayer = 0; region.dstSubresource.layerCount = 1; region.dstOffsets[0].x = 0; region.dstOffsets[0].y = 0; region.dstOffsets[0].z = 0; region.dstOffsets[1].x = info.width; region.dstOffsets[1].y = info.height; region.dstOffsets[1].z = 1; vkCmdBlitImage(info.cmd, bltSrcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bltDstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, VK_FILTER_LINEAR); // Do an image copy to part of the dst image - the checker squares should stay small VkImageCopy cregion; cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; cregion.srcSubresource.mipLevel = 0; cregion.srcSubresource.baseArrayLayer = 0; cregion.srcSubresource.layerCount = 1; cregion.srcOffset.x = 0; cregion.srcOffset.y = 0; cregion.srcOffset.z = 0; cregion.dstSubresource.aspectMask =
VK_IMAGE_ASPECT_COLOR_BIT; cregion.dstSubresource.mipLevel = 0; cregion.dstSubresource.baseArrayLayer = 0; cregion.dstSubresource.layerCount = 1; cregion.dstOffset.x = 256; cregion.dstOffset.y = 256; cregion.dstOffset.z = 0; cregion.extent.width = 128; cregion.extent.height = 128; cregion.extent.depth = 1; vkCmdCopyImage(info.cmd, bltSrcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bltDstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); submit_info.pNext = NULL; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &info.cmd; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "copyblitimage"); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); vkDestroyImage(info.device, bltSrcImage, NULL); vkFreeMemory(info.device, dmem, NULL); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
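/*
 * Editor's note: a minimal sketch (not from the original sample) contrasting the two
 * transfer commands the Copy/Blit sample uses. vkCmdBlitImage scales and filters
 * between source and destination regions, while vkCmdCopyImage transfers texels 1:1
 * with no scaling. Both images are assumed to already be in the *_TRANSFER_*_OPTIMAL
 * layouts, as arranged in the sample; the helper name record_scaled_blit is hypothetical.
 */
#include <vulkan/vulkan.h>

static void record_scaled_blit(VkCommandBuffer cmd, VkImage src, VkExtent2D srcSize, VkImage dst, VkExtent2D dstSize) {
    VkImageBlit region = {};
    region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    region.srcOffsets[1] = {(int32_t)srcSize.width, (int32_t)srcSize.height, 1};
    region.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    region.dstOffsets[1] = {(int32_t)dstSize.width, (int32_t)dstSize.height, 1};
    // Linear filtering stretches the source region over the destination region.
    vkCmdBlitImage(cmd, src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region,
                   VK_FILTER_LINEAR);
}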
/** * Sample using multiple render passes per framebuffer (different x,y extents) * and multiple subpasses per renderpass. */ int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Multi-pass render passes"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); info.depth.format = VK_FORMAT_D32_SFLOAT_S8_UINT; init_depth_buffer(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, false); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); init_descriptor_pool(info, false); init_descriptor_set(info, false); init_pipeline_cache(info); /* VULKAN_KEY_START */ /** * First renderpass in this sample. * Stenciled rendering: subpass 1 draw to stencil buffer, subpass 2 draw to * color buffer with stencil test */ VkAttachmentDescription attachments[2]; attachments[0].format = info.format; attachments[0].samples = VK_SAMPLE_COUNT_1_BIT; attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments[0].flags = 0; attachments[1].format = info.depth.format; attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE; attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; attachments[1].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments[1].flags = 0; VkAttachmentReference color_reference = {}; color_reference.attachment = 0; color_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference depth_reference = {}; depth_reference.attachment = 1; depth_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = 0; subpass.pColorAttachments = NULL; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = &depth_reference; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; std::vector<VkSubpassDescription> subpasses; /* first a depthstencil-only subpass */ subpasses.push_back(subpass); subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_reference; /* then depthstencil and color */ subpasses.push_back(subpass); /* Set up a dependency between the source and destination subpasses */ VkSubpassDependency dependency = {}; dependency.srcSubpass = 0; dependency.dstSubpass = 1; dependency.dependencyFlags = 0; 
dependency.srcStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT; dependency.dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT; dependency.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; dependency.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.pNext = NULL; rp_info.attachmentCount = 2; rp_info.pAttachments = attachments; rp_info.subpassCount = subpasses.size(); rp_info.pSubpasses = subpasses.data(); rp_info.dependencyCount = 1; rp_info.pDependencies = &dependency; VkRenderPass stencil_render_pass; res = vkCreateRenderPass(info.device, &rp_info, NULL, &stencil_render_pass); assert(!res); /* now that we have the render pass, create framebuffer and pipelines */ info.render_pass = stencil_render_pass; init_framebuffers(info, depthPresent); VkDynamicState dynamicStateEnables[VK_DYNAMIC_STATE_RANGE_SIZE]; VkPipelineDynamicStateCreateInfo dynamicState = {}; memset(dynamicStateEnables, 0, sizeof dynamicStateEnables); dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dynamicState.pNext = NULL; dynamicState.pDynamicStates = dynamicStateEnables; dynamicState.dynamicStateCount = 0; VkPipelineVertexInputStateCreateInfo vi; vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vi.pNext = NULL; vi.vertexBindingDescriptionCount = 1; vi.pVertexBindingDescriptions = &info.vi_binding; vi.vertexAttributeDescriptionCount = 2; vi.pVertexAttributeDescriptions = info.vi_attribs; VkPipelineInputAssemblyStateCreateInfo ia; ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia.pNext = NULL; ia.primitiveRestartEnable = VK_FALSE; ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; VkPipelineRasterizationStateCreateInfo rs; rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs.pNext = NULL; rs.polygonMode = VK_POLYGON_MODE_FILL; rs.cullMode = VK_CULL_MODE_BACK_BIT; rs.frontFace = VK_FRONT_FACE_CLOCKWISE; rs.depthClampEnable = VK_FALSE; rs.rasterizerDiscardEnable = VK_FALSE; rs.depthBiasEnable = VK_FALSE; rs.depthBiasConstantFactor = 0; rs.depthBiasClamp = 0; rs.depthBiasSlopeFactor = 0; rs.lineWidth = 0; VkPipelineColorBlendStateCreateInfo cb; cb.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb.pNext = NULL; VkPipelineColorBlendAttachmentState att_state[1]; att_state[0].colorWriteMask = 0xf; att_state[0].blendEnable = VK_FALSE; att_state[0].alphaBlendOp = VK_BLEND_OP_ADD; att_state[0].colorBlendOp = VK_BLEND_OP_ADD; att_state[0].srcColorBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].dstColorBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; cb.attachmentCount = 1; cb.pAttachments = att_state; cb.logicOpEnable = VK_FALSE; cb.logicOp = VK_LOGIC_OP_NO_OP; cb.blendConstants[0] = 1.0f; cb.blendConstants[1] = 1.0f; cb.blendConstants[2] = 1.0f; cb.blendConstants[3] = 1.0f; VkPipelineViewportStateCreateInfo vp = {}; vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp.pNext = NULL; vp.viewportCount = NUM_VIEWPORTS; dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_VIEWPORT; vp.scissorCount = NUM_SCISSORS; dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_SCISSOR; VkPipelineDepthStencilStateCreateInfo ds; ds.sType = 
VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; ds.pNext = NULL; ds.depthTestEnable = VK_TRUE; ds.depthWriteEnable = VK_TRUE; ds.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL; ds.depthBoundsTestEnable = VK_FALSE; ds.minDepthBounds = 0; ds.maxDepthBounds = 0; ds.stencilTestEnable = VK_TRUE; ds.back.failOp = VK_STENCIL_OP_REPLACE; ds.back.depthFailOp = VK_STENCIL_OP_REPLACE; ds.back.passOp = VK_STENCIL_OP_REPLACE; ds.back.compareOp = VK_COMPARE_OP_ALWAYS; ds.back.compareMask = 0xff; ds.back.writeMask = 0xff; ds.back.reference = 0x44; ds.front = ds.back; VkPipelineMultisampleStateCreateInfo ms; ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms.pNext = NULL; ms.pSampleMask = NULL; ms.rasterizationSamples = NUM_SAMPLES; ms.sampleShadingEnable = VK_FALSE; ms.minSampleShading = 0.0; ms.alphaToCoverageEnable = VK_FALSE; ms.alphaToOneEnable = VK_FALSE; VkGraphicsPipelineCreateInfo pipeline; pipeline.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; pipeline.pNext = NULL; pipeline.layout = info.pipeline_layout; pipeline.basePipelineHandle = VK_NULL_HANDLE; pipeline.basePipelineIndex = 0; pipeline.flags = 0; pipeline.pVertexInputState = &vi; pipeline.pInputAssemblyState = &ia; pipeline.pRasterizationState = &rs; pipeline.pColorBlendState = NULL; pipeline.pTessellationState = NULL; pipeline.pMultisampleState = &ms; pipeline.pDynamicState = &dynamicState; pipeline.pViewportState = &vp; pipeline.pDepthStencilState = &ds; pipeline.pStages = info.shaderStages; pipeline.stageCount = 2; pipeline.renderPass = stencil_render_pass; pipeline.subpass = 0; init_shaders(info, normalVertShaderText, fragShaderText); /* The first pipeline will render in subpass 0 to fill the stencil */ pipeline.subpass = 0; VkPipeline stencil_cube_pipe = VK_NULL_HANDLE; res = vkCreateGraphicsPipelines(info.device, info.pipelineCache, 1, &pipeline, NULL, &stencil_cube_pipe); assert(res == VK_SUCCESS); /* destroy the shaders used for the above pipeline and replace them with those for the fullscreen fill pass */ destroy_shaders(info); init_shaders(info, fullscreenVertShaderText, fragShaderText); /* the second pipeline will stencil test but not write, using the same * reference */ ds.back.failOp = VK_STENCIL_OP_KEEP; ds.back.depthFailOp = VK_STENCIL_OP_KEEP; ds.back.passOp = VK_STENCIL_OP_KEEP; ds.back.compareOp = VK_COMPARE_OP_EQUAL; ds.front = ds.back; /* don't test depth, only use stencil test */ ds.depthTestEnable = VK_FALSE; /* the second pipeline will be a fullscreen triangle strip, with vertices generated purely from the vertex shader - no inputs needed */ ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; vi.vertexAttributeDescriptionCount = 0; vi.vertexBindingDescriptionCount = 0; /* this pipeline will run in the second subpass */ pipeline.subpass = 1; pipeline.pColorBlendState = &cb; VkPipeline stencil_fullscreen_pipe = VK_NULL_HANDLE; res = vkCreateGraphicsPipelines(info.device, info.pipelineCache, 1, &pipeline, NULL, &stencil_fullscreen_pipe); assert(res == VK_SUCCESS); destroy_shaders(info); info.pipeline = VK_NULL_HANDLE; VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = stencil_render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width / 2; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; /* Begin the first render pass. This will render in the left half of the screen. Subpass 0 will render a cube, stencil writing but outputting no color. Subpass 1 will render a fullscreen pass, stencil testing and outputting color only where the cube filled in stencil */ vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, stencil_cube_pipe); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); VkViewport viewport; viewport.height = (float)info.height; viewport.width = (float)info.width / 2; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); VkRect2D scissor; scissor.extent.width = info.width / 2; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); /* Draw the cube into stencil */ vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); /* Advance to the next subpass */ vkCmdNextSubpass(info.cmd, VK_SUBPASS_CONTENTS_INLINE); /* Bind the fullscreen pass pipeline */ vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, stencil_fullscreen_pipe); vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); /* Draw the fullscreen pass */ vkCmdDraw(info.cmd, 4, 1, 0, 0); vkCmdEndRenderPass(info.cmd); /** * Second renderpass in this sample. 
* Blended rendering, each subpass blends continuously onto the color */ /* note that we reuse a lot of the initialisation structures from the first render pass, so this represents a 'delta' from that configuration */ /* This time, the first subpass will use color */ subpasses[0].colorAttachmentCount = 1; subpasses[0].pColorAttachments = &color_reference; /* The dependency between the subpasses now includes the color attachment */ dependency.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT; dependency.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT; /* Otherwise, the render pass is identical */ VkRenderPass blend_render_pass; res = vkCreateRenderPass(info.device, &rp_info, NULL, &blend_render_pass); assert(!res); pipeline.renderPass = blend_render_pass; /* We must recreate the framebuffers with this renderpass as the two render passes are not compatible. Store the current framebuffers for later deletion */ VkFramebuffer *stencil_framebuffers = info.framebuffers; info.framebuffers = NULL; info.render_pass = blend_render_pass; init_framebuffers(info, depthPresent); /* Now create the pipelines for the second render pass */ /* We are rendering the cube again, configure the vertex inputs */ ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; vi.vertexAttributeDescriptionCount = 2; vi.vertexBindingDescriptionCount = 1; /* The first pipeline will depth write and depth test */ ds.depthWriteEnable = VK_TRUE; ds.depthTestEnable = VK_TRUE; /* We don't want to stencil test */ ds.stencilTestEnable = VK_FALSE; /* This time, both pipelines will blend. The first pipeline uses the blend constant to determine the blend amount */ att_state[0].colorWriteMask = 0xf; att_state[0].blendEnable = VK_TRUE; att_state[0].alphaBlendOp = VK_BLEND_OP_ADD; att_state[0].colorBlendOp = VK_BLEND_OP_ADD; att_state[0].srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_ALPHA; att_state[0].dstColorBlendFactor = VK_BLEND_FACTOR_ONE; att_state[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_ALPHA; att_state[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE; cb.blendConstants[0] = 1.0f; cb.blendConstants[1] = 1.0f; cb.blendConstants[2] = 1.0f; cb.blendConstants[3] = 0.3f; init_shaders(info, normalVertShaderText, fragShaderText); /* This is the first subpass's pipeline, to blend a cube onto the color * image */ pipeline.subpass = 0; VkPipeline blend_cube_pipe = VK_NULL_HANDLE; res = vkCreateGraphicsPipelines(info.device, info.pipelineCache, 1, &pipeline, NULL, &blend_cube_pipe); assert(res == VK_SUCCESS); /* Now we will set up the fullscreen pass to render on top.
*/ destroy_shaders(info); init_shaders(info, fullscreenVertShaderText, fragShaderText); /* the second pipeline will be a fullscreen triangle strip with no inputs */ ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; vi.vertexAttributeDescriptionCount = 0; vi.vertexBindingDescriptionCount = 0; /* We'll use the alpha output from the shader */ att_state[0].srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; att_state[0].dstColorBlendFactor = VK_BLEND_FACTOR_ONE; att_state[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; att_state[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE; /* This renders in the second subpass */ pipeline.subpass = 1; VkPipeline blend_fullscreen_pipe = VK_NULL_HANDLE; res = vkCreateGraphicsPipelines(info.device, info.pipelineCache, 1, &pipeline, NULL, &blend_fullscreen_pipe); assert(res == VK_SUCCESS); destroy_shaders(info); info.pipeline = VK_NULL_HANDLE; /* Now we are going to render in the right half of the screen */ viewport.x = (float)info.width / 2; scissor.offset.x = info.width / 2; rp_begin.renderArea.offset.x = info.width / 2; /* Use our framebuffer and render pass */ rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderPass = blend_render_pass; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, blend_cube_pipe); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); /* Draw the cube blending */ vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); /* Advance to the next subpass */ vkCmdNextSubpass(info.cmd, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, blend_fullscreen_pipe); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); /* Adjust the viewport to be a square in the centre, just overlapping the * cube */ viewport.x += 25.0f; viewport.y += 150.0f; viewport.width -= 50.0f; viewport.height -= 300.0f; vkCmdSetViewport(info.cmd, 0, NUM_VIEWPORTS, &viewport); vkCmdSetScissor(info.cmd, 0, NUM_SCISSORS, &scissor); vkCmdDraw(info.cmd, 4, 1, 0, 0); /* The second renderpass is complete */ vkCmdEndRenderPass(info.cmd); /* VULKAN_KEY_END */ VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = 0; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const 
VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "drawsubpasses"); for (uint32_t i = 0; i < info.swapchainImageCount; i++) vkDestroyFramebuffer(info.device, stencil_framebuffers[i], NULL); free(stencil_framebuffers); vkDestroyRenderPass(info.device, stencil_render_pass, NULL); vkDestroyRenderPass(info.device, blend_render_pass, NULL); vkDestroyPipeline(info.device, blend_cube_pipe, NULL); vkDestroyPipeline(info.device, blend_fullscreen_pipe, NULL); vkDestroyPipeline(info.device, stencil_cube_pipe, NULL); vkDestroyPipeline(info.device, stencil_fullscreen_pipe, NULL); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
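/*
 * Editor's note: a condensed sketch (not part of the original sample) of the recording
 * pattern both render passes above follow: draw with one pipeline in subpass 0, advance
 * with vkCmdNextSubpass, then draw with a second pipeline in subpass 1. Vertex buffers,
 * descriptor sets and dynamic viewport/scissor state are assumed to be bound by the
 * caller, as in the sample; the helper name record_two_subpass_pass is hypothetical.
 */
#include <vulkan/vulkan.h>

static void record_two_subpass_pass(VkCommandBuffer cmd, const VkRenderPassBeginInfo &rpBegin,
                                    VkPipeline firstSubpassPipe, uint32_t firstDrawVerts,
                                    VkPipeline secondSubpassPipe, uint32_t secondDrawVerts) {
    vkCmdBeginRenderPass(cmd, &rpBegin, VK_SUBPASS_CONTENTS_INLINE);

    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, firstSubpassPipe);   // pipeline created with subpass = 0
    vkCmdDraw(cmd, firstDrawVerts, 1, 0, 0);

    vkCmdNextSubpass(cmd, VK_SUBPASS_CONTENTS_INLINE);                           // the VkSubpassDependency declared in the render pass applies here

    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, secondSubpassPipe);  // pipeline created with subpass = 1
    vkCmdDraw(cmd, secondDrawVerts, 1, 0, 0);

    vkCmdEndRenderPass(cmd);
}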
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Simple Push Constants"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); // Set up one descriptor set static const unsigned descriptor_set_count = 1; static const unsigned resource_count = 1; // Create binding and layout for the following, matching contents of shader // binding 0 = uniform buffer (MVP) VkDescriptorSetLayoutBinding resource_binding[resource_count] = {}; resource_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; resource_binding[0].descriptorCount = 1; resource_binding[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; resource_binding[0].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo resource_layout_info[1] = {}; resource_layout_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; resource_layout_info[0].pNext = NULL; resource_layout_info[0].bindingCount = resource_count; resource_layout_info[0].pBindings = resource_binding; VkDescriptorSetLayout descriptor_layouts[1] = {}; res = vkCreateDescriptorSetLayout(info.device, resource_layout_info, NULL, &descriptor_layouts[0]); assert(res == VK_SUCCESS); /* VULKAN_KEY_START */ // Set up our push constant range, which mirrors the declaration of the push constant block in the fragment shader const unsigned push_constant_range_count = 1; VkPushConstantRange push_constant_ranges[push_constant_range_count] = {}; push_constant_ranges[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; push_constant_ranges[0].offset = 0; push_constant_ranges[0].size = 8; // Create pipeline layout, including push constant info.
// Create pipeline layout with multiple descriptor sets VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo[1] = {}; pipelineLayoutCreateInfo[0].sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipelineLayoutCreateInfo[0].pNext = NULL; pipelineLayoutCreateInfo[0].pushConstantRangeCount = push_constant_range_count; pipelineLayoutCreateInfo[0].pPushConstantRanges = push_constant_ranges; pipelineLayoutCreateInfo[0].setLayoutCount = descriptor_set_count; pipelineLayoutCreateInfo[0].pSetLayouts = descriptor_layouts; res = vkCreatePipelineLayout(info.device, pipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); // Create a single pool to contain data for our descriptor set VkDescriptorPoolSize type_count[2] = {}; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; type_count[0].descriptorCount = 1; type_count[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; type_count[1].descriptorCount = 1; VkDescriptorPoolCreateInfo pool_info[1] = {}; pool_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info[0].pNext = NULL; pool_info[0].maxSets = descriptor_set_count; pool_info[0].poolSizeCount = sizeof(type_count) / sizeof(VkDescriptorPoolSize); pool_info[0].pPoolSizes = type_count; VkDescriptorPool descriptor_pool[1] = {}; res = vkCreateDescriptorPool(info.device, pool_info, NULL, descriptor_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo alloc_info[1]; alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info[0].pNext = NULL; alloc_info[0].descriptorPool = descriptor_pool[0]; alloc_info[0].descriptorSetCount = descriptor_set_count; alloc_info[0].pSetLayouts = descriptor_layouts; // Populate descriptor sets VkDescriptorSet descriptor_sets[descriptor_set_count] = {}; res = vkAllocateDescriptorSets(info.device, alloc_info, descriptor_sets); assert(res == VK_SUCCESS); // Using empty brace initializer on the next line triggers a bug in older // versions of gcc, so memset instead VkWriteDescriptorSet descriptor_writes[resource_count]; memset(descriptor_writes, 0, sizeof(descriptor_writes)); // Populate with info about our uniform buffer for MVP descriptor_writes[0] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].pNext = NULL; descriptor_writes[0].dstSet = descriptor_sets[0]; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = &info.uniform_data.buffer_info; // populated by init_uniform_buffer() descriptor_writes[0].dstArrayElement = 0; descriptor_writes[0].dstBinding = 0; vkUpdateDescriptorSets(info.device, resource_count, descriptor_writes, 0, NULL); // Create our push constant data, which matches shader expectations unsigned pushConstants[2] = {}; pushConstants[0] = (unsigned)2; pushConstants[1] = (unsigned)0x3F800000; // Ensure we have enough room for push constant data if (sizeof(pushConstants) > info.gpu_props.limits.maxPushConstantsSize) assert(0 && "Too many push constants"); vkCmdPushConstants(info.cmd, info.pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(pushConstants), pushConstants); /* VULKAN_KEY_END */ init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, 
&rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, descriptor_sets, 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "push_constants"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); // instead of destroy_descriptor_pool(info); vkDestroyDescriptorPool(info.device, descriptor_pool[0], NULL); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); // instead of destroy_descriptor_and_pipeline_layouts(info); for (int i = 0; i < descriptor_set_count; i++) vkDestroyDescriptorSetLayout(info.device, descriptor_layouts[i], NULL); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
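/*
 * A minimal sketch of the push-constant update pattern used in the sample above: the 8-byte range
 * carries an int followed by a float (0x3F800000 is 1.0f as raw bits). The struct member names and
 * the helper name below are illustrative assumptions about the fragment shader's push_constant
 * block, not part of the sample framework.
 */
#include <vulkan/vulkan.h>
#include <cassert>
#include <cstdint>

struct FragPushData {
    int32_t constant_int;  // assumed first shader member (4 bytes)
    float constant_float;  // assumed second shader member (4 bytes)
};

static void push_frag_constants(VkCommandBuffer cmd, VkPipelineLayout layout,
                                const VkPhysicalDeviceLimits &limits, const FragPushData &data) {
    // The update must fit both the device limit and the range declared in the
    // pipeline layout (offset 0, size 8, fragment stage only).
    assert(sizeof(FragPushData) <= limits.maxPushConstantsSize);
    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(FragPushData), &data);
}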
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Draw Textured Cube"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "template"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
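/*
 * A minimal sketch of the submit-then-present idiom the samples repeat: wait on the draw fence,
 * re-waiting while the result is VK_TIMEOUT, before handing the acquired image back to the
 * presentation engine. The helper name and parameters are illustrative; the samples get the
 * equivalent behavior from init_present_info() plus the explicit wait loop.
 */
#include <vulkan/vulkan.h>
#include <cassert>
#include <cstdint>

static void wait_then_present(VkDevice device, VkQueue queue, VkFence draw_fence, VkSwapchainKHR swapchain,
                              uint32_t image_index, uint64_t timeout_ns) {
    VkResult res;
    // Keep waiting while the fence times out; any other result ends the loop.
    do {
        res = vkWaitForFences(device, 1, &draw_fence, VK_TRUE, timeout_ns);
    } while (res == VK_TIMEOUT);
    assert(res == VK_SUCCESS);

    VkPresentInfoKHR present = {};
    present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present.swapchainCount = 1;
    present.pSwapchains = &swapchain;
    present.pImageIndices = &image_index;
    res = vkQueuePresentKHR(queue, &present);
    assert(res == VK_SUCCESS);
}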
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; bool U_ASSERT_ONLY pass; struct sample_info info = {}; char sample_title[] = "Draw Cube"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); if (info.gpu_props.limits.maxDescriptorSetUniformBuffersDynamic < 1) { std::cout << "No dynamic uniform buffers supported\n"; exit(-1); } init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); /* Set up uniform buffer with 2 transform matrices in it */ info.Projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.1f, 100.0f); info.View = glm::lookAt( glm::vec3(0, 3, 10), // Camera is at (0,3,10), in World Space glm::vec3(0, 0, 0), // and looks at the origin glm::vec3(0, -1, 0) // Head is up (set to 0,-1,0 to look upside-down) ); info.Model = glm::mat4(1.0f); // Vulkan clip space has inverted Y and half Z. info.Clip = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 1.0f); info.MVP = info.Clip * info.Projection * info.View * info.Model; /* VULKAN_KEY_START */ info.Model = glm::translate(info.Model, glm::vec3(1.5, 1.5, 1.5)); glm::mat4 MVP2 = info.Clip * info.Projection * info.View * info.Model; VkDeviceSize buf_size = sizeof(info.MVP); if (info.gpu_props.limits.minUniformBufferOffsetAlignment) buf_size = (buf_size + info.gpu_props.limits.minUniformBufferOffsetAlignment - 1) & ~(info.gpu_props.limits.minUniformBufferOffsetAlignment - 1); VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 2 * buf_size; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; res = vkCreateBuffer(info.device, &buf_info, NULL, &info.uniform_data.buf); assert(res == VK_SUCCESS); VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(info.device, info.uniform_data.buf, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; alloc_info.allocationSize = mem_reqs.size; pass = memory_type_from_properties(info, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &alloc_info.memoryTypeIndex); assert(pass); res = vkAllocateMemory(info.device, &alloc_info, NULL, &(info.uniform_data.mem)); assert(res == VK_SUCCESS); /* Map the buffer memory and copy both matrices */ uint8_t *pData; res = vkMapMemory(info.device, info.uniform_data.mem, 0, mem_reqs.size, 0, (void **)&pData); assert(res == VK_SUCCESS); memcpy(pData, &info.MVP, sizeof(info.MVP)); pData += buf_size; memcpy(pData, &MVP2, sizeof(MVP2)); vkUnmapMemory(info.device, info.uniform_data.mem); res = vkBindBufferMemory(info.device, info.uniform_data.buf, info.uniform_data.mem, 0); 
assert(res == VK_SUCCESS); info.uniform_data.buffer_info.buffer = info.uniform_data.buf; info.uniform_data.buffer_info.offset = 0; info.uniform_data.buffer_info.range = buf_size; /* Init desciptor and pipeline layouts - descriptor type is * UNIFORM_BUFFER_DYNAMIC */ VkDescriptorSetLayoutBinding layout_bindings[2]; layout_bindings[0].binding = 0; layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; layout_bindings[0].descriptorCount = 1; layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; layout_bindings[0].pImmutableSamplers = NULL; /* Next take layout bindings and use them to create a descriptor set layout */ VkDescriptorSetLayoutCreateInfo descriptor_layout = {}; descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; descriptor_layout.pNext = NULL; descriptor_layout.bindingCount = 1; descriptor_layout.pBindings = layout_bindings; info.desc_layout.resize(NUM_DESCRIPTOR_SETS); res = vkCreateDescriptorSetLayout(info.device, &descriptor_layout, NULL, info.desc_layout.data()); assert(res == VK_SUCCESS); /* Now use the descriptor layout to create a pipeline layout */ VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = NUM_DESCRIPTOR_SETS; pPipelineLayoutCreateInfo.pSetLayouts = info.desc_layout.data(); res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); /* Create descriptor pool with UNIFOM_BUFFER_DYNAMIC type */ VkDescriptorPoolSize type_count[1]; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; type_count[0].descriptorCount = 1; VkDescriptorPoolCreateInfo descriptor_pool = {}; descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool.pNext = NULL; descriptor_pool.maxSets = 1; descriptor_pool.poolSizeCount = 1; descriptor_pool.pPoolSizes = type_count; res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo desc_alloc_info[1]; desc_alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; desc_alloc_info[0].pNext = NULL; desc_alloc_info[0].descriptorPool = info.desc_pool; desc_alloc_info[0].descriptorSetCount = NUM_DESCRIPTOR_SETS; desc_alloc_info[0].pSetLayouts = info.desc_layout.data(); /* Allocate descriptor set with UNIFORM_BUFFER_DYNAMIC */ info.desc_set.resize(NUM_DESCRIPTOR_SETS); res = vkAllocateDescriptorSets(info.device, desc_alloc_info, info.desc_set.data()); assert(res == VK_SUCCESS); VkWriteDescriptorSet writes[1]; writes[0] = {}; writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[0].pNext = NULL; writes[0].dstSet = info.desc_set[0]; writes[0].descriptorCount = 1; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; writes[0].pBufferInfo = &info.uniform_data.buffer_info; writes[0].dstArrayElement = 0; writes[0].dstBinding = 0; vkUpdateDescriptorSets(info.device, 1, writes, 0, NULL); init_pipeline_cache(info); init_pipeline(info, depthPresent); VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; 
clear_values[1].depthStencil.stencil = 0; VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); /* The first draw should use the first matrix in the buffer */ uint32_t uni_offsets[1] = {0}; vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 1, uni_offsets); const VkDeviceSize vtx_offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, vtx_offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); uni_offsets[0] = (uint32_t)buf_size; /* The second draw should use the second matrix in the buffer */ vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 1, uni_offsets); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo 
submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "dynamicuniform"); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
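/*
 * A small sketch of the alignment rule the dynamic-uniform sample applies above: each matrix slice
 * selected with a dynamic offset must start on a multiple of minUniformBufferOffsetAlignment, so
 * the per-draw stride is the element size rounded up to that alignment. The function name is
 * illustrative.
 */
#include <vulkan/vulkan.h>

static VkDeviceSize aligned_dynamic_stride(VkDeviceSize element_size, const VkPhysicalDeviceLimits &limits) {
    VkDeviceSize align = limits.minUniformBufferOffsetAlignment;
    // The alignment is a power of two (or zero), so rounding up can use a mask.
    if (align > 0) element_size = (element_size + align - 1) & ~(align - 1);
    return element_size;
}
// For example, a 64-byte mat4 with a 256-byte alignment gives dynamic offsets
// 0 and 256 for the two draws, not 0 and 64.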
int sample_main(int argc, char **argv) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Memory Barriers"; process_command_line_args(info, argc, argv); init_global_layer_properties(info); info.instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); #ifdef _WIN32 info.instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #elif __ANDROID__ info.instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); #else info.instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #endif info.device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); init_instance(info, sample_title); init_enumerate_device(info); init_device(info); info.width = info.height = 500; init_connection(info); init_window(info); init_swapchain_extension(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT); // CmdClearColorImage is going to require usage of TRANSFER_DST, but // it's not clear which format feature maps to the required TRANSFER_DST usage, // BLIT_DST is a reasonable guess and it seems to work init_texture(info, nullptr, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_FORMAT_FEATURE_BLIT_DST_BIT); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, DEPTH_PRESENT, false, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, DEPTH_PRESENT); init_vertex_buffer(info, vb_Data, sizeof(vb_Data), sizeof(vb_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, DEPTH_PRESENT); /* VULKAN_KEY_START */ VkImageSubresourceRange srRange = {}; srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; srRange.baseMipLevel = 0; srRange.levelCount = VK_REMAINING_MIP_LEVELS; srRange.baseArrayLayer = 0; srRange.layerCount = VK_REMAINING_ARRAY_LAYERS; VkClearColorValue clear_color[1]; clear_color[0].float32[0] = 0.2f; clear_color[0].float32[1] = 0.2f; clear_color[0].float32[2] = 0.2f; clear_color[0].float32[3] = 0.2f; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &info.imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); // We need to do the clear here instead of using a renderpass load op since // we will use the same renderpass multiple times in the frame vkCmdClearColorImage(info.cmd, info.buffers[info.current_buffer].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = 
info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 0; rp_begin.pClearValues = NULL; // Draw a textured quad on the left side of the window vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 2 * 3, 1, 0, 0); // We can't do a clear inside a renderpass, so end this one and start another one // for the next draw vkCmdEndRenderPass(info.cmd); // Send a barrier to change the texture image's layout from SHADER_READ_ONLY // to COLOR_ATTACHMENT_GENERAL because we're going to clear it VkImageMemoryBarrier textureBarrier = {}; textureBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; textureBarrier.pNext = NULL; textureBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; textureBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; textureBarrier.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; textureBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; textureBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; textureBarrier.subresourceRange.baseMipLevel = 0; textureBarrier.subresourceRange.levelCount = 1; textureBarrier.subresourceRange.baseArrayLayer = 0; textureBarrier.subresourceRange.layerCount = 1; textureBarrier.image = info.textures[0].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &textureBarrier); clear_color[0].float32[0] = 0.0f; clear_color[0].float32[1] = 1.0f; clear_color[0].float32[2] = 0.0f; clear_color[0].float32[3] = 1.0f; /* Clear texture to green */ vkCmdClearColorImage(info.cmd, info.textures[0].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); // Send a barrier to change the texture image's layout back to SHADER_READ_ONLY // because we're going to use it as a texture again textureBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; textureBarrier.pNext = NULL; textureBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; textureBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; textureBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; textureBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; textureBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; textureBarrier.subresourceRange.baseMipLevel = 0; textureBarrier.subresourceRange.levelCount = 1; textureBarrier.subresourceRange.baseArrayLayer = 0; textureBarrier.subresourceRange.layerCount = 1; textureBarrier.image = info.textures[0].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &textureBarrier); // Draw the second quad to the right using the (now) green texture vkCmdBeginRenderPass(info.cmd, &rp_begin, 
VK_SUBPASS_CONTENTS_INLINE); // Draw starting with vertex index 6 to draw to the right of the first quad vkCmdDraw(info.cmd, 2 * 3, 1, 6, 0); vkCmdEndRenderPass(info.cmd); // Change the present buffer from COLOR_ATTACHMENT_OPTIMAL to // PRESENT_SOURCE_KHR // so it can be presented execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkSubmitInfo submit_info = {}; VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; init_submit_info(info, submit_info, pipe_stage_flags); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); // Queue the command buffer for execution res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); // Now present the image in the window VkPresentInfoKHR present{}; init_present_info(info, present); // Make sure command buffer is finished before presenting do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); /* VULKAN_KEY_END */ wait_seconds(1); if (info.save_images) write_ppm(info, "memory_barriers"); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_window(info); destroy_device(info); destroy_instance(info); return 0; }
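/*
 * A condensed sketch of the barrier pattern used twice in the sample above: move one color image
 * between TRANSFER_DST (so vkCmdClearColorImage can write it) and SHADER_READ_ONLY (so the fragment
 * shader can sample it), pairing the access masks with the stages that produce and consume the
 * data. The helper name and parameters are illustrative, not part of the sample framework.
 */
#include <vulkan/vulkan.h>

static void transition_color_image(VkCommandBuffer cmd, VkImage image, VkImageLayout old_layout,
                                   VkImageLayout new_layout, VkAccessFlags src_access, VkAccessFlags dst_access,
                                   VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = src_access;
    barrier.dstAccessMask = dst_access;
    barrier.oldLayout = old_layout;
    barrier.newLayout = new_layout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 0, NULL, 0, NULL, 1, &barrier);
}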
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Multiple Descriptor Sets"; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); // Sample from a green texture to easily see that we've pulled correct texel // value const char *textureName = "green.ppm"; init_texture(info, textureName); init_uniform_buffer(info); init_renderpass(info, true); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, true); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); /* VULKAN_KEY_START */ // Set up two descriptor sets static const unsigned descriptor_set_count = 2; // Create first layout to contain uniform buffer data VkDescriptorSetLayoutBinding uniform_binding[1] = {}; uniform_binding[0].binding = 0; uniform_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; uniform_binding[0].descriptorCount = 1; uniform_binding[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; uniform_binding[0].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo uniform_layout_info[1] = {}; uniform_layout_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; uniform_layout_info[0].pNext = NULL; uniform_layout_info[0].bindingCount = 1; uniform_layout_info[0].pBindings = uniform_binding; // Create second layout containing combined sampler/image data VkDescriptorSetLayoutBinding sampler2D_binding[1] = {}; sampler2D_binding[0].binding = 0; sampler2D_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; sampler2D_binding[0].descriptorCount = 1; sampler2D_binding[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; sampler2D_binding[0].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo sampler2D_layout_info[1] = {}; sampler2D_layout_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; sampler2D_layout_info[0].pNext = NULL; sampler2D_layout_info[0].bindingCount = 1; sampler2D_layout_info[0].pBindings = sampler2D_binding; // Create multiple sets, using each createInfo static const unsigned uniform_set_index = 0; static const unsigned sampler_set_index = 1; VkDescriptorSetLayout descriptor_layouts[descriptor_set_count] = {}; res = vkCreateDescriptorSetLayout(info.device, uniform_layout_info, NULL, &descriptor_layouts[uniform_set_index]); assert(res == VK_SUCCESS); res = vkCreateDescriptorSetLayout(info.device, sampler2D_layout_info, NULL, &descriptor_layouts[sampler_set_index]); assert(res == VK_SUCCESS); // Create pipeline layout with multiple descriptor sets VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo[1] = {}; pipelineLayoutCreateInfo[0].sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipelineLayoutCreateInfo[0].pNext = NULL; pipelineLayoutCreateInfo[0].pushConstantRangeCount = 0; pipelineLayoutCreateInfo[0].pPushConstantRanges = NULL; pipelineLayoutCreateInfo[0].setLayoutCount = descriptor_set_count; pipelineLayoutCreateInfo[0].pSetLayouts = descriptor_layouts; res = vkCreatePipelineLayout(info.device, pipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == 
VK_SUCCESS); // Create a single pool to contain data for our two descriptor sets VkDescriptorPoolSize type_count[2] = {}; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; type_count[0].descriptorCount = 1; type_count[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; type_count[1].descriptorCount = 1; VkDescriptorPoolCreateInfo pool_info[1] = {}; pool_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info[0].pNext = NULL; pool_info[0].maxSets = descriptor_set_count; pool_info[0].poolSizeCount = sizeof(type_count) / sizeof(VkDescriptorPoolSize); pool_info[0].pPoolSizes = type_count; VkDescriptorPool descriptor_pool[1] = {}; res = vkCreateDescriptorPool(info.device, pool_info, NULL, descriptor_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo alloc_info[1]; alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info[0].pNext = NULL; alloc_info[0].descriptorPool = descriptor_pool[0]; alloc_info[0].descriptorSetCount = descriptor_set_count; alloc_info[0].pSetLayouts = descriptor_layouts; // Populate descriptor sets VkDescriptorSet descriptor_sets[descriptor_set_count] = {}; res = vkAllocateDescriptorSets(info.device, alloc_info, descriptor_sets); assert(res == VK_SUCCESS); // Using empty brace initializer on the next line triggers a bug in older // versions of gcc, so memset instead VkWriteDescriptorSet descriptor_writes[2]; memset(descriptor_writes, 0, sizeof(descriptor_writes)); // Populate with info about our uniform buffer descriptor_writes[0] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].pNext = NULL; descriptor_writes[0].dstSet = descriptor_sets[uniform_set_index]; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = &info.uniform_data.buffer_info; // populated by init_uniform_buffer() descriptor_writes[0].dstArrayElement = 0; descriptor_writes[0].dstBinding = 0; // Populate with info about our sampled image descriptor_writes[1] = {}; descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[1].pNext = NULL; descriptor_writes[1].dstSet = descriptor_sets[sampler_set_index]; descriptor_writes[1].descriptorCount = 1; descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_writes[1].pImageInfo = &info.texture_data.image_info; // populated by init_texture() descriptor_writes[1].dstArrayElement = 0; descriptor_writes[1].dstBinding = 0; vkUpdateDescriptorSets(info.device, descriptor_set_count, descriptor_writes, 0, NULL); /* VULKAN_KEY_END */ // Call remaining boilerplate utils init_pipeline_cache(info); init_pipeline(info, true); // The remaining is identical to drawtexturedcube VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore imageAcquiredSemaphore; VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo; imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; imageAcquiredSemaphoreCreateInfo.pNext = NULL; imageAcquiredSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = 
vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, descriptor_set_count, descriptor_sets, 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &imageAcquiredSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "multiple_sets"); vkDestroySemaphore(info.device, imageAcquiredSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); // instead of destroy_descriptor_pool(info); vkDestroyDescriptorPool(info.device, descriptor_pool[0], NULL); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); // instead of destroy_descriptor_and_pipeline_layouts(info); for (int i = 0; i < descriptor_set_count; i++) vkDestroyDescriptorSetLayout(info.device, descriptor_layouts[i], NULL); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_uniform_buffer(info); 
destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
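/*
 * A minimal sketch of the binding rule the two-set layout above relies on: the shader reaches the
 * uniform buffer through set 0 and the combined image sampler through set 1, so the sets can also
 * be bound in separate calls by varying firstSet. The function name is illustrative; the sample
 * binds both sets in a single vkCmdBindDescriptorSets call instead.
 */
#include <vulkan/vulkan.h>

static void bind_sets_individually(VkCommandBuffer cmd, VkPipelineLayout layout, VkDescriptorSet uniform_set,
                                   VkDescriptorSet sampler_set) {
    // firstSet = 0: set holding the MVP uniform buffer
    vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, 1, &uniform_set, 0, NULL);
    // firstSet = 1: set holding the combined image sampler
    vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 1, 1, &sampler_set, 0, NULL);
}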
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Graphics Pipeline Sample"; const bool depthPresent = true; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_renderpass(info, depthPresent); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_solid_face_colors_Data, sizeof(g_vb_solid_face_colors_Data), sizeof(g_vb_solid_face_colors_Data[0]), false); init_descriptor_and_pipeline_layouts(info, false); init_descriptor_pool(info, false); init_descriptor_set(info, false); init_shaders(info, vertShaderText, fragShaderText); /* VULKAN_KEY_START */ VkDynamicState dynamicStateEnables[VK_DYNAMIC_STATE_RANGE_SIZE]; VkPipelineDynamicStateCreateInfo dynamicState = {}; memset(dynamicStateEnables, 0, sizeof dynamicStateEnables); dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dynamicState.pNext = NULL; dynamicState.pDynamicStates = dynamicStateEnables; dynamicState.dynamicStateCount = 0; VkPipelineVertexInputStateCreateInfo vi; vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vi.pNext = NULL; vi.flags = 0; vi.vertexBindingDescriptionCount = 1; vi.pVertexBindingDescriptions = &info.vi_binding; vi.vertexAttributeDescriptionCount = 2; vi.pVertexAttributeDescriptions = info.vi_attribs; VkPipelineInputAssemblyStateCreateInfo ia; ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia.pNext = NULL; ia.flags = 0; ia.primitiveRestartEnable = VK_FALSE; ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; VkPipelineRasterizationStateCreateInfo rs; rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs.pNext = NULL; rs.flags = 0; rs.polygonMode = VK_POLYGON_MODE_FILL; rs.cullMode = VK_CULL_MODE_BACK_BIT; rs.frontFace = VK_FRONT_FACE_CLOCKWISE; rs.depthClampEnable = VK_FALSE; rs.rasterizerDiscardEnable = VK_FALSE; rs.depthBiasEnable = VK_FALSE; rs.depthBiasConstantFactor = 0; rs.depthBiasClamp = 0; rs.depthBiasSlopeFactor = 0; rs.lineWidth = 1.0f; VkPipelineColorBlendStateCreateInfo cb; cb.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb.pNext = NULL; cb.flags = 0; VkPipelineColorBlendAttachmentState att_state[1]; att_state[0].colorWriteMask = 0xf; att_state[0].blendEnable = VK_FALSE; att_state[0].alphaBlendOp = VK_BLEND_OP_ADD; att_state[0].colorBlendOp = VK_BLEND_OP_ADD; att_state[0].srcColorBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].dstColorBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; cb.attachmentCount = 1; cb.pAttachments = att_state; cb.logicOpEnable = VK_FALSE; cb.logicOp = VK_LOGIC_OP_NO_OP; cb.blendConstants[0] = 1.0f; cb.blendConstants[1] = 1.0f; cb.blendConstants[2] = 1.0f; cb.blendConstants[3] = 1.0f; VkPipelineViewportStateCreateInfo vp = {}; vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp.pNext = NULL; vp.flags = 0; vp.viewportCount = NUM_VIEWPORTS; dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_VIEWPORT; vp.scissorCount = 
NUM_SCISSORS; dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_SCISSOR; vp.pScissors = NULL; vp.pViewports = NULL; VkPipelineDepthStencilStateCreateInfo ds; ds.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; ds.pNext = NULL; ds.flags = 0; ds.depthTestEnable = VK_TRUE; ds.depthWriteEnable = VK_TRUE; ds.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL; ds.depthBoundsTestEnable = VK_FALSE; ds.minDepthBounds = 0; ds.maxDepthBounds = 0; ds.stencilTestEnable = VK_FALSE; ds.back.failOp = VK_STENCIL_OP_KEEP; ds.back.passOp = VK_STENCIL_OP_KEEP; ds.back.compareOp = VK_COMPARE_OP_ALWAYS; ds.back.compareMask = 0; ds.back.reference = 0; ds.back.depthFailOp = VK_STENCIL_OP_KEEP; ds.back.writeMask = 0; ds.front = ds.back; VkPipelineMultisampleStateCreateInfo ms; ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms.pNext = NULL; ms.flags = 0; ms.pSampleMask = NULL; ms.rasterizationSamples = NUM_SAMPLES; ms.sampleShadingEnable = VK_FALSE; ms.alphaToCoverageEnable = VK_FALSE; ms.alphaToOneEnable = VK_FALSE; ms.minSampleShading = 0.0; VkGraphicsPipelineCreateInfo pipeline; pipeline.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; pipeline.pNext = NULL; pipeline.layout = info.pipeline_layout; pipeline.basePipelineHandle = VK_NULL_HANDLE; pipeline.basePipelineIndex = 0; pipeline.flags = 0; pipeline.pVertexInputState = &vi; pipeline.pInputAssemblyState = &ia; pipeline.pRasterizationState = &rs; pipeline.pColorBlendState = &cb; pipeline.pTessellationState = NULL; pipeline.pMultisampleState = &ms; pipeline.pDynamicState = &dynamicState; pipeline.pViewportState = &vp; pipeline.pDepthStencilState = &ds; pipeline.pStages = info.shaderStages; pipeline.stageCount = 2; pipeline.renderPass = info.render_pass; pipeline.subpass = 0; res = vkCreateGraphicsPipelines(info.device, VK_NULL_HANDLE, 1, &pipeline, NULL, &info.pipeline); assert(res == VK_SUCCESS); execute_end_command_buffer(info); execute_queue_command_buffer(info); /* VULKAN_KEY_END */ vkDestroyPipeline(info.device, info.pipeline, NULL); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
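/*
 * Because the pipeline above declares viewport and scissor as dynamic state (pViewports and
 * pScissors are left NULL), both must be recorded on the command buffer before any draw. A minimal
 * sketch, assuming the render area matches the window extent; the samples do the equivalent inside
 * init_viewports() and init_scissors().
 */
#include <vulkan/vulkan.h>
#include <cstdint>

static void record_dynamic_viewport_state(VkCommandBuffer cmd, uint32_t width, uint32_t height) {
    VkViewport viewport = {};
    viewport.x = 0.0f;
    viewport.y = 0.0f;
    viewport.width = (float)width;
    viewport.height = (float)height;
    viewport.minDepth = 0.0f;
    viewport.maxDepth = 1.0f;
    vkCmdSetViewport(cmd, 0, 1, &viewport);

    VkRect2D scissor = {};
    scissor.extent.width = width;
    scissor.extent.height = height;
    vkCmdSetScissor(cmd, 0, 1, &scissor);
}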
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Secondary command buffers"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent, true, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_pipeline_cache(info); init_pipeline(info, depthPresent); // we have to set up a couple of things by hand, but this // isn't any different to other examples // get two different textures init_texture(info, "green.ppm"); VkDescriptorImageInfo greenTex = info.texture_data.image_info; init_texture(info, "lunarg.ppm"); VkDescriptorImageInfo lunargTex = info.texture_data.image_info; // create two identical descriptor sets, each with a different texture but // identical UBOa VkDescriptorPoolSize pool_size[2]; pool_size[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; pool_size[0].descriptorCount = 2; pool_size[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; pool_size[1].descriptorCount = 2; VkDescriptorPoolCreateInfo descriptor_pool = {}; descriptor_pool.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; descriptor_pool.pNext = NULL; descriptor_pool.flags = 0; descriptor_pool.maxSets = 2; descriptor_pool.poolSizeCount = 2; descriptor_pool.pPoolSizes = pool_size; res = vkCreateDescriptorPool(info.device, &descriptor_pool, NULL, &info.desc_pool); assert(res == VK_SUCCESS); VkDescriptorSetLayout layouts[] = {info.desc_layout[0], info.desc_layout[0]}; VkDescriptorSetAllocateInfo alloc_info[1]; alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info[0].pNext = NULL; alloc_info[0].descriptorPool = info.desc_pool; alloc_info[0].descriptorSetCount = 2; alloc_info[0].pSetLayouts = layouts; info.desc_set.resize(2); res = vkAllocateDescriptorSets(info.device, alloc_info, info.desc_set.data()); assert(res == VK_SUCCESS); VkWriteDescriptorSet writes[2]; writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[0].pNext = NULL; writes[0].dstSet = info.desc_set[0]; writes[0].descriptorCount = 1; writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; writes[0].pBufferInfo = &info.uniform_data.buffer_info; writes[0].dstArrayElement = 0; writes[0].dstBinding = 0; writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writes[1].pNext = NULL; writes[1].dstSet = info.desc_set[0]; writes[1].dstBinding = 1; writes[1].descriptorCount = 1; writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; writes[1].pImageInfo = &greenTex; writes[1].dstArrayElement = 0; vkUpdateDescriptorSets(info.device, 2, writes, 0, NULL); writes[0].dstSet = writes[1].dstSet = info.desc_set[1]; writes[1].pImageInfo = &lunargTex; vkUpdateDescriptorSets(info.device, 2, writes, 0, NULL); /* VULKAN_KEY_START */ // create four secondary command buffers, for 
each quadrant of the screen VkCommandBufferAllocateInfo cmdalloc = {}; cmdalloc.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmdalloc.pNext = NULL; cmdalloc.commandPool = info.cmd_pool; cmdalloc.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; cmdalloc.commandBufferCount = 4; VkCommandBuffer secondary_cmds[4]; res = vkAllocateCommandBuffers(info.device, &cmdalloc, secondary_cmds); assert(res == VK_SUCCESS); VkClearValue clear_values[2]; clear_values[0].color.float32[0] = 0.2f; clear_values[0].color.float32[1] = 0.2f; clear_values[0].color.float32[2] = 0.2f; clear_values[0].color.float32[3] = 0.2f; clear_values[1].depthStencil.depth = 1.0f; clear_values[1].depthStencil.stencil = 0; VkSemaphore imageAcquiredSemaphore; VkSemaphoreCreateInfo imageAcquiredSemaphoreCreateInfo; imageAcquiredSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; imageAcquiredSemaphoreCreateInfo.pNext = NULL; imageAcquiredSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &imageAcquiredSemaphoreCreateInfo, NULL, &imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT); const VkDeviceSize offsets[1] = {0}; VkViewport viewport; viewport.height = 200.0f; viewport.width = 200.0f; viewport.minDepth = (float)0.0f; viewport.maxDepth = (float)1.0f; viewport.x = 0; viewport.y = 0; VkRect2D scissor; scissor.extent.width = info.width; scissor.extent.height = info.height; scissor.offset.x = 0; scissor.offset.y = 0; // now we record four separate command buffers, one for each quadrant of the // screen VkCommandBufferInheritanceInfo cmd_buf_inheritance_info = {}; cmd_buf_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, cmd_buf_inheritance_info.pNext = NULL; cmd_buf_inheritance_info.renderPass = info.render_pass; cmd_buf_inheritance_info.subpass = 0; cmd_buf_inheritance_info.framebuffer = info.framebuffers[info.current_buffer]; cmd_buf_inheritance_info.occlusionQueryEnable = VK_FALSE; cmd_buf_inheritance_info.queryFlags = 0; cmd_buf_inheritance_info.pipelineStatistics = 0; VkCommandBufferBeginInfo secondary_begin = {}; secondary_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; secondary_begin.pNext = NULL; secondary_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; secondary_begin.pInheritanceInfo = &cmd_buf_inheritance_info; for (int i = 0; i < 4; i++) { vkBeginCommandBuffer(secondary_cmds[i], &secondary_begin); vkCmdBindPipeline(secondary_cmds[i], VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(secondary_cmds[i], VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, 1, &info.desc_set[i == 0 || i == 3], 0, NULL); vkCmdBindVertexBuffers(secondary_cmds[i], 0, 1, &info.vertex_buffer.buf, offsets); viewport.x = 25.0f + 250.0f * (i % 2); viewport.y = 25.0f + 250.0f * (i / 2); vkCmdSetViewport(secondary_cmds[i], 0, NUM_VIEWPORTS, &viewport); vkCmdSetScissor(secondary_cmds[i], 0, NUM_SCISSORS, &scissor); vkCmdDraw(secondary_cmds[i], 12 * 3, 1, 0, 
0); vkEndCommandBuffer(secondary_cmds[i]); } VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; // specifying VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS means this // render pass may // ONLY call vkCmdExecuteCommands vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdExecuteCommands(info.cmd, 4, secondary_cmds); vkCmdEndRenderPass(info.cmd); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &imageAcquiredSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "secondary_command_buffer"); 
vkFreeCommandBuffers(info.device, info.cmd_pool, 4, secondary_cmds); /* VULKAN_KEY_END */ vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, imageAcquiredSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
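/*
 * A condensed sketch of the recording rule the secondary buffers above follow: a secondary command
 * buffer executed inside a render pass must be begun with RENDER_PASS_CONTINUE and an inheritance
 * structure naming that render pass (the framebuffer is optional but lets the driver optimize).
 * The helper name is illustrative.
 */
#include <vulkan/vulkan.h>
#include <cstdint>

static void begin_secondary_for_renderpass(VkCommandBuffer secondary, VkRenderPass render_pass, uint32_t subpass,
                                           VkFramebuffer framebuffer) {
    VkCommandBufferInheritanceInfo inherit = {};
    inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inherit.renderPass = render_pass;
    inherit.subpass = subpass;
    inherit.framebuffer = framebuffer;  // may be VK_NULL_HANDLE if not known at record time

    VkCommandBufferBeginInfo begin = {};
    begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    begin.pInheritanceInfo = &inherit;
    vkBeginCommandBuffer(secondary, &begin);
}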
int sample_main(int argc, char *argv[]) {
    VkResult res;
    struct sample_info info;
    uint32_t instance_layer_count;
    VkLayerProperties *vk_props = NULL;

    init_global_layer_properties(info);

    /* VULKAN_KEY_START */

    /*
     * It's possible, though very rare, that the number of
     * instance layers could change. For example, installing something
     * could include new layers that the loader would pick up
     * between the initial query for the count and the
     * request for VkLayerProperties. The loader indicates that
     * by returning a VK_INCOMPLETE status and will update
     * the count parameter.
     * The count parameter will be updated with the number of
     * entries loaded into the data pointer - in case the number
     * of layers went down or is smaller than the size given.
     */
    do {
        res = vkEnumerateInstanceLayerProperties(&instance_layer_count, NULL);
        if (res) break;

        if (instance_layer_count == 0) {
            break;
        }

        vk_props = (VkLayerProperties *)realloc(vk_props, instance_layer_count * sizeof(VkLayerProperties));

        res = vkEnumerateInstanceLayerProperties(&instance_layer_count, vk_props);
    } while (res == VK_INCOMPLETE);

    std::cout << "Instance Layers:" << std::endl;
    for (uint32_t i = 0; i < instance_layer_count; i++) {
        VkLayerProperties *props = &vk_props[i];
        uint32_t major, minor, patch;
        std::cout << props->layerName << ":" << std::endl;
        extract_version(props->specVersion, major, minor, patch);
        std::cout << "\tVersion: " << props->implementationVersion << std::endl;
        std::cout << "\tAPI Version: "
                  << "(" << major << "." << minor << "." << patch << ")" << std::endl;
        std::cout << "\tDescription: " << props->description << std::endl;
        std::cout << std::endl << std::endl;
    }
    std::cout << std::endl;

    free(vk_props);

    /* VULKAN_KEY_END */

    return 0;
}
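/*
 * The same enumerate-until-complete idiom sketched with std::vector instead of realloc: query the
 * count, size the storage, then query the data, repeating while the loader reports VK_INCOMPLETE.
 * The function name is illustrative.
 */
#include <vulkan/vulkan.h>
#include <vector>

static std::vector<VkLayerProperties> enumerate_instance_layers() {
    std::vector<VkLayerProperties> layers;
    uint32_t count = 0;
    VkResult res;
    do {
        res = vkEnumerateInstanceLayerProperties(&count, NULL);
        if (res != VK_SUCCESS || count == 0) break;
        layers.resize(count);
        res = vkEnumerateInstanceLayerProperties(&count, layers.data());
    } while (res == VK_INCOMPLETE);
    layers.resize(count);  // the second call may return fewer entries than first reported
    return layers;
}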