int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "SPIR-V Specialization"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); /* VULKAN_KEY_START */ // Pass in nullptr for fragment shader so we can setup specialization init_shaders(info, vertShaderText, nullptr); // This structure maps constant ids to data locations. // NOTE: Padding bool to 32-bits for simplicity const VkSpecializationMapEntry entries[] = // id, offset, size {{5, 0, sizeof(uint32_t)}, {7, 1 * sizeof(uint32_t), sizeof(uint32_t)}, {8, 2 * sizeof(uint32_t), sizeof(uint32_t)}, {9, 3 * sizeof(uint32_t), sizeof(uint32_t)}}; // Initialize the values we want our mini-ubershader to use const bool drawUserColor = true; const float userColor[] = {0.0f, 0.0f, 1.0f}; // Populate our data entry uint32_t data[4] = {}; data[0] = drawUserColor ? 1 : 0; ((float *)data)[1] = userColor[0]; ((float *)data)[2] = userColor[1]; ((float *)data)[3] = userColor[2]; // Set up the info describing our spec map and data const VkSpecializationInfo specInfo = { 4, // mapEntryCount entries, // pMapEntries 4 * sizeof(float), // dataSize data, // pData }; // Provide the specialization data to fragment stage info.shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; info.shaderStages[1].pNext = NULL; info.shaderStages[1].pSpecializationInfo = &specInfo; info.shaderStages[1].flags = 0; info.shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT; info.shaderStages[1].pName = "main"; VkShaderModuleCreateInfo moduleCreateInfo; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.flags = 0; if (use_SPIRV_asm) { // Use the hand edited SPIR-V assembly spv_context spvContext = spvContextCreate(SPV_ENV_VULKAN_1_0); spv_binary fragmentBinary = {}; spv_diagnostic fragmentDiag = {}; spv_result_t fragmentResult = spvTextToBinary(spvContext, fragmentSPIRV_specialized.c_str(), fragmentSPIRV_specialized.length(), &fragmentBinary, &fragmentDiag); if (fragmentDiag) { printf("Diagnostic info from fragment shader:\n"); spvDiagnosticPrint(fragmentDiag); } assert(fragmentResult == SPV_SUCCESS); moduleCreateInfo.codeSize = fragmentBinary->wordCount * sizeof(unsigned int); moduleCreateInfo.pCode = fragmentBinary->code; spvDiagnosticDestroy(fragmentDiag); spvContextDestroy(spvContext); } else { // Convert GLSL to SPIR-V init_glslang(); std::vector<unsigned int> fragSpv; bool U_ASSERT_ONLY retVal = GLSLtoSPV(VK_SHADER_STAGE_FRAGMENT_BIT, fragShaderText, fragSpv); assert(retVal); finalize_glslang(); moduleCreateInfo.codeSize = fragSpv.size() * sizeof(unsigned int); moduleCreateInfo.pCode = fragSpv.data(); } res = vkCreateShaderModule(info.device, &moduleCreateInfo, NULL, &info.shaderStages[1].module); assert(res == VK_SUCCESS); /* VULKAN_KEY_END */ init_framebuffers(info, depthPresent); init_vertex_buffer(info, 
g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "spirv_specialization"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
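// For reference: the specialization map above targets constant_id values 5, 7, 8, and 9, so the
// fragment shader handed to spvTextToBinary / GLSLtoSPV is expected to declare matching
// specialization constants. A minimal sketch of such a shader follows -- the names, defaults, and
// body are illustrative only, not the sample's actual fragShaderText / fragmentSPIRV_specialized source.
static const char *fragShaderText_specialization_sketch =
    "#version 450\n"
    "layout (constant_id = 5) const bool drawUserColor = false;\n"
    "layout (constant_id = 7) const float userColorR = 0.0f;\n"
    "layout (constant_id = 8) const float userColorG = 0.0f;\n"
    "layout (constant_id = 9) const float userColorB = 0.0f;\n"
    "layout (location = 0) out vec4 outColor;\n"
    "void main() {\n"
    "    outColor = drawUserColor ? vec4(userColorR, userColorG, userColorB, 1.0)\n"
    "                             : vec4(1.0);\n"
    "}\n";
// With the VkSpecializationInfo built above, pipeline creation overrides the shader defaults with
// drawUserColor = true and userColor = (0, 0, 1), i.e. blue.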
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Simple Push Constants"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); // Set up one descriptor set static const unsigned descriptor_set_count = 1; static const unsigned resource_count = 1; // Create binding and layout for the following, matching contents of shader // binding 0 = uniform buffer (MVP) VkDescriptorSetLayoutBinding resource_binding[resource_count] = {}; resource_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; resource_binding[0].descriptorCount = 1; resource_binding[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; resource_binding[0].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo resource_layout_info[1] = {}; resource_layout_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; resource_layout_info[0].pNext = NULL; resource_layout_info[0].bindingCount = resource_count; resource_layout_info[0].pBindings = resource_binding; VkDescriptorSetLayout descriptor_layouts[1] = {}; res = vkCreateDescriptorSetLayout(info.device, resource_layout_info, NULL, &descriptor_layouts[0]); assert(res == VK_SUCCESS); /* VULKAN_KEY_START */ // Set up our push constant range, which mirrors the declaration of the push constant block in the fragment shader const unsigned push_constant_range_count = 1; VkPushConstantRange push_constant_ranges[push_constant_range_count] = {}; push_constant_ranges[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; push_constant_ranges[0].offset = 0; push_constant_ranges[0].size = 8; // Create pipeline layout, including push constant info.
// Create pipeline layout with multiple descriptor sets VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo[1] = {}; pipelineLayoutCreateInfo[0].sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipelineLayoutCreateInfo[0].pNext = NULL; pipelineLayoutCreateInfo[0].pushConstantRangeCount = push_constant_range_count; pipelineLayoutCreateInfo[0].pPushConstantRanges = push_constant_ranges; pipelineLayoutCreateInfo[0].setLayoutCount = descriptor_set_count; pipelineLayoutCreateInfo[0].pSetLayouts = descriptor_layouts; res = vkCreatePipelineLayout(info.device, pipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); // Create a single pool to contain data for our descriptor set VkDescriptorPoolSize type_count[2] = {}; type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; type_count[0].descriptorCount = 1; type_count[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; type_count[1].descriptorCount = 1; VkDescriptorPoolCreateInfo pool_info[1] = {}; pool_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info[0].pNext = NULL; pool_info[0].maxSets = descriptor_set_count; pool_info[0].poolSizeCount = sizeof(type_count) / sizeof(VkDescriptorPoolSize); pool_info[0].pPoolSizes = type_count; VkDescriptorPool descriptor_pool[1] = {}; res = vkCreateDescriptorPool(info.device, pool_info, NULL, descriptor_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo alloc_info[1]; alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info[0].pNext = NULL; alloc_info[0].descriptorPool = descriptor_pool[0]; alloc_info[0].descriptorSetCount = descriptor_set_count; alloc_info[0].pSetLayouts = descriptor_layouts; // Populate descriptor sets VkDescriptorSet descriptor_sets[descriptor_set_count] = {}; res = vkAllocateDescriptorSets(info.device, alloc_info, descriptor_sets); assert(res == VK_SUCCESS); // Using empty brace initializer on the next line triggers a bug in older // versions of gcc, so memset instead VkWriteDescriptorSet descriptor_writes[resource_count]; memset(descriptor_writes, 0, sizeof(descriptor_writes)); // Populate with info about our uniform buffer for MVP descriptor_writes[0] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].pNext = NULL; descriptor_writes[0].dstSet = descriptor_sets[0]; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = &info.uniform_data.buffer_info; // populated by init_uniform_buffer() descriptor_writes[0].dstArrayElement = 0; descriptor_writes[0].dstBinding = 0; vkUpdateDescriptorSets(info.device, resource_count, descriptor_writes, 0, NULL); // Create our push constant data, which matches shader expectations unsigned pushConstants[2] = {}; pushConstants[0] = (unsigned)2; pushConstants[1] = (unsigned)0x3F800000; // Ensure we have enough room for push constant data if (sizeof(pushConstants) > info.gpu_props.limits.maxPushConstantsSize) assert(0 && "Too many push constants"); vkCmdPushConstants(info.cmd, info.pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(pushConstants), pushConstants); /* VULKAN_KEY_END */ init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, 
&rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, descriptor_sets, 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "push_constants"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); // instead of destroy_descriptor_pool(info); vkDestroyDescriptorPool(info.device, descriptor_pool[0], NULL); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); // instead of destroy_descriptor_and_pipeline_layouts(info); for (int i = 0; i < descriptor_set_count; i++) vkDestroyDescriptorSetLayout(info.device, descriptor_layouts[i], NULL); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
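// For reference: the push constant range above covers 8 bytes in the fragment stage, and the sample
// pushes an unsigned 2 followed by 0x3F800000, which is the IEEE-754 bit pattern of 1.0f. The
// fragment shader is therefore expected to declare an 8-byte push constant block along these lines;
// the member names below are illustrative only, not the sample's actual fragShaderText source.
static const char *pushConstantBlock_sketch =
    "layout (push_constant) uniform pushBlock {\n"
    "    int   iFoo;   // receives 2\n"
    "    float fBar;   // receives 1.0 (pushed as its bit pattern 0x3F800000)\n"
    "} pushConstantsBlock;\n";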
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Separate Image Sampler"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_uniform_buffer(info); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); /* VULKAN_KEY_START */ // Sample from a green texture to easily see that we've pulled correct texel // value // Create our separate image struct texture_object texObj; const char *textureName = "green.ppm"; init_image(info, texObj, textureName); info.textures.push_back(texObj); info.texture_data.image_info.sampler = 0; info.texture_data.image_info.imageView = info.textures[0].view; info.texture_data.image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; // Create our separate sampler VkSampler separateSampler = {}; init_sampler(info, separateSampler); VkDescriptorImageInfo samplerInfo = {}; samplerInfo.sampler = separateSampler; // Set up one descriptor set static const unsigned descriptor_set_count = 1; static const unsigned resource_count = 3; static const unsigned resource_type_count = 3; // Create binding and layout for the following, matching contents of shader // binding 0 = uniform buffer (MVP) // binding 1 = texture2D // binding 2 = sampler VkDescriptorSetLayoutBinding resource_binding[resource_count] = {}; resource_binding[0].binding = 0; resource_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; resource_binding[0].descriptorCount = 1; resource_binding[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; resource_binding[0].pImmutableSamplers = NULL; resource_binding[1].binding = 1; resource_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; resource_binding[1].descriptorCount = 1; resource_binding[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; resource_binding[1].pImmutableSamplers = NULL; resource_binding[2].binding = 2; resource_binding[2].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; resource_binding[2].descriptorCount = 1; resource_binding[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; resource_binding[2].pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo resource_layout_info[1] = {}; resource_layout_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; resource_layout_info[0].pNext = NULL; resource_layout_info[0].bindingCount = resource_count; resource_layout_info[0].pBindings = resource_binding; VkDescriptorSetLayout descriptor_layouts[1] = {}; res = vkCreateDescriptorSetLayout(info.device, resource_layout_info, NULL, &descriptor_layouts[0]); assert(res == VK_SUCCESS); // Create pipeline layout VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo[1] = {}; pipelineLayoutCreateInfo[0].sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipelineLayoutCreateInfo[0].pNext = NULL; pipelineLayoutCreateInfo[0].pushConstantRangeCount = 0; pipelineLayoutCreateInfo[0].pPushConstantRanges = NULL; 
pipelineLayoutCreateInfo[0].setLayoutCount = descriptor_set_count; pipelineLayoutCreateInfo[0].pSetLayouts = descriptor_layouts; res = vkCreatePipelineLayout(info.device, pipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); // Create a single pool to contain data for our descriptor set VkDescriptorPoolSize pool_sizes[resource_type_count] = {}; pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; pool_sizes[0].descriptorCount = 1; pool_sizes[1].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; pool_sizes[1].descriptorCount = 1; pool_sizes[2].type = VK_DESCRIPTOR_TYPE_SAMPLER; pool_sizes[2].descriptorCount = 1; VkDescriptorPoolCreateInfo pool_info[1] = {}; pool_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info[0].pNext = NULL; pool_info[0].maxSets = descriptor_set_count; pool_info[0].poolSizeCount = resource_type_count; pool_info[0].pPoolSizes = pool_sizes; VkDescriptorPool descriptor_pool[1] = {}; res = vkCreateDescriptorPool(info.device, pool_info, NULL, descriptor_pool); assert(res == VK_SUCCESS); VkDescriptorSetAllocateInfo alloc_info[1]; alloc_info[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info[0].pNext = NULL; alloc_info[0].descriptorPool = descriptor_pool[0]; alloc_info[0].descriptorSetCount = descriptor_set_count; alloc_info[0].pSetLayouts = descriptor_layouts; // Populate descriptor sets VkDescriptorSet descriptor_sets[descriptor_set_count] = {}; res = vkAllocateDescriptorSets(info.device, alloc_info, descriptor_sets); assert(res == VK_SUCCESS); VkWriteDescriptorSet descriptor_writes[resource_count]; // Populate with info about our uniform buffer for MVP descriptor_writes[0] = {}; descriptor_writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[0].pNext = NULL; descriptor_writes[0].dstSet = descriptor_sets[0]; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[0].pBufferInfo = &info.uniform_data.buffer_info; // populated by init_uniform_buffer() descriptor_writes[0].dstArrayElement = 0; descriptor_writes[0].dstBinding = 0; // Populate with info about our image descriptor_writes[1] = {}; descriptor_writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[1].pNext = NULL; descriptor_writes[1].dstSet = descriptor_sets[0]; descriptor_writes[1].descriptorCount = 1; descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; descriptor_writes[1].pImageInfo = &info.texture_data.image_info; // populated by init_texture() descriptor_writes[1].dstArrayElement = 0; descriptor_writes[1].dstBinding = 1; // Populate with info about our sampler descriptor_writes[2] = {}; descriptor_writes[2].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_writes[2].pNext = NULL; descriptor_writes[2].dstSet = descriptor_sets[0]; descriptor_writes[2].descriptorCount = 1; descriptor_writes[2].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_writes[2].pImageInfo = &samplerInfo; descriptor_writes[2].dstArrayElement = 0; descriptor_writes[2].dstBinding = 2; vkUpdateDescriptorSets(info.device, resource_count, descriptor_writes, 0, NULL); /* VULKAN_KEY_END */ init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, 
&rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, descriptor_sets, 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "separate_image_sampler"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); vkDestroySampler(info.device, separateSampler, NULL); vkDestroyImageView(info.device, info.textures[0].view, NULL); vkDestroyImage(info.device, info.textures[0].image, NULL); vkFreeMemory(info.device, info.textures[0].mem, NULL); // instead of destroy_descriptor_pool(info); vkDestroyDescriptorPool(info.device, descriptor_pool[0], NULL); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); // instead of destroy_descriptor_and_pipeline_layouts(info); for (int i = 0; i < descriptor_set_count; i++) vkDestroyDescriptorSetLayout(info.device, descriptor_layouts[i], NULL); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
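// For reference: binding 1 is a VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE and binding 2 is a
// VK_DESCRIPTOR_TYPE_SAMPLER, so the fragment shader is expected to declare the image and the
// sampler separately and combine them at the sample site. A sketch of that usage follows -- the
// names are illustrative only, not the sample's actual fragShaderText source.
static const char *separateImageSampler_sketch =
    "layout (set = 0, binding = 1) uniform texture2D tex;\n"
    "layout (set = 0, binding = 2) uniform sampler samp;\n"
    "layout (location = 0) in vec2 inTexCoords;\n"
    "layout (location = 0) out vec4 outColor;\n"
    "void main() {\n"
    "    // sampler2D(texture2D, sampler) combines the two for texture()\n"
    "    outColor = texture(sampler2D(tex, samp), inTexCoords);\n"
    "}\n";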
int sample_main() { VkResult U_ASSERT_ONLY res; char sample_title[] = "MT Cmd Buffer Sample"; const bool depthPresent = false; init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &info.presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.presentCompleteSemaphore, NULL, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pPipelineLayoutCreateInfo.pNext = NULL; pPipelineLayoutCreateInfo.pushConstantRangeCount = 0; pPipelineLayoutCreateInfo.pPushConstantRanges = NULL; pPipelineLayoutCreateInfo.setLayoutCount = 0; pPipelineLayoutCreateInfo.pSetLayouts = NULL; res = vkCreatePipelineLayout(info.device, &pPipelineLayoutCreateInfo, NULL, &info.pipeline_layout); assert(res == VK_SUCCESS); init_renderpass( info, depthPresent, false); // Can't clear in renderpass load because we re-use pipeline init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); /* The binding and attributes should be the same for all 3 vertex buffers, * so init here */ info.vi_binding.binding = 0; info.vi_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; info.vi_binding.stride = sizeof(triData[0]); info.vi_attribs[0].binding = 0; info.vi_attribs[0].location = 0; info.vi_attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[0].offset = 0; info.vi_attribs[1].binding = 0; info.vi_attribs[1].location = 1; info.vi_attribs[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; info.vi_attribs[1].offset = 16; init_pipeline_cache(info); init_pipeline(info, depthPresent); VkImageSubresourceRange srRange = {}; srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; srRange.baseMipLevel = 0; srRange.levelCount = VK_REMAINING_MIP_LEVELS; srRange.baseArrayLayer = 0; srRange.layerCount = VK_REMAINING_ARRAY_LAYERS; VkClearColorValue clear_color[1]; clear_color[0].float32[0] = 0.2f; clear_color[0].float32[1] = 0.2f; clear_color[0].float32[2] = 0.2f; clear_color[0].float32[3] = 0.2f; /* We need to do the clear here instead of as a load op since all 3 threads * share the same pipeline / renderpass */ set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); vkCmdClearColorImage(info.cmd, info.buffers[info.current_buffer].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); res = 
vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); const VkCommandBuffer cmd_bufs[] = {info.cmd}; VkFence clearFence; init_fence(info, clearFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info[1] = {}; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 1; submit_info[0].pWaitSemaphores = &info.presentCompleteSemaphore; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; /* must be non-NULL when waiting on a semaphore */ submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = cmd_bufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, submit_info, clearFence); assert(!res); do { res = vkWaitForFences(info.device, 1, &clearFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, clearFence, NULL); /* VULKAN_KEY_START */ /* Use the fourth slot in the command buffer array for the presentation */ /* barrier using the command buffer in info */ threadCmdBufs[3] = info.cmd; sample_platform_thread vk_threads[3]; for (size_t i = 0; i < 3; i++) { sample_platform_thread_create(&vk_threads[i], &per_thread_code, (void *)i); } VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = 0; cmd_buf_info.pInheritanceInfo = NULL; res = vkBeginCommandBuffer(threadCmdBufs[3], &cmd_buf_info); assert(res == VK_SUCCESS); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(threadCmdBufs[3], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(threadCmdBufs[3]); assert(res == VK_SUCCESS); pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; submit_info[0].pNext = NULL; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = &pipe_stage_flags; submit_info[0].commandBufferCount = 4; /* 3 from threads + prePresentBarrier */ submit_info[0].pCommandBuffers = threadCmdBufs; submit_info[0].signalSemaphoreCount = 0; submit_info[0].pSignalSemaphores = NULL; /* Wait for all of the threads to finish */ for (int i = 0; i < 3; i++) { sample_platform_thread_join(vk_threads[i], NULL); } VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); /* Queue the command buffer for execution */ res =
vkQueueSubmit(info.queue, 1, submit_info, drawFence); assert(!res); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); execute_present_image(info); wait_seconds(1); /* VULKAN_KEY_END */ vkDestroyBuffer(info.device, vertex_buffer[0].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[1].buf, NULL); vkDestroyBuffer(info.device, vertex_buffer[2].buf, NULL); vkFreeMemory(info.device, vertex_buffer[0].mem, NULL); vkFreeMemory(info.device, vertex_buffer[1].mem, NULL); vkFreeMemory(info.device, vertex_buffer[2].mem, NULL); for (int i = 0; i < 3; i++) { vkFreeCommandBuffers(info.device, threadCmdPools[i], 1, &threadCmdBufs[i]); vkDestroyCommandPool(info.device, threadCmdPools[i], NULL); } vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); vkDestroyPipelineLayout(info.device, info.pipeline_layout, NULL); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_window(info); destroy_device(info); destroy_instance(info); return 0; }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Copy/Blit Image"; VkImageCreateInfo image_info; VkImage bltSrcImage; VkImage bltDstImage; VkMemoryRequirements memReq; VkMemoryAllocateInfo memAllocInfo; VkDeviceMemory dmem; unsigned char *pImgMem; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 640, 640); init_connection(info); init_window(info); init_swapchain_extension(info); VkSurfaceCapabilitiesKHR surfCapabilities; res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(info.gpus[0], info.surface, &surfCapabilities); if (!(surfCapabilities.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) { std::cout << "Surface cannot be destination of blit - abort \n"; exit(-1); } init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); /* VULKAN_KEY_START */ VkFormatProperties formatProps; vkGetPhysicalDeviceFormatProperties(info.gpus[0], info.format, &formatProps); assert( (formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT) && "Format cannot be used as transfer source"); VkSemaphore presentCompleteSemaphore; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; // VkSemaphoreCreateFlags is reserved and must be 0 res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &presentCompleteSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, presentCompleteSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); // Create an image, map it, and write some values to the image image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_info.pNext = NULL; image_info.imageType = VK_IMAGE_TYPE_2D; image_info.format = info.format; image_info.extent.width = info.width; image_info.extent.height = info.height; image_info.extent.depth = 1; image_info.mipLevels = 1; image_info.arrayLayers = 1; image_info.samples = NUM_SAMPLES; image_info.queueFamilyIndexCount = 0; image_info.pQueueFamilyIndices = NULL; image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_info.flags = 0; image_info.tiling = VK_IMAGE_TILING_LINEAR; image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; res = vkCreateImage(info.device, &image_info, NULL, &bltSrcImage); memAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAllocInfo.pNext = NULL; vkGetImageMemoryRequirements(info.device, bltSrcImage, &memReq); bool pass = memory_type_from_properties(info, memReq.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memAllocInfo.memoryTypeIndex); assert(pass); memAllocInfo.allocationSize = memReq.size; res = vkAllocateMemory(info.device, &memAllocInfo, NULL, &dmem); res = vkBindImageMemory(info.device, bltSrcImage, dmem, 0); set_image_layout(info, bltSrcImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL); res =
vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence cmdFence; init_fence(info, cmdFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; submit_info.pNext = NULL; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &presentCompleteSemaphore; submit_info.pWaitDstStageMask = &pipe_stage_flags; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &info.cmd; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, cmdFence); assert(res == VK_SUCCESS); /* Make sure command buffer is finished before mapping */ do { res = vkWaitForFences(info.device, 1, &cmdFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); vkDestroyFence(info.device, cmdFence, NULL); res = vkMapMemory(info.device, dmem, 0, memReq.size, 0, (void **)&pImgMem); // Checkerboard of 8x8 pixel squares for (int row = 0; row < info.height; row++) { for (int col = 0; col < info.width; col++) { unsigned char rgb = (((row & 0x8) == 0) ^ ((col & 0x8) == 0)) * 255; pImgMem[0] = rgb; pImgMem[1] = rgb; pImgMem[2] = rgb; pImgMem[3] = 255; pImgMem += 4; } } // Flush the mapped memory and then unmap it. Assume it isn't coherent since // we didn't really confirm VkMappedMemoryRange memRange; memRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; memRange.pNext = NULL; memRange.memory = dmem; memRange.offset = 0; memRange.size = memReq.size; res = vkFlushMappedMemoryRanges(info.device, 1, &memRange); vkUnmapMemory(info.device, dmem); vkResetCommandBuffer(info.cmd, 0); execute_begin_command_buffer(info); set_image_layout(info, bltSrcImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); bltDstImage = info.buffers[info.current_buffer].image; // init_swap_chain will create the images as color attachment optimal // but we want transfer dst optimal set_image_layout(info, bltDstImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); // Do a 32x32 blit to all of the dst image - should get big squares VkImageBlit region; region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.srcSubresource.mipLevel = 0; region.srcSubresource.baseArrayLayer = 0; region.srcSubresource.layerCount = 1; region.srcOffsets[0].x = 0; region.srcOffsets[0].y = 0; region.srcOffsets[0].z = 0; region.srcOffsets[1].x = 32; region.srcOffsets[1].y = 32; region.srcOffsets[1].z = 1; region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.dstSubresource.mipLevel = 0; region.dstSubresource.baseArrayLayer = 0; region.dstSubresource.layerCount = 1; region.dstOffsets[0].x = 0; region.dstOffsets[0].y = 0; region.dstOffsets[0].z = 0; region.dstOffsets[1].x = info.width; region.dstOffsets[1].y = info.height; region.dstOffsets[1].z = 1; vkCmdBlitImage(info.cmd, bltSrcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bltDstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, VK_FILTER_LINEAR); // Do an image copy to part of the dst image - checks should stay small VkImageCopy cregion; cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; cregion.srcSubresource.mipLevel = 0; cregion.srcSubresource.baseArrayLayer = 0; cregion.srcSubresource.layerCount = 1; cregion.srcOffset.x = 0; cregion.srcOffset.y = 0; cregion.srcOffset.z = 0; cregion.dstSubresource.aspectMask =
VK_IMAGE_ASPECT_COLOR_BIT; cregion.dstSubresource.mipLevel = 0; cregion.dstSubresource.baseArrayLayer = 0; cregion.dstSubresource.layerCount = 1; cregion.dstOffset.x = 256; cregion.dstOffset.y = 256; cregion.dstOffset.z = 0; cregion.extent.width = 128; cregion.extent.height = 128; cregion.extent.depth = 1; vkCmdCopyImage(info.cmd, bltSrcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bltDstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion); VkImageMemoryBarrier prePresentBarrier = {}; prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; prePresentBarrier.pNext = NULL; prePresentBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; prePresentBarrier.subresourceRange.baseMipLevel = 0; prePresentBarrier.subresourceRange.levelCount = 1; prePresentBarrier.subresourceRange.baseArrayLayer = 0; prePresentBarrier.subresourceRange.layerCount = 1; prePresentBarrier.image = info.buffers[info.current_buffer].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &prePresentBarrier); res = vkEndCommandBuffer(info.cmd); VkFenceCreateInfo fenceInfo; VkFence drawFence; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; fenceInfo.flags = 0; vkCreateFence(info.device, &fenceInfo, NULL, &drawFence); submit_info.pNext = NULL; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &info.cmd; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); res = vkQueueWaitIdle(info.queue); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present; present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; present.pNext = NULL; present.swapchainCount = 1; present.pSwapchains = &info.swap_chain; present.pImageIndices = &info.current_buffer; present.pWaitSemaphores = NULL; present.waitSemaphoreCount = 0; present.pResults = NULL; /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); /* VULKAN_KEY_END */ if (info.save_images) write_ppm(info, "copyblitimage"); vkDestroySemaphore(info.device, presentCompleteSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); vkDestroyImage(info.device, bltSrcImage, NULL); vkFreeMemory(info.device, dmem, NULL); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
int sample_main(int argc, char **argv) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Memory Barriers"; process_command_line_args(info, argc, argv); init_global_layer_properties(info); info.instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); #ifdef _WIN32 info.instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #elif __ANDROID__ info.instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); #else info.instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #endif info.device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); init_instance(info, sample_title); init_enumerate_device(info); init_device(info); info.width = info.height = 500; init_connection(info); init_window(info); init_swapchain_extension(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT); // CmdClearColorImage is going to require usage of TRANSFER_DST, but // it's not clear which format feature maps to the required TRANSFER_DST usage, // BLIT_DST is a reasonable guess and it seems to work init_texture(info, nullptr, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_FORMAT_FEATURE_BLIT_DST_BIT); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, DEPTH_PRESENT, false, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, DEPTH_PRESENT); init_vertex_buffer(info, vb_Data, sizeof(vb_Data), sizeof(vb_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, DEPTH_PRESENT); /* VULKAN_KEY_START */ VkImageSubresourceRange srRange = {}; srRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; srRange.baseMipLevel = 0; srRange.levelCount = VK_REMAINING_MIP_LEVELS; srRange.baseArrayLayer = 0; srRange.layerCount = VK_REMAINING_ARRAY_LAYERS; VkClearColorValue clear_color[1]; clear_color[0].float32[0] = 0.2f; clear_color[0].float32[1] = 0.2f; clear_color[0].float32[2] = 0.2f; clear_color[0].float32[3] = 0.2f; VkSemaphoreCreateInfo presentCompleteSemaphoreCreateInfo; presentCompleteSemaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; presentCompleteSemaphoreCreateInfo.pNext = NULL; presentCompleteSemaphoreCreateInfo.flags = 0; res = vkCreateSemaphore(info.device, &presentCompleteSemaphoreCreateInfo, NULL, &info.imageAcquiredSemaphore); assert(res == VK_SUCCESS); // Get the index of the next available swapchain image: res = vkAcquireNextImageKHR(info.device, info.swap_chain, UINT64_MAX, info.imageAcquiredSemaphore, VK_NULL_HANDLE, &info.current_buffer); // TODO: Deal with the VK_SUBOPTIMAL_KHR and VK_ERROR_OUT_OF_DATE_KHR // return codes assert(res == VK_SUCCESS); set_image_layout(info, info.buffers[info.current_buffer].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); // We need to do the clear here instead of using a renderpass load op since // we will use the same renderpass multiple times in the frame vkCmdClearColorImage(info.cmd, info.buffers[info.current_buffer].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); VkRenderPassBeginInfo rp_begin; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = 
info.render_pass; rp_begin.framebuffer = info.framebuffers[info.current_buffer]; rp_begin.renderArea.offset.x = 0; rp_begin.renderArea.offset.y = 0; rp_begin.renderArea.extent.width = info.width; rp_begin.renderArea.extent.height = info.height; rp_begin.clearValueCount = 0; rp_begin.pClearValues = NULL; // Draw a textured quad on the left side of the window vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 2 * 3, 1, 0, 0); // We can't do a clear inside a renderpass, so end this one and start another one // for the next draw vkCmdEndRenderPass(info.cmd); // Send a barrier to change the texture image's layout from SHADER_READ_ONLY // to TRANSFER_DST_OPTIMAL because we're going to clear it VkImageMemoryBarrier textureBarrier = {}; textureBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; textureBarrier.pNext = NULL; textureBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; textureBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; textureBarrier.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; textureBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; textureBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; textureBarrier.subresourceRange.baseMipLevel = 0; textureBarrier.subresourceRange.levelCount = 1; textureBarrier.subresourceRange.baseArrayLayer = 0; textureBarrier.subresourceRange.layerCount = 1; textureBarrier.image = info.textures[0].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &textureBarrier); clear_color[0].float32[0] = 0.0f; clear_color[0].float32[1] = 1.0f; clear_color[0].float32[2] = 0.0f; clear_color[0].float32[3] = 1.0f; /* Clear texture to green */ vkCmdClearColorImage(info.cmd, info.textures[0].image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clear_color, 1, &srRange); // Send a barrier to change the texture image's layout back to SHADER_READ_ONLY // because we're going to use it as a texture again textureBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; textureBarrier.pNext = NULL; textureBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; textureBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; textureBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; textureBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; textureBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; textureBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; textureBarrier.subresourceRange.baseMipLevel = 0; textureBarrier.subresourceRange.levelCount = 1; textureBarrier.subresourceRange.baseArrayLayer = 0; textureBarrier.subresourceRange.layerCount = 1; textureBarrier.image = info.textures[0].image; vkCmdPipelineBarrier(info.cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &textureBarrier); // Draw the second quad to the right using the (now) green texture vkCmdBeginRenderPass(info.cmd, &rp_begin, 
VK_SUBPASS_CONTENTS_INLINE); // Draw starting with vertex index 6 to draw to the right of the first quad vkCmdDraw(info.cmd, 2 * 3, 1, 6, 0); vkCmdEndRenderPass(info.cmd); // Change the present buffer from COLOR_ATTACHMENT_OPTIMAL to // PRESENT_SOURCE_KHR // so it can be presented execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkSubmitInfo submit_info = {}; VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; init_submit_info(info, submit_info, pipe_stage_flags); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); // Queue the command buffer for execution res = vkQueueSubmit(info.graphics_queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); // Now present the image in the window VkPresentInfoKHR present{}; init_present_info(info, present); // Make sure command buffer is finished before presenting do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.present_queue, &present); assert(res == VK_SUCCESS); /* VULKAN_KEY_END */ wait_seconds(1); if (info.save_images) write_ppm(info, "memory_barriers"); vkDestroySemaphore(info.device, info.imageAcquiredSemaphore, NULL); vkDestroyFence(info.device, drawFence, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_window(info); destroy_device(info); destroy_instance(info); return 0; }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Draw Textured Cube"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "template"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
int sample_main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Pipeline Cache"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info, "blue.ppm"); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); /* VULKAN_KEY_START */ // Check disk for existing cache data size_t startCacheSize = 0; void *startCacheData = nullptr; std::string directoryName = get_file_directory(); std::string readFileName = directoryName + "pipeline_cache_data.bin"; FILE *pReadFile = fopen(readFileName.c_str(), "rb"); if (pReadFile) { // Determine cache size fseek(pReadFile, 0, SEEK_END); startCacheSize = ftell(pReadFile); rewind(pReadFile); // Allocate memory to hold the initial cache data startCacheData = (char *)malloc(sizeof(char) * startCacheSize); if (startCacheData == nullptr) { fputs("Memory error", stderr); exit(EXIT_FAILURE); } // Read the data into our buffer size_t result = fread(startCacheData, 1, startCacheSize, pReadFile); if (result != startCacheSize) { fputs("Reading error", stderr); free(startCacheData); exit(EXIT_FAILURE); } // Clean up and print results fclose(pReadFile); printf(" Pipeline cache HIT!\n"); printf(" cacheData loaded from %s\n", readFileName.c_str()); } else { // No cache found on disk printf(" Pipeline cache miss!\n"); } if (startCacheData != nullptr) { // clang-format off // // Check for cache validity // // TODO: Update this as the spec evolves. The fields are not defined by the header. 
// // The code below supports SDK 0.10 Vulkan spec, which contains the following table: // // Offset Size Meaning // ------ ------------ ------------------------------------------------------------------ // 0 4 a device ID equal to VkPhysicalDeviceProperties::DeviceId written // as a stream of bytes, with the least significant byte first // // 4 VK_UUID_SIZE a pipeline cache ID equal to VkPhysicalDeviceProperties::pipelineCacheUUID // // // The code must be updated for latest Vulkan spec, which contains the following table: // // Offset Size Meaning // ------ ------------ ------------------------------------------------------------------ // 0 4 length in bytes of the entire pipeline cache header written as a // stream of bytes, with the least significant byte first // 4 4 a VkPipelineCacheHeaderVersion value written as a stream of bytes, // with the least significant byte first // 8 4 a vendor ID equal to VkPhysicalDeviceProperties::vendorID written // as a stream of bytes, with the least significant byte first // 12 4 a device ID equal to VkPhysicalDeviceProperties::deviceID written // as a stream of bytes, with the least significant byte first // 16 VK_UUID_SIZE a pipeline cache ID equal to VkPhysicalDeviceProperties::pipelineCacheUUID // // clang-format on uint32_t headerLength = 0; uint32_t cacheHeaderVersion = 0; uint32_t vendorID = 0; uint32_t deviceID = 0; uint8_t pipelineCacheUUID[VK_UUID_SIZE] = {}; memcpy(&headerLength, (uint8_t *)startCacheData + 0, 4); memcpy(&cacheHeaderVersion, (uint8_t *)startCacheData + 4, 4); memcpy(&vendorID, (uint8_t *)startCacheData + 8, 4); memcpy(&deviceID, (uint8_t *)startCacheData + 12, 4); memcpy(pipelineCacheUUID, (uint8_t *)startCacheData + 16, VK_UUID_SIZE); // Check each field and report bad values before freeing existing cache bool badCache = false; if (headerLength <= 0) { badCache = true; printf(" Bad header length in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", headerLength); } if (cacheHeaderVersion != VK_PIPELINE_CACHE_HEADER_VERSION_ONE) { badCache = true; printf(" Unsupported cache header version in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", cacheHeaderVersion); } if (vendorID != info.gpu_props.vendorID) { badCache = true; printf(" Vendor ID mismatch in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", vendorID); printf(" Driver expects: 0x%.8x\n", info.gpu_props.vendorID); } if (deviceID != info.gpu_props.deviceID) { badCache = true; printf(" Device ID mismatch in %s.\n", readFileName.c_str()); printf(" Cache contains: 0x%.8x\n", deviceID); printf(" Driver expects: 0x%.8x\n", info.gpu_props.deviceID); } if (memcmp(pipelineCacheUUID, info.gpu_props.pipelineCacheUUID, sizeof(pipelineCacheUUID)) != 0) { badCache = true; printf(" UUID mismatch in %s.\n", readFileName.c_str()); printf(" Cache contains: "); print_UUID(pipelineCacheUUID); printf("\n"); printf(" Driver expects: "); print_UUID(info.gpu_props.pipelineCacheUUID); printf("\n"); } if (badCache) { // Don't submit initial cache data if any version info is incorrect free(startCacheData); startCacheSize = 0; startCacheData = nullptr; // And clear out the old cache file for use in next run printf(" Deleting cache entry %s to repopulate.\n", readFileName.c_str()); if (remove(readFileName.c_str()) != 0) { fputs("Reading error", stderr); exit(EXIT_FAILURE); } } } // Feed the initial cache data into pipeline creation VkPipelineCacheCreateInfo pipelineCache; pipelineCache.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; 
pipelineCache.pNext = NULL; pipelineCache.initialDataSize = startCacheSize; pipelineCache.pInitialData = startCacheData; pipelineCache.flags = 0; res = vkCreatePipelineCache(info.device, &pipelineCache, nullptr, &info.pipelineCache); assert(res == VK_SUCCESS); // Free our initialData now that pipeline has been created free(startCacheData); // Time (roughly) taken to create the graphics pipeline timestamp_t start = get_milliseconds(); init_pipeline(info, depthPresent); timestamp_t elapsed = get_milliseconds() - start; printf(" vkCreateGraphicsPipeline time: %0.f ms\n", (double)elapsed); // Begin standard draw stuff init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "pipeline_cache"); // End standard draw stuff if (startCacheData) { // TODO: Create another pipeline, preferably different from the first // one and merge it here. Then store the merged one. } // Store away the cache that we've populated. This could conceivably happen // earlier, depends on when the pipeline cache stops being populated // internally. 
size_t endCacheSize = 0; void *endCacheData = nullptr; // Call with nullptr to get cache size res = vkGetPipelineCacheData(info.device, info.pipelineCache, &endCacheSize, nullptr); assert(res == VK_SUCCESS); // Allocate memory to hold the populated cache data endCacheData = (char *)malloc(sizeof(char) * endCacheSize); if (!endCacheData) { fputs("Memory error", stderr); exit(EXIT_FAILURE); } // Call again with pointer to buffer res = vkGetPipelineCacheData(info.device, info.pipelineCache, &endCacheSize, endCacheData); assert(res == VK_SUCCESS); // Write the file to disk, overwriting whatever was there FILE *pWriteFile; std::string writeFileName = directoryName + "pipeline_cache_data.bin"; pWriteFile = fopen(writeFileName.c_str(), "wb"); if (pWriteFile) { fwrite(endCacheData, sizeof(char), endCacheSize, pWriteFile); fclose(pWriteFile); printf(" cacheData written to %s\n", writeFileName.c_str()); } else { // Something bad happened printf(" Unable to write cache data to disk!\n"); } /* VULKAN_KEY_END */ vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
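/*
 * Sketch, not part of the sample above: the TODO in the pipeline_cache sample
 * leaves cache merging unimplemented.  One way to do it with the core API is
 * shown below; secondCreateInfo is a hypothetical, fully populated
 * VkGraphicsPipelineCreateInfo for a pipeline that differs from the first, and
 * mainCache corresponds to info.pipelineCache.  Call this before
 * vkGetPipelineCacheData so the merged entries end up in the file on disk.
 */
#include <cassert>
#include <vulkan/vulkan.h>

static void merge_second_pipeline_cache(VkDevice device, VkPipelineCache mainCache,
                                        const VkGraphicsPipelineCreateInfo *secondCreateInfo) {
    // Build the second pipeline against its own, initially empty cache.
    VkPipelineCacheCreateInfo cacheInfo = {};
    cacheInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkPipelineCache secondCache;
    VkResult res = vkCreatePipelineCache(device, &cacheInfo, NULL, &secondCache);
    assert(res == VK_SUCCESS);

    VkPipeline secondPipeline;
    res = vkCreateGraphicsPipelines(device, secondCache, 1, secondCreateInfo, NULL, &secondPipeline);
    assert(res == VK_SUCCESS);

    // Fold whatever the driver recorded for the second pipeline into the main
    // cache; the sample then retrieves mainCache with vkGetPipelineCacheData.
    res = vkMergePipelineCaches(device, mainCache, 1, &secondCache);
    assert(res == VK_SUCCESS);

    vkDestroyPipeline(device, secondPipeline, NULL);
    vkDestroyPipelineCache(device, secondCache, NULL);
}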
// clang-format on int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "SPIR-V Assembly"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); /* VULKAN_KEY_START */ // Init the assembler context spv_context spvContext = spvContextCreate(); // Convert the vertex assembly into binary format spv_binary vertexBinary = {}; spv_diagnostic vertexDiag = {}; spv_result_t vertexResult = spvTextToBinary(spvContext, vertexSPIRV.c_str(), vertexSPIRV.length(), &vertexBinary, &vertexDiag); if (vertexDiag) { printf("Diagnostic info from vertex shader:\n"); spvDiagnosticPrint(vertexDiag); } assert(vertexResult == SPV_SUCCESS); // Convert the fragment assembly into binary format spv_binary fragmentBinary = {}; spv_diagnostic fragmentDiag = {}; spv_result_t fragmentResult = spvTextToBinary(spvContext, fragmentSPIRV.c_str(), fragmentSPIRV.length(), &fragmentBinary, &fragmentDiag); if (fragmentDiag) { printf("Diagnostic info from fragment shader:\n"); spvDiagnosticPrint(fragmentDiag); } assert(fragmentResult == SPV_SUCCESS); info.shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; info.shaderStages[0].pNext = NULL; info.shaderStages[0].pSpecializationInfo = NULL; info.shaderStages[0].flags = 0; info.shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; info.shaderStages[0].pName = "main"; VkShaderModuleCreateInfo moduleCreateInfo; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.flags = 0; // Use wordCount and code pointers from the spv_binary moduleCreateInfo.codeSize = vertexBinary->wordCount * sizeof(unsigned int); moduleCreateInfo.pCode = vertexBinary->code; res = vkCreateShaderModule(info.device, &moduleCreateInfo, NULL, &info.shaderStages[0].module); assert(res == VK_SUCCESS); info.shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; info.shaderStages[1].pNext = NULL; info.shaderStages[1].pSpecializationInfo = NULL; info.shaderStages[1].flags = 0; info.shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT; info.shaderStages[1].pName = "main"; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.flags = 0; // Use wordCount and code pointers from the spv_binary moduleCreateInfo.codeSize = fragmentBinary->wordCount * sizeof(unsigned int); moduleCreateInfo.pCode = fragmentBinary->code; res = vkCreateShaderModule(info.device, &moduleCreateInfo, NULL, &info.shaderStages[1].module); assert(res == VK_SUCCESS); // Clean up the diagnostics spvDiagnosticDestroy(vertexDiag); spvDiagnosticDestroy(fragmentDiag); // Clean up the assembler context spvContextDestroy(spvContext); /* VULKAN_KEY_END */ init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, 
true); init_descriptor_set(info, true); init_pipeline_cache(info); init_pipeline(info, depthPresent); init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "spirv_assembly"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
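/*
 * Sketch, not part of the sample above: SPIRV-Tools can also go the other way,
 * turning a binary module back into assembly text, which is useful for
 * checking what spvTextToBinary produced.  This assumes the same SPIRV-Tools C
 * API (spirv-tools/libspirv.h) used by the sample; note that the sample never
 * releases its spv_binary objects, so the sketch frees the binary as well.
 */
#include <cstdio>
#include <spirv-tools/libspirv.h>

static void disassemble_and_free(spv_context context, spv_binary binary) {
    spv_text text = nullptr;
    spv_diagnostic diag = nullptr;
    spv_result_t result = spvBinaryToText(context, binary->code, binary->wordCount,
                                          SPV_BINARY_TO_TEXT_OPTION_INDENT |
                                              SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES,
                                          &text, &diag);
    if (diag) {
        printf("Diagnostic info from disassembly:\n");
        spvDiagnosticPrint(diag);
        spvDiagnosticDestroy(diag);
    }
    if (result == SPV_SUCCESS && text) {
        // spv_text carries an explicit length rather than a NUL terminator.
        printf("%.*s\n", (int)text->length, text->str);
    }
    spvTextDestroy(text);
    spvBinaryDestroy(binary);
}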
int main(int argc, char *argv[]) { VkResult U_ASSERT_ONLY res; struct sample_info info = {}; char sample_title[] = "Pipeline Derivative"; const bool depthPresent = true; process_command_line_args(info, argc, argv); init_global_layer_properties(info); init_instance_extension_names(info); init_device_extension_names(info); init_instance(info, sample_title); init_enumerate_device(info); init_window_size(info, 500, 500); init_connection(info); init_window(info); init_swapchain_extension(info); init_device(info); init_command_pool(info); init_command_buffer(info); execute_begin_command_buffer(info); init_device_queue(info); init_swap_chain(info); init_depth_buffer(info); init_texture(info); init_uniform_buffer(info); init_descriptor_and_pipeline_layouts(info, true); init_renderpass(info, depthPresent); init_shaders(info, vertShaderText, fragShaderText); init_framebuffers(info, depthPresent); init_vertex_buffer(info, g_vb_texture_Data, sizeof(g_vb_texture_Data), sizeof(g_vb_texture_Data[0]), true); init_descriptor_pool(info, true); init_descriptor_set(info, true); init_pipeline_cache(info); /* VULKAN_KEY_START */ // // Create two pipelines. // // First pipeline is the same as that generated by init_pipeline(), // but with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT set. // // Second pipeline has a modified fragment shader and sets the // VK_PIPELINE_CREATE_DERIVATIVE_BIT flag. // bool include_depth = true; bool include_vi = true; VkDynamicState dynamicStateEnables[VK_DYNAMIC_STATE_RANGE_SIZE]; VkPipelineDynamicStateCreateInfo dynamicState = {}; memset(dynamicStateEnables, 0, sizeof dynamicStateEnables); dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dynamicState.pNext = NULL; dynamicState.pDynamicStates = dynamicStateEnables; dynamicState.dynamicStateCount = 0; VkPipelineVertexInputStateCreateInfo vi; vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vi.pNext = NULL; vi.flags = 0; vi.vertexBindingDescriptionCount = 1; vi.pVertexBindingDescriptions = &info.vi_binding; vi.vertexAttributeDescriptionCount = 2; vi.pVertexAttributeDescriptions = info.vi_attribs; VkPipelineInputAssemblyStateCreateInfo ia; ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia.pNext = NULL; ia.flags = 0; ia.primitiveRestartEnable = VK_FALSE; ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; VkPipelineRasterizationStateCreateInfo rs; rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs.pNext = NULL; rs.flags = 0; rs.polygonMode = VK_POLYGON_MODE_FILL; rs.cullMode = VK_CULL_MODE_BACK_BIT; rs.frontFace = VK_FRONT_FACE_CLOCKWISE; rs.depthClampEnable = include_depth; rs.rasterizerDiscardEnable = VK_FALSE; rs.depthBiasEnable = VK_FALSE; rs.depthBiasConstantFactor = 0; rs.depthBiasClamp = 0; rs.depthBiasSlopeFactor = 0; rs.lineWidth = 0; VkPipelineColorBlendStateCreateInfo cb; cb.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb.flags = 0; cb.pNext = NULL; VkPipelineColorBlendAttachmentState att_state[1]; att_state[0].colorWriteMask = 0xf; att_state[0].blendEnable = VK_FALSE; att_state[0].alphaBlendOp = VK_BLEND_OP_ADD; att_state[0].colorBlendOp = VK_BLEND_OP_ADD; att_state[0].srcColorBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].dstColorBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; att_state[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; cb.attachmentCount = 1; cb.pAttachments = att_state; cb.logicOpEnable = VK_FALSE; cb.logicOp = VK_LOGIC_OP_NO_OP; cb.blendConstants[0] = 
1.0f; cb.blendConstants[1] = 1.0f; cb.blendConstants[2] = 1.0f; cb.blendConstants[3] = 1.0f; VkPipelineViewportStateCreateInfo vp = {}; vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp.pNext = NULL; vp.flags = 0; vp.viewportCount = NUM_VIEWPORTS; dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_VIEWPORT; vp.scissorCount = NUM_SCISSORS; dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_SCISSOR; vp.pScissors = NULL; vp.pViewports = NULL; VkPipelineDepthStencilStateCreateInfo ds; ds.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; ds.pNext = NULL; ds.flags = 0; ds.depthTestEnable = include_depth; ds.depthWriteEnable = include_depth; ds.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL; ds.depthBoundsTestEnable = VK_FALSE; ds.stencilTestEnable = VK_FALSE; ds.back.failOp = VK_STENCIL_OP_KEEP; ds.back.passOp = VK_STENCIL_OP_KEEP; ds.back.compareOp = VK_COMPARE_OP_ALWAYS; ds.back.compareMask = 0; ds.back.reference = 0; ds.back.depthFailOp = VK_STENCIL_OP_KEEP; ds.back.writeMask = 0; ds.minDepthBounds = 0; ds.maxDepthBounds = 0; ds.stencilTestEnable = VK_FALSE; ds.front = ds.back; VkPipelineMultisampleStateCreateInfo ms; ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms.pNext = NULL; ms.flags = 0; ms.pSampleMask = NULL; ms.rasterizationSamples = NUM_SAMPLES; ms.sampleShadingEnable = VK_FALSE; ms.alphaToCoverageEnable = VK_FALSE; ms.alphaToOneEnable = VK_FALSE; ms.minSampleShading = 0.0; VkGraphicsPipelineCreateInfo pipeline; pipeline.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; pipeline.pNext = NULL; pipeline.layout = info.pipeline_layout; pipeline.basePipelineHandle = VK_NULL_HANDLE; pipeline.basePipelineIndex = 0; // Specify that we will be creating a derivative of this pipeline. pipeline.flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT; pipeline.pVertexInputState = include_vi ? &vi : NULL; pipeline.pInputAssemblyState = &ia; pipeline.pRasterizationState = &rs; pipeline.pColorBlendState = &cb; pipeline.pTessellationState = NULL; pipeline.pMultisampleState = &ms; pipeline.pDynamicState = &dynamicState; pipeline.pViewportState = &vp; pipeline.pDepthStencilState = &ds; pipeline.pStages = info.shaderStages; pipeline.stageCount = 2; pipeline.renderPass = info.render_pass; pipeline.subpass = 0; // Create the base pipeline without storing it in the info struct // NOTE: If desired, we can add timing info around pipeline creation to // demonstrate any perf benefits to derivation. VkPipeline basePipeline; res = vkCreateGraphicsPipelines(info.device, info.pipelineCache, 1, &pipeline, NULL, &basePipeline); assert(res == VK_SUCCESS); // Now create the derivative pipeline, using a different fragment shader // This shader will shade the cube faces with interpolated colors // NOTE: If this step is too heavyweight to show any benefit of derivation, // then // create a pipeline that differs in some other, simpler way. 
const char *fragShaderText2 = "#version 450\n" "layout (location = 0) in vec2 texcoord;\n" "layout (location = 0) out vec4 outColor;\n" "void main() {\n" " outColor = vec4(texcoord.x, texcoord.y, " "1.0 - texcoord.x - texcoord.y, 1.0f);\n" "}\n"; // Convert GLSL to SPIR-V init_glslang(); std::vector<unsigned int> fragSpv; bool U_ASSERT_ONLY retVal = GLSLtoSPV(VK_SHADER_STAGE_FRAGMENT_BIT, fragShaderText2, fragSpv); assert(retVal); finalize_glslang(); // Replace the module entry of info.shaderStages to change the fragment // shader vkDestroyShaderModule(info.device, info.shaderStages[1].module, NULL); VkShaderModuleCreateInfo moduleCreateInfo = {}; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.flags = 0; moduleCreateInfo.codeSize = fragSpv.size() * sizeof(unsigned int); moduleCreateInfo.pCode = fragSpv.data(); res = vkCreateShaderModule(info.device, &moduleCreateInfo, NULL, &info.shaderStages[1].module); assert(res == VK_SUCCESS); // Modify pipeline info to reflect derivation pipeline.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT; pipeline.basePipelineHandle = basePipeline; pipeline.basePipelineIndex = -1; // And create the derived pipeline, assigning to info.pipeline for use by // later helpers res = vkCreateGraphicsPipelines(info.device, info.pipelineCache, 1, &pipeline, NULL, &info.pipeline); assert(res == VK_SUCCESS); /* VULKAN_KEY_END */ init_presentable_image(info); VkClearValue clear_values[2]; init_clear_color_and_depth(info, clear_values); VkRenderPassBeginInfo rp_begin; init_render_pass_begin_info(info, rp_begin); rp_begin.clearValueCount = 2; rp_begin.pClearValues = clear_values; vkCmdBeginRenderPass(info.cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline); vkCmdBindDescriptorSets(info.cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, info.pipeline_layout, 0, NUM_DESCRIPTOR_SETS, info.desc_set.data(), 0, NULL); const VkDeviceSize offsets[1] = {0}; vkCmdBindVertexBuffers(info.cmd, 0, 1, &info.vertex_buffer.buf, offsets); init_viewports(info); init_scissors(info); vkCmdDraw(info.cmd, 12 * 3, 1, 0, 0); vkCmdEndRenderPass(info.cmd); execute_pre_present_barrier(info); res = vkEndCommandBuffer(info.cmd); assert(res == VK_SUCCESS); VkFence drawFence = {}; init_fence(info, drawFence); VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo submit_info = {}; init_submit_info(info, submit_info, pipe_stage_flags); /* Queue the command buffer for execution */ res = vkQueueSubmit(info.queue, 1, &submit_info, drawFence); assert(res == VK_SUCCESS); /* Now present the image in the window */ VkPresentInfoKHR present = {}; init_present_info(info, present); /* Make sure command buffer is finished before presenting */ do { res = vkWaitForFences(info.device, 1, &drawFence, VK_TRUE, FENCE_TIMEOUT); } while (res == VK_TIMEOUT); assert(res == VK_SUCCESS); res = vkQueuePresentKHR(info.queue, &present); assert(res == VK_SUCCESS); wait_seconds(1); if (info.save_images) write_ppm(info, "pipeline_derivative"); vkDestroyFence(info.device, drawFence, NULL); vkDestroySemaphore(info.device, info.presentCompleteSemaphore, NULL); vkDestroyPipeline(info.device, basePipeline, NULL); destroy_pipeline(info); destroy_pipeline_cache(info); destroy_textures(info); destroy_descriptor_pool(info); destroy_vertex_buffer(info); destroy_framebuffers(info); destroy_shaders(info); destroy_renderpass(info); destroy_descriptor_and_pipeline_layouts(info); 
destroy_uniform_buffer(info); destroy_depth_buffer(info); destroy_swap_chain(info); destroy_command_buffer(info); destroy_command_pool(info); destroy_device(info); destroy_window(info); destroy_instance(info); return 0; }
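/*
 * Sketch, not part of the sample above: the pipeline_derivative sample links
 * the derived pipeline to its base through basePipelineHandle.  The API also
 * allows both pipelines to be created in a single vkCreateGraphicsPipelines
 * call, with the derivative naming its base by index into pCreateInfos.  The
 * templ argument is assumed to be a create info populated the same way as
 * "pipeline" in the sample; only the derivation-related fields are changed.
 */
#include <cassert>
#include <vulkan/vulkan.h>

static void create_base_and_derived(VkDevice device, VkPipelineCache cache,
                                    VkGraphicsPipelineCreateInfo templ,
                                    VkPipeline outPipelines[2]) {
    VkGraphicsPipelineCreateInfo createInfos[2] = {templ, templ};
    // Element 0 is the base and must opt in to being derived from.
    createInfos[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
    createInfos[0].basePipelineHandle = VK_NULL_HANDLE;
    createInfos[0].basePipelineIndex = -1;
    // Element 1 derives from element 0 by array index; the handle must stay
    // VK_NULL_HANDLE when an index into the same pCreateInfos array is used.
    createInfos[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
    createInfos[1].basePipelineHandle = VK_NULL_HANDLE;
    createInfos[1].basePipelineIndex = 0;
    VkResult res = vkCreateGraphicsPipelines(device, cache, 2, createInfos, NULL, outPipelines);
    assert(res == VK_SUCCESS);
}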