void StagingTexture2DBuffer::CopyFromImage(VkCommandBuffer command_buffer, VkImage image,
                                           VkImageAspectFlags src_aspect, u32 x, u32 y, u32 width,
                                           u32 height, u32 level, u32 layer)
{
  // Issue the image->buffer copy.
  VkBufferImageCopy image_copy = {
      0,                                              // VkDeviceSize bufferOffset
      m_width,                                        // uint32_t bufferRowLength
      0,                                              // uint32_t bufferImageHeight
      {src_aspect, level, layer, 1},                  // VkImageSubresourceLayers imageSubresource
      {static_cast<s32>(x), static_cast<s32>(y), 0},  // VkOffset3D imageOffset
      {width, height, 1}                              // VkExtent3D imageExtent
  };
  vkCmdCopyImageToBuffer(command_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_buffer, 1,
                         &image_copy);

  // Ensure the write has completed.
  VkDeviceSize copy_size = m_row_stride * height;
  Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_ACCESS_HOST_READ_BIT, 0, copy_size, VK_PIPELINE_STAGE_TRANSFER_BIT,
                            VK_PIPELINE_STAGE_HOST_BIT);

  // If we're still mapped, invalidate the mapped range.
  if (m_map_pointer && !m_coherent)
  {
    VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
                                 m_map_offset, m_map_size};
    vkInvalidateMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
  }
}
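The Util::BufferMemoryBarrier call is what makes the transfer write visible to host reads. A minimal sketch of such a helper, assuming it is a thin wrapper over vkCmdPipelineBarrier with a single buffer barrier (the signature is inferred from the call site above, not taken from the actual codebase):

// Sketch of a buffer-barrier helper: records one VkBufferMemoryBarrier between
// the given pipeline stages. Signature and name are assumptions.
static void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer,
                                VkAccessFlags src_access_mask, VkAccessFlags dst_access_mask,
                                VkDeviceSize offset, VkDeviceSize size,
                                VkPipelineStageFlags src_stage_mask,
                                VkPipelineStageFlags dst_stage_mask)
{
  VkBufferMemoryBarrier barrier = {};
  barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
  barrier.srcAccessMask = src_access_mask;
  barrier.dstAccessMask = dst_access_mask;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.buffer = buffer;
  barrier.offset = offset;
  barrier.size = size;
  vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, &barrier,
                       0, nullptr);
}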
void Image::copyImageToBuffer(const VkCommandBuffer cmdBuffer, IBufferSP& targetBuffer,
                              const VkBufferImageCopy& bufferImageCopy)
{
    if (!targetBuffer.get())
    {
        return;
    }

    // Remember the current layout and access masks so they can be restored after the copy.
    VkImageLayout sourceImageLayout = getImageLayout(bufferImageCopy.imageSubresource.mipLevel,
                                                     bufferImageCopy.imageSubresource.baseArrayLayer);
    VkAccessFlags sourceAccessMask = getAccessMask(bufferImageCopy.imageSubresource.mipLevel,
                                                   bufferImageCopy.imageSubresource.baseArrayLayer);
    VkAccessFlags targetAccessMask = targetBuffer->getAccessMask();

    // Prepare source image for copy.
    VkImageSubresourceRange imageSubresourceRange = {bufferImageCopy.imageSubresource.aspectMask,
                                                     bufferImageCopy.imageSubresource.mipLevel, 1,
                                                     bufferImageCopy.imageSubresource.baseArrayLayer,
                                                     bufferImageCopy.imageSubresource.layerCount};
    cmdPipelineBarrier(cmdBuffer, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                       imageSubresourceRange);

    // Prepare target buffer for copy.
    targetBuffer->cmdPipelineBarrier(cmdBuffer, VK_ACCESS_TRANSFER_WRITE_BIT);

    // Copy image by command.
    vkCmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           targetBuffer->getBuffer(), 1, &bufferImageCopy);

    // Revert the target buffer back to its previous access mask.
    targetBuffer->cmdPipelineBarrier(cmdBuffer, targetAccessMask);

    // Revert the source image back to its previous layout and access mask.
    cmdPipelineBarrier(cmdBuffer, sourceAccessMask, sourceImageLayout, imageSubresourceRange);
}
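The member cmdPipelineBarrier used above presumably records a layout transition for the image subresource range. A sketch of a free-function equivalent, under the assumption that the class tracks the old layout and access mask (names and the conservative stage masks are assumptions):

// Hypothetical equivalent of Image::cmdPipelineBarrier: transition a subresource
// range from (oldAccess, oldLayout) to (newAccess, newLayout).
static void cmdImageBarrier(VkCommandBuffer cmdBuffer, VkImage image,
                            VkAccessFlags oldAccess, VkAccessFlags newAccess,
                            VkImageLayout oldLayout, VkImageLayout newLayout,
                            const VkImageSubresourceRange& range)
{
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = oldAccess;
    barrier.dstAccessMask = newAccess;
    barrier.oldLayout = oldLayout;
    barrier.newLayout = newLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = range;
    // Conservative stage masks; a real implementation would derive these from the access masks.
    vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}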
void StagingTexture2D::CopyFromImage(VkCommandBuffer command_buffer, VkImage image,
                                     VkImageAspectFlags src_aspect, u32 x, u32 y, u32 width,
                                     u32 height, u32 level, u32 layer)
{
  u32 block_width = Util::GetBlockWidth(m_format);
  u32 block_x = (x + block_width - 1) / block_width;
  u32 block_y = (y + block_width - 1) / block_width;

  // Issue the image->buffer copy.
  VkBufferImageCopy image_copy = {
      block_y * m_row_stride + block_x * m_texel_size,  // VkDeviceSize bufferOffset
      m_width,                                          // uint32_t bufferRowLength
      0,                                                // uint32_t bufferImageHeight
      {src_aspect, level, layer, 1},                    // VkImageSubresourceLayers imageSubresource
      {static_cast<s32>(x), static_cast<s32>(y), 0},    // VkOffset3D imageOffset
      {width, height, 1}                                // VkExtent3D imageExtent
  };
  vkCmdCopyImageToBuffer(command_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_buffer, 1,
                         &image_copy);

  // Flush CPU and GPU caches if not coherent mapping.
  VkDeviceSize buffer_flush_offset = y * m_row_stride;
  VkDeviceSize buffer_flush_size = height * m_row_stride;
  FlushGPUCache(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                buffer_flush_offset, buffer_flush_size);
  InvalidateCPUCache(buffer_flush_offset, buffer_flush_size);
}
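For block-compressed formats, the bufferOffset computation works in blocks rather than texels: m_texel_size is the byte size of one block and m_row_stride is the byte size of one row of blocks. (The snippet rounds partial blocks up; for block-aligned coordinates this matches plain division.) A worked example with illustrative values, assuming BC1 with 4x4 blocks of 8 bytes:

// Worked example of the bufferOffset math for a hypothetical BC1 staging texture.
const uint32_t block_width = 4;   // texels per block side (BC1)
const uint32_t texel_size = 8;    // bytes per BC1 block
const uint32_t tex_width = 512;   // texels
const uint32_t row_stride = (tex_width / block_width) * texel_size;  // 128 blocks * 8 = 1024 bytes

// Reading from texel (x=64, y=32) starts at block (16, 8):
const uint32_t block_x = 64 / block_width;  // 16
const uint32_t block_y = 32 / block_width;  // 8
const VkDeviceSize buffer_offset = block_y * row_stride + block_x * texel_size;  // 8320 bytes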
bool NvGLFWContextVK::readFramebufferRGBX32(uint8_t* dest, int32_t& w, int32_t& h)
{
    NvVkRenderTarget& rt = *mainRenderTarget();
    w = rt.width();
    h = rt.height();

    // Size query only: report the dimensions without reading back any pixels.
    if (!dest)
        return true;

    VkFormat format = rt.targetFormat();
    if (format == VK_FORMAT_R8G8B8A8_UNORM) {
        uint32_t size = 4 * w * h;

        VkBufferImageCopy region;
        region.bufferOffset = 0;
        region.bufferRowLength = 0;
        region.bufferImageHeight = 0;
        region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        region.imageSubresource.baseArrayLayer = 0;
        region.imageSubresource.layerCount = 1;
        region.imageSubresource.mipLevel = 0;
        region.imageOffset = { 0, 0, 0 };
        region.imageExtent = { (uint32_t)w, (uint32_t)h, 1 };

        // The destination of vkCmdCopyImageToBuffer must allow transfer-dst usage.
        NvVkBuffer dstBuffer;
        VkResult result = createAndFillBuffer(size, VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                                              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, dstBuffer);

        VkCommandBuffer cmd = beginTempCmdBuffer();
        vkCmdCopyImageToBuffer(cmd, rt.image(), VK_IMAGE_LAYOUT_GENERAL, dstBuffer.buffer,
                               1, &region);
        result = doneWithTempCmdBufferSubmit(cmd);
        vkDeviceWaitIdle(device());

        uint8_t* ptr = NULL;
        result = vkMapMemory(device(), dstBuffer.mem, 0, size, 0, (void**)&ptr);

        // Copy rows in reverse order, flipping the image vertically.
        uint32_t rowSize = w * 4;
        ptr += rowSize * (h - 1);
        for (int32_t i = 0; i < h; i++) {
            memcpy(dest, ptr, rowSize);
            dest += rowSize;
            ptr -= rowSize;
        }
        vkUnmapMemory(device(), dstBuffer.mem);
        return true;
    }
    return false;
}
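Note the two-phase contract: called with dest == NULL the function only reports the dimensions, so a caller queries first, allocates, then reads. A hypothetical caller (the vkContext variable is an assumption):

// Query the size first, then allocate and read the pixels back.
int32_t w = 0, h = 0;
vkContext->readFramebufferRGBX32(NULL, w, h);  // size query only
std::vector<uint8_t> pixels((size_t)w * h * 4);
if (vkContext->readFramebufferRGBX32(pixels.data(), w, h)) {
    // pixels now holds w*h RGBA8 texels, flipped to top-to-bottom row order.
}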
void VulkanImage::copy(VulkanTransferBuffer* cb, VulkanBuffer* destination, const VkExtent3D& extent,
                       const VkImageSubresourceLayers& range, VkImageLayout layout)
{
    VkBufferImageCopy region;
    region.bufferRowLength = destination->getRowPitch();
    region.bufferImageHeight = destination->getSliceHeight();
    region.bufferOffset = 0;
    region.imageOffset.x = 0;
    region.imageOffset.y = 0;
    region.imageOffset.z = 0;
    region.imageExtent = extent;
    region.imageSubresource = range;
    vkCmdCopyImageToBuffer(cb->getCB()->getHandle(), mImage, layout, destination->getHandle(),
                           1, &region);
}
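One caveat with this wrapper: VkBufferImageCopy::bufferRowLength and bufferImageHeight are expressed in texels, not bytes, and must be either zero (tightly packed) or at least as large as the copy extent, so the code assumes getRowPitch() and getSliceHeight() return texel counts. A hypothetical full-image invocation (all names other than the Vulkan ones are assumptions):

// Copy mip 0, layer 0 of a color image into a staging buffer.
VkImageSubresourceLayers layers = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mip*/, 0 /*layer*/, 1};
VkExtent3D extent = {width, height, 1};
image->copy(transferBuffer, stagingBuffer, extent, layers, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);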
std::vector<uint8> CopyContext::readTextureSubresource(const Texture* pTexture, uint32_t subresourceIndex)
{
    mCommandsPending = true;

    VkBufferImageCopy vkCopy;
    Buffer::SharedPtr pStaging;
    size_t dataSize;
    initTexAccessParams(pTexture, subresourceIndex, vkCopy, pStaging, nullptr, dataSize);

    // Execute the copy.
    resourceBarrier(pTexture, Resource::State::CopySource);
    resourceBarrier(pStaging.get(), Resource::State::CopyDest);
    vkCmdCopyImageToBuffer(mpLowLevelData->getCommandList(), pTexture->getApiHandle(),
                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, pStaging->getApiHandle(),
                           1, &vkCopy);
    flush(true);

    // Map and read the results.
    std::vector<uint8> result(dataSize);
    uint8* pData = reinterpret_cast<uint8*>(pStaging->map(Buffer::MapType::Read));
    memcpy_s(result.data(), result.size(), pData, dataSize);
    return result;
}
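initTexAccessParams is not shown; it presumably fills the VkBufferImageCopy and sizes the staging buffer from the subresource's mip dimensions. A sketch of that size computation for an uncompressed format (the helper name is an assumption, not the framework's API):

// Hypothetical helper: byte size of one mip level of an uncompressed 2D texture.
static size_t subresourceDataSize(uint32_t baseWidth, uint32_t baseHeight,
                                  uint32_t mipLevel, size_t bytesPerTexel)
{
    uint32_t w = baseWidth >> mipLevel;
    uint32_t h = baseHeight >> mipLevel;
    if (w == 0) w = 1;  // Mip dimensions clamp to 1, never 0.
    if (h == 0) h = 1;
    return (size_t)w * h * bytesPerTexel;
}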
int main(void) {
    VkInstance instance;
    {
        const char debug_ext[] = "VK_EXT_debug_report";
        const char* extensions[] = {debug_ext,};
        const char validation_layer[] = "VK_LAYER_LUNARG_standard_validation";
        const char* layers[] = {validation_layer,};
        VkInstanceCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .pApplicationInfo = NULL,
            .enabledLayerCount = NELEMS(layers),
            .ppEnabledLayerNames = layers,
            .enabledExtensionCount = NELEMS(extensions),
            .ppEnabledExtensionNames = extensions,
        };
        assert(vkCreateInstance(&create_info, NULL, &instance) == VK_SUCCESS);
    }

    VkDebugReportCallbackEXT debug_callback;
    {
        VkDebugReportCallbackCreateInfoEXT create_info = {
            .sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
            .pNext = NULL,
            .flags = (VK_DEBUG_REPORT_ERROR_BIT_EXT
                      | VK_DEBUG_REPORT_WARNING_BIT_EXT
                      | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT),
            .pfnCallback = &debugReportCallback,
            .pUserData = NULL,
        };
        PFN_vkCreateDebugReportCallbackEXT createDebugReportCallback =
            (PFN_vkCreateDebugReportCallbackEXT) vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT");
        assert(createDebugReportCallback);
        assert(createDebugReportCallback(instance, &create_info, NULL, &debug_callback) == VK_SUCCESS);
    }

    VkPhysicalDevice phy_device;
    {
        uint32_t num_devices;
        assert(vkEnumeratePhysicalDevices(instance, &num_devices, NULL) == VK_SUCCESS);
        assert(num_devices >= 1);
        VkPhysicalDevice* phy_devices = malloc(sizeof(*phy_devices) * num_devices);
        assert(vkEnumeratePhysicalDevices(instance, &num_devices, phy_devices) == VK_SUCCESS);
        phy_device = phy_devices[0];
        free(phy_devices);
    }

    VkPhysicalDeviceMemoryProperties memory_properties;
    vkGetPhysicalDeviceMemoryProperties(phy_device, &memory_properties);

    VkDevice device;
    {
        float queue_priorities[] = {1.0};
        const char validation_layer[] = "VK_LAYER_LUNARG_standard_validation";
        const char* layers[] = {validation_layer,};
        uint32_t nqueues;
        matchingQueues(phy_device, VK_QUEUE_GRAPHICS_BIT, &nqueues, NULL);
        assert(nqueues > 0);
        uint32_t* queue_family_idxs = malloc(sizeof(*queue_family_idxs) * nqueues);
        matchingQueues(phy_device, VK_QUEUE_GRAPHICS_BIT, &nqueues, queue_family_idxs);
        VkDeviceQueueCreateInfo queue_info = {
            .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .queueFamilyIndex = queue_family_idxs[0],
            .queueCount = 1,
            .pQueuePriorities = queue_priorities,
        };
        free(queue_family_idxs);
        VkPhysicalDeviceFeatures features = {
            .geometryShader = VK_TRUE,
            .fillModeNonSolid = VK_TRUE,
        };
        VkDeviceCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .queueCreateInfoCount = 1,
            .pQueueCreateInfos = &queue_info,
            .enabledLayerCount = NELEMS(layers),
            .ppEnabledLayerNames = layers,
            .enabledExtensionCount = 0,
            .ppEnabledExtensionNames = NULL,
            .pEnabledFeatures = &features,
        };
        assert(vkCreateDevice(phy_device, &create_info, NULL, &device) == VK_SUCCESS);
    }

    VkQueue queue;
    vkGetDeviceQueue(device, 0, 0, &queue);

    VkCommandPool cmd_pool;
    {
        VkCommandPoolCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
            .pNext = NULL,
            .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
            .queueFamilyIndex = 0,
        };
        assert(vkCreateCommandPool(device, &create_info, NULL, &cmd_pool) == VK_SUCCESS);
    }

    VkRenderPass render_pass;
    {
        VkAttachmentDescription attachments[] = {{
            .flags = 0,
            .format = VK_FORMAT_R8G8B8A8_UNORM,
            .samples = VK_SAMPLE_COUNT_8_BIT,
            .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
            .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
            .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
            .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
            .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            .finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
        }, {
            .flags = 0,
            .format = VK_FORMAT_D16_UNORM,
            .samples = VK_SAMPLE_COUNT_8_BIT,
            .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
            .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
            .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
            .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
            .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
        }, {
            .flags = 0,
            .format = VK_FORMAT_R8G8B8A8_UNORM,
            .samples = VK_SAMPLE_COUNT_1_BIT,
            .loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
            .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
            .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
            .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            .finalLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        }};
        VkAttachmentReference attachment_refs[NELEMS(attachments)] = {{
            .attachment = 0,
            .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
        }, {
            .attachment = 1,
            .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
        }, {
            .attachment = 2,
            .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
        }};
        VkSubpassDescription subpasses[1] = {{
            .flags = 0,
            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
            .inputAttachmentCount = 0,
            .pInputAttachments = NULL,
            .colorAttachmentCount = 1,
            .pColorAttachments = &attachment_refs[0],
            .pResolveAttachments = &attachment_refs[2],
            .pDepthStencilAttachment = &attachment_refs[1],
            .preserveAttachmentCount = 0,
            .pPreserveAttachments = NULL,
        }};
        VkSubpassDependency dependencies[] = {{
            .srcSubpass = 0,
            .dstSubpass = VK_SUBPASS_EXTERNAL,
            .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            .dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT,
            .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
            .dependencyFlags = 0,
        }};
        VkRenderPassCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .attachmentCount = NELEMS(attachments),
            .pAttachments = attachments,
            .subpassCount = NELEMS(subpasses),
            .pSubpasses = subpasses,
            .dependencyCount = NELEMS(dependencies),
            .pDependencies = dependencies,
        };
        assert(vkCreateRenderPass(device, &create_info, NULL, &render_pass) == VK_SUCCESS);
    }

    VkImage images[3];
    VkDeviceMemory image_memories[NELEMS(images)];
    VkImageView views[NELEMS(images)];
    createFrameImage(memory_properties, device, render_size,
                     VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_8_BIT,
                     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_ASPECT_COLOR_BIT,
                     &images[0], &image_memories[0], &views[0]);
    createFrameImage(memory_properties, device, render_size,
                     VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_8_BIT,
                     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_ASPECT_DEPTH_BIT,
                     &images[1], &image_memories[1], &views[1]);
    createFrameImage(memory_properties, device, render_size,
                     VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT,
                     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                     VK_IMAGE_ASPECT_COLOR_BIT, &images[2], &image_memories[2], &views[2]);

    VkBuffer verts_buffer;
    VkDeviceMemory verts_memory;
    createBuffer(memory_properties, device, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
                 sizeof(verts), verts, &verts_buffer, &verts_memory);

    VkBuffer index_buffer;
    VkDeviceMemory index_memory;
    createBuffer(memory_properties, device, VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
                 sizeof(indices), indices, &index_buffer, &index_memory);

    VkBuffer image_buffer;
    VkDeviceMemory image_buffer_memory;
    createBuffer(memory_properties, device, VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                 render_size.height * render_size.width * 4, NULL,
                 &image_buffer, &image_buffer_memory);

    VkFramebuffer framebuffer;
    createFramebuffer(device, render_size, 3, views, render_pass, &framebuffer);

    VkShaderModule shaders[5];
    {
        char* filenames[NELEMS(shaders)] = {"cube.vert.spv", "cube.geom.spv", "cube.frag.spv",
                                            "wireframe.geom.spv", "color.frag.spv"};
        for (size_t i = 0; i < NELEMS(shaders); i++) {
            size_t code_size;
            uint32_t* code;
            assert((code_size = loadModule(filenames[i], &code)) != 0);
            VkShaderModuleCreateInfo create_info = {
                .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
                .pNext = NULL,
                .flags = 0,
                .codeSize = code_size,
                .pCode = code,
            };
            assert(vkCreateShaderModule(device, &create_info, NULL, &shaders[i]) == VK_SUCCESS);
            free(code);
        }
    }

    VkPipelineLayout pipeline_layout;
    {
        VkPushConstantRange push_range = {
            .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
            .offset = 0,
            .size = 4,
        };
        VkPipelineLayoutCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .setLayoutCount = 0,
            .pSetLayouts = NULL,
            .pushConstantRangeCount = 1,
            .pPushConstantRanges = &push_range,
        };
        assert(vkCreatePipelineLayout(device, &create_info, NULL, &pipeline_layout) == VK_SUCCESS);
    }

    VkPipeline pipelines[2];
    {
        VkPipelineShaderStageCreateInfo stages[3] = {{
            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .stage = VK_SHADER_STAGE_VERTEX_BIT,
            .module = shaders[0],
            .pName = "main",
            .pSpecializationInfo = NULL,
        }, {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .stage = VK_SHADER_STAGE_GEOMETRY_BIT,
            .module = shaders[1],
            .pName = "main",
            .pSpecializationInfo = NULL,
        }, {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
            .module = shaders[2],
            .pName = "main",
            .pSpecializationInfo = NULL,
        }};
        VkVertexInputBindingDescription vtx_binding = {
            .binding = 0,
            .stride = sizeof(struct Vertex),
            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
        };
        VkVertexInputAttributeDescription vtx_attr = {
            .location = 0,
            .binding = 0,
            .format = VK_FORMAT_R32G32B32_SFLOAT,
            .offset = offsetof(struct Vertex, pos),
        };
        VkPipelineVertexInputStateCreateInfo vtx_state = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .vertexBindingDescriptionCount = 1,
            .pVertexBindingDescriptions = &vtx_binding,
            .vertexAttributeDescriptionCount = 1,
            .pVertexAttributeDescriptions = &vtx_attr,
        };
        VkPipelineInputAssemblyStateCreateInfo ia_state = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
            .primitiveRestartEnable = VK_TRUE,
        };
        VkViewport viewport = {
            .x = 0,
            .y = 0,
            .width = render_size.width,
            .height = render_size.height,
            .minDepth = 0.0,
            .maxDepth = 1.0,
        };
        VkRect2D scissor = {
            .offset = {.x = 0, .y = 0},
            .extent = render_size,
        };
        VkPipelineViewportStateCreateInfo viewport_state = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .viewportCount = 1,
            .pViewports = &viewport,
            .scissorCount = 1,
            .pScissors = &scissor,
        };
        VkPipelineRasterizationStateCreateInfo rasterization_state = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .depthClampEnable = VK_FALSE,
            .rasterizerDiscardEnable = VK_FALSE,
            .polygonMode = VK_POLYGON_MODE_FILL,
            .cullMode = VK_CULL_MODE_NONE,
            .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
            .depthBiasEnable = VK_FALSE,
            .lineWidth = 1.0,
        };
        VkPipelineMultisampleStateCreateInfo multisample_state = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .rasterizationSamples = VK_SAMPLE_COUNT_8_BIT,
            .sampleShadingEnable = VK_FALSE,
            .minSampleShading = 0.0,
            .pSampleMask = NULL,
            .alphaToCoverageEnable = VK_FALSE,
            .alphaToOneEnable = VK_FALSE,
        };
        VkPipelineDepthStencilStateCreateInfo depth_stencil_state = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .depthTestEnable = VK_TRUE,
            .depthWriteEnable = VK_TRUE,
            .depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL,
            .depthBoundsTestEnable = VK_FALSE,
            .stencilTestEnable = VK_FALSE,
            .front = {},
            .back = {},
            .minDepthBounds = 0.0,
            .maxDepthBounds = 1.0,
        };
        VkPipelineColorBlendAttachmentState color_blend_attachment = {
            .blendEnable = VK_FALSE,
            .colorWriteMask = (VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                               VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT),
        };
        VkPipelineColorBlendStateCreateInfo color_blend_state = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .logicOpEnable = VK_FALSE,
            //.logicOp = 0,
            .attachmentCount = 1,
            .pAttachments = &color_blend_attachment,
            .blendConstants = {},
        };
        VkGraphicsPipelineCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .stageCount = NELEMS(stages),
            .pStages = stages,
            .pVertexInputState = &vtx_state,
            .pInputAssemblyState = &ia_state,
            .pTessellationState = NULL,
            .pViewportState = &viewport_state,
            .pRasterizationState = &rasterization_state,
            .pMultisampleState = &multisample_state,
            .pDepthStencilState = &depth_stencil_state,
            .pColorBlendState = &color_blend_state,
            .pDynamicState = NULL,
            .layout = pipeline_layout,
            .renderPass = render_pass,
            .subpass = 0,
            .basePipelineHandle = VK_NULL_HANDLE,
            .basePipelineIndex = 0,
        };
        assert(vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &create_info, NULL,
                                         &pipelines[0]) == VK_SUCCESS);

        // Reuse the same create info for the wireframe pipeline, swapping shaders and fill mode.
        stages[1].module = shaders[3];
        stages[2].module = shaders[4];
        rasterization_state.polygonMode = VK_POLYGON_MODE_LINE;
        assert(vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &create_info, NULL,
                                         &pipelines[1]) == VK_SUCCESS);
    }

    VkCommandBuffer draw_buffers[2];
    {
        VkCommandBufferAllocateInfo allocate_info = {
            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
            .pNext = NULL,
            .commandPool = cmd_pool,
            .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
            .commandBufferCount = NELEMS(draw_buffers),
        };
        assert(vkAllocateCommandBuffers(device, &allocate_info, draw_buffers) == VK_SUCCESS);
    }

    {
        VkCommandBufferBeginInfo begin_info = {
            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
            .pNext = NULL,
            .flags = 0,
            .pInheritanceInfo = NULL,
        };
        VkClearValue clear_values[] = {{
            .color.float32 = {0.0, 0.0, 0.0, 1.0},
        }, {
            .depthStencil = {.depth = 1.0},
        }};
        VkRenderPassBeginInfo renderpass_begin_info = {
            .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
            .pNext = NULL,
            .renderPass = render_pass,
            .framebuffer = framebuffer,
            .renderArea = {
                .offset = {.x = 0, .y = 0},
                .extent = render_size,
            },
            .clearValueCount = NELEMS(clear_values),
            .pClearValues = clear_values,
        };
        for (size_t i = 0; i < NELEMS(draw_buffers); i++) {
            assert(vkBeginCommandBuffer(draw_buffers[i], &begin_info) == VK_SUCCESS);
            uint32_t persp = i == 0;
            vkCmdPushConstants(draw_buffers[i], pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT,
                               0, sizeof(persp), &persp);
            vkCmdBeginRenderPass(draw_buffers[i], &renderpass_begin_info, VK_SUBPASS_CONTENTS_INLINE);
            VkDeviceSize offset = 0;
            vkCmdBindVertexBuffers(draw_buffers[i], 0, 1, &verts_buffer, &offset);
            vkCmdBindIndexBuffer(draw_buffers[i], index_buffer, 0, VK_INDEX_TYPE_UINT32);
            for (size_t j = 0; j < NELEMS(pipelines); j++) {
                vkCmdBindPipeline(draw_buffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelines[j]);
                vkCmdDrawIndexed(draw_buffers[i], 20, 27, 0, 0, 0);
            }
            vkCmdEndRenderPass(draw_buffers[i]);
        }

        VkBufferImageCopy copy = {
            .bufferOffset = 0,
            .bufferRowLength = 0,    // Tightly packed
            .bufferImageHeight = 0,  // Tightly packed
            .imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
            .imageOffset = {0, 0, 0},
            .imageExtent = {.width = render_size.width, .height = render_size.height, .depth = 1},
        };
        VkBufferMemoryBarrier transfer_barrier = {
            .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
            .pNext = 0,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_HOST_READ_BIT,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .buffer = image_buffer,
            .offset = 0,
            .size = VK_WHOLE_SIZE,
        };
        for (size_t i = 0; i < NELEMS(draw_buffers); i++) {
            vkCmdCopyImageToBuffer(draw_buffers[i], images[2], VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                   image_buffer, 1, &copy);
            vkCmdPipelineBarrier(draw_buffers[i], VK_PIPELINE_STAGE_TRANSFER_BIT,
                                 VK_PIPELINE_STAGE_HOST_BIT, 0, 0, NULL, 1, &transfer_barrier,
                                 0, NULL);
            assert(vkEndCommandBuffer(draw_buffers[i]) == VK_SUCCESS);
        }
    }

    VkFence fence;
    {
        VkFenceCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
            .pNext = 0,
            .flags = 0,
        };
        assert(vkCreateFence(device, &create_info, NULL, &fence) == VK_SUCCESS);
    }

    {
        char* filenames[] = {"cube_persp.tif", "cube_ortho.tif"};
        char* image_data;
        assert(vkMapMemory(device, image_buffer_memory, 0, VK_WHOLE_SIZE, 0,
                           (void**) &image_data) == VK_SUCCESS);
        VkMappedMemoryRange image_flush = {
            .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
            .pNext = NULL,
            .memory = image_buffer_memory,
            .offset = 0,
            .size = VK_WHOLE_SIZE,
        };
        VkSubmitInfo submit_info = {
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .pNext = NULL,
            .waitSemaphoreCount = 0,
            .pWaitSemaphores = NULL,
            .pWaitDstStageMask = NULL,
            .commandBufferCount = 1,
            .pCommandBuffers = NULL,
            .signalSemaphoreCount = 0,
            .pSignalSemaphores = NULL,
        };
        for (size_t i = 0; i < NELEMS(filenames); i++) {
            submit_info.pCommandBuffers = &draw_buffers[i];
            assert(vkResetFences(device, 1, &fence) == VK_SUCCESS);
            assert(vkQueueSubmit(queue, 1, &submit_info, fence) == VK_SUCCESS);
            assert(vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX) == VK_SUCCESS);
            assert(vkInvalidateMappedMemoryRanges(device, 1, &image_flush) == VK_SUCCESS);
            assert(writeTiff(filenames[i], image_data, render_size, nchannels) == 0);
        }
        vkUnmapMemory(device, image_buffer_memory);
    }

    assert(vkQueueWaitIdle(queue) == VK_SUCCESS);
    vkDestroyFence(device, fence, NULL);
    vkDestroyFramebuffer(device, framebuffer, NULL);
    for (size_t i = 0; i < NELEMS(images); i++) {
        vkDestroyImage(device, images[i], NULL);
        vkDestroyImageView(device, views[i], NULL);
        vkFreeMemory(device, image_memories[i], NULL);
    }
    vkDestroyBuffer(device, image_buffer, NULL);
    vkFreeMemory(device, image_buffer_memory, NULL);
    vkDestroyBuffer(device, verts_buffer, NULL);
    vkFreeMemory(device, verts_memory, NULL);
    vkDestroyBuffer(device, index_buffer, NULL);
    vkFreeMemory(device, index_memory, NULL);
    for (size_t i = 0; i < NELEMS(pipelines); i++) {
        vkDestroyPipeline(device, pipelines[i], NULL);
    }
    vkDestroyPipelineLayout(device, pipeline_layout, NULL);
    for (size_t i = 0; i < NELEMS(shaders); i++)
        vkDestroyShaderModule(device, shaders[i], NULL);
    vkDestroyRenderPass(device, render_pass, NULL);
    vkFreeCommandBuffers(device, cmd_pool, NELEMS(draw_buffers), draw_buffers);
    vkDestroyCommandPool(device, cmd_pool, NULL);
    vkDestroyDevice(device, NULL);
    {
        PFN_vkDestroyDebugReportCallbackEXT destroyDebugReportCallback =
            (PFN_vkDestroyDebugReportCallbackEXT) vkGetInstanceProcAddr(instance, "vkDestroyDebugReportCallbackEXT");
        assert(destroyDebugReportCallback);
        destroyDebugReportCallback(instance, debug_callback, NULL);
    }
    vkDestroyInstance(instance, NULL);
    return 0;
}
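The program leans on helpers (createBuffer, createFrameImage, createFramebuffer, loadModule, matchingQueues, writeTiff) defined elsewhere. For the readback path, the essential one is createBuffer with host-visible memory; a minimal sketch under the assumption that it creates the buffer, backs it with the first suitable host-visible memory type, binds it, and optionally fills it (the signature is inferred from the call sites, error handling reduced to asserts):

#include <assert.h>
#include <string.h>
#include <vulkan/vulkan.h>

// Hypothetical createBuffer: create, allocate host-visible memory, bind, and
// copy in initial data when provided.
static void createBuffer(VkPhysicalDeviceMemoryProperties memory_properties, VkDevice device,
                         VkBufferUsageFlags usage, VkDeviceSize size, const void* data,
                         VkBuffer* buffer, VkDeviceMemory* memory)
{
    VkBufferCreateInfo buffer_info = {};
    buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_info.size = size;
    buffer_info.usage = usage;
    buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    assert(vkCreateBuffer(device, &buffer_info, NULL, buffer) == VK_SUCCESS);

    VkMemoryRequirements reqs;
    vkGetBufferMemoryRequirements(device, *buffer, &reqs);

    // Pick the first memory type that satisfies the requirements and is host-visible.
    uint32_t type_index = UINT32_MAX;
    for (uint32_t i = 0; i < memory_properties.memoryTypeCount; i++) {
        if ((reqs.memoryTypeBits & (1u << i)) &&
            (memory_properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
            type_index = i;
            break;
        }
    }
    assert(type_index != UINT32_MAX);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = reqs.size;
    alloc_info.memoryTypeIndex = type_index;
    assert(vkAllocateMemory(device, &alloc_info, NULL, memory) == VK_SUCCESS);
    assert(vkBindBufferMemory(device, *buffer, *memory, 0) == VK_SUCCESS);

    // Copy in initial contents when provided.
    if (data) {
        void* ptr;
        assert(vkMapMemory(device, *memory, 0, size, 0, &ptr) == VK_SUCCESS);
        memcpy(ptr, data, size);
        // Non-coherent memory would additionally need vkFlushMappedMemoryRanges here.
        vkUnmapMemory(device, *memory);
    }
}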