// Records an indirect compute dispatch: the work-group counts are read at
// execution time from the DispatchParams stored in `dispatchParams`.
void Work::Bound::RecordIndirect(vk::CommandBuffer commandBuffer, IndirectBuffer<DispatchParams>& dispatchParams)
{
    const auto& domain = mComputeSize.DomainSize;

    // Push the domain size to the shader; y is only pushed when it is
    // non-trivial (presumably used for bounds checks — confirm in shader).
    PushConstantOffset(commandBuffer, 0, domain.x);
    if (domain.y != 1)
    {
        PushConstantOffset(commandBuffer, 4, domain.y);
    }

    commandBuffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute, mLayout, 0, {*mDescriptor}, {});
    commandBuffer.bindPipeline(vk::PipelineBindPoint::eCompute, mPipeline);

    // Group counts come from the GPU-side buffer, offset 0.
    commandBuffer.dispatchIndirect(dispatchParams.Handle(), 0);
}
// Records a direct compute dispatch using the precomputed work-group counts.
void Work::Bound::Record(vk::CommandBuffer commandBuffer)
{
    const auto& domain = mComputeSize.DomainSize;

    // Push the domain size to the shader; y is only pushed when it is
    // non-trivial (presumably used for bounds checks — confirm in shader).
    PushConstantOffset(commandBuffer, 0, domain.x);
    if (domain.y != 1)
    {
        PushConstantOffset(commandBuffer, 4, domain.y);
    }

    commandBuffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute, mLayout, 0, {*mDescriptor}, {});
    commandBuffer.bindPipeline(vk::PipelineBindPoint::eCompute, mPipeline);

    // A fixed 2D grid; the z dimension is always 1.
    commandBuffer.dispatch(mComputeSize.WorkSize.x, mComputeSize.WorkSize.y, 1);
}
// Copies the full contents of `srcTexture` into this buffer.
// Throws if the texture's byte size does not exactly match the buffer size.
void GenericBuffer::CopyFrom(vk::CommandBuffer commandBuffer, Texture& srcTexture)
{
    const auto srcByteSize =
        srcTexture.GetWidth() * srcTexture.GetHeight() * GetBytesPerPixel(srcTexture.GetFormat());
    if (srcByteSize != mSize)
    {
        throw std::runtime_error("Cannot copy texture of different sizes");
    }

    // Move the source image into a copy-readable layout.
    srcTexture.Barrier(commandBuffer,
                       vk::ImageLayout::eGeneral,
                       vk::AccessFlagBits::eShaderWrite | vk::AccessFlagBits::eColorAttachmentWrite,
                       vk::ImageLayout::eTransferSrcOptimal,
                       vk::AccessFlagBits::eTransferRead);

    auto copyInfo = vk::BufferImageCopy()
                        .setImageSubresource({vk::ImageAspectFlagBits::eColor, 0, 0, 1})
                        .setImageExtent({srcTexture.GetWidth(), srcTexture.GetHeight(), 1});

    commandBuffer.copyImageToBuffer(
        srcTexture.mImage, vk::ImageLayout::eTransferSrcOptimal, mBuffer, copyInfo);

    // Restore the image to its general layout and make the buffer write
    // visible to subsequent shader reads.
    srcTexture.Barrier(commandBuffer,
                       vk::ImageLayout::eTransferSrcOptimal,
                       vk::AccessFlagBits::eTransferRead,
                       vk::ImageLayout::eGeneral,
                       vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eColorAttachmentRead);
    Barrier(commandBuffer, vk::AccessFlagBits::eTransferWrite, vk::AccessFlagBits::eShaderRead);
}
// Finishes recording a one-shot command buffer, submits it to `queue`,
// blocks until the queue has drained, then returns the buffer to its pool.
//
// @param commandBuffer buffer previously begun for single-time use
// @param queue         queue to submit to (coarse waitIdle sync — setup only)
// @param device        device that owns `commandPool`
// @param commandPool   pool the command buffer is freed back to
// @throws std::runtime_error if the submit fails
void EndSingleTimeCommands(const vk::CommandBuffer& commandBuffer, const vk::Queue& queue, const vk::Device& device, const vk::CommandPool& commandPool)
{
    commandBuffer.end();

    vk::SubmitInfo submitInfo;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &commandBuffer;

    // The vk::Result of the submit was previously discarded: a failed submit
    // (e.g. device lost, out of memory) went unnoticed and waitIdle below
    // could then stall or report stale state. Check it explicitly.
    if (queue.submit(1, &submitInfo, nullptr) != vk::Result::eSuccess)
    {
        throw std::runtime_error("Failed to submit single time command buffer");
    }

    // Full-queue synchronisation; acceptable for setup-time commands only.
    queue.waitIdle();

    device.freeCommandBuffers(commandPool, 1, &commandBuffer);
}
// Records a whole-buffer memory barrier making writes under `oldAccess`
// visible to subsequent accesses under `newAccess`.
void BufferBarrier(vk::Buffer buffer, vk::CommandBuffer commandBuffer, vk::AccessFlags oldAccess, vk::AccessFlags newAccess)
{
    // Both queue family indices ignored: no queue ownership transfer,
    // and VK_WHOLE_SIZE covers the buffer from offset 0 to its end.
    auto barrier = vk::BufferMemoryBarrier()
                       .setSrcAccessMask(oldAccess)
                       .setDstAccessMask(newAccess)
                       .setBuffer(buffer)
                       .setSize(VK_WHOLE_SIZE)
                       .setSrcQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED)
                       .setDstQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED);

    // Conservative all-commands → all-commands execution dependency.
    commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eAllCommands,
                                  vk::PipelineStageFlagBits::eAllCommands,
                                  {},
                                  nullptr,
                                  barrier,
                                  nullptr);
}
// Copies the full contents of `srcBuffer` into this buffer.
// Throws if the two buffers differ in size.
void GenericBuffer::CopyFrom(vk::CommandBuffer commandBuffer, GenericBuffer& srcBuffer)
{
    if (srcBuffer.mSize != mSize)
    {
        throw std::runtime_error("Cannot copy buffers of different sizes");
    }

    // TODO improve barriers
    // Flush prior shader writes on the source and order prior shader reads
    // on the destination before the transfer.
    srcBuffer.Barrier(commandBuffer, vk::AccessFlagBits::eShaderWrite, vk::AccessFlagBits::eTransferRead);
    Barrier(commandBuffer, vk::AccessFlagBits::eShaderRead, vk::AccessFlagBits::eTransferWrite);

    commandBuffer.copyBuffer(srcBuffer.Handle(), mBuffer, vk::BufferCopy().setSize(mSize));

    // Make the copy visible to subsequent shader reads on both buffers.
    Barrier(commandBuffer, vk::AccessFlagBits::eTransferWrite, vk::AccessFlagBits::eShaderRead);
    srcBuffer.Barrier(commandBuffer, vk::AccessFlagBits::eTransferRead, vk::AccessFlagBits::eShaderRead);
}
// Transitions `image` between the given layouts by emitting one image memory
// barrier on `cmdbuffer`, deriving src/dst access masks from the layouts.
// NOTE(review): this function uses the old function-style vulkan-hpp
// accessors (`srcAccessMask(...)`) while the sibling setImageLayout below
// uses direct member assignment — confirm which vulkan-hpp generation the
// project targets; one of the two is likely dead or non-compiling code.
void set_image_layout(
    vk::CommandBuffer cmdbuffer,
    vk::Image image,
    vk::ImageLayout oldImageLayout,
    vk::ImageLayout newImageLayout,
    vk::ImageSubresourceRange subresourceRange)
{
    // Create an image barrier object: access masks start empty, and both
    // queue family indices are ignored (no queue ownership transfer).
    vk::ImageMemoryBarrier imageMemoryBarrier{{},{}, oldImageLayout, newImageLayout, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED , image, subresourceRange};

    // Source layouts (old)

    // Preinitialized layout:
    // only allowed as an initial layout; make sure any host/transfer writes
    // to the image have been finished.
    if (oldImageLayout == vk::ImageLayout::ePreinitialized)
    {
        imageMemoryBarrier.srcAccessMask(vk::AccessFlagBits::eHostWrite | vk::AccessFlagBits::eTransferWrite);
    }

    // Old layout is color attachment:
    // make sure any writes to the color buffer have been finished.
    if (oldImageLayout == vk::ImageLayout::eColorAttachmentOptimal)
    {
        imageMemoryBarrier.srcAccessMask(vk::AccessFlagBits::eColorAttachmentWrite);
    }

    // Old layout is depth/stencil attachment:
    // make sure any writes to the depth/stencil buffer have been finished.
    if (oldImageLayout == vk::ImageLayout::eDepthStencilAttachmentOptimal)
    {
        imageMemoryBarrier.srcAccessMask(vk::AccessFlagBits::eDepthStencilAttachmentWrite);
    }

    // Old layout is transfer source:
    // make sure any reads from the image have been finished.
    if (oldImageLayout == vk::ImageLayout::eTransferSrcOptimal)
    {
        imageMemoryBarrier.srcAccessMask(vk::AccessFlagBits::eTransferRead);
    }

    // Old layout is shader read (sampler, input attachment):
    // make sure any shader reads from the image have been finished.
    if (oldImageLayout == vk::ImageLayout::eShaderReadOnlyOptimal)
    {
        imageMemoryBarrier.srcAccessMask(vk::AccessFlagBits::eShaderRead);
    }

    // Target layouts (new)

    // New layout is transfer destination (copy, blit):
    // make sure any copies to the image have been finished.
    if (newImageLayout == vk::ImageLayout::eTransferDstOptimal)
    {
        imageMemoryBarrier.dstAccessMask(vk::AccessFlagBits::eTransferWrite);
    }

    // New layout is transfer source (copy, blit):
    // make sure any reads from and writes to the image have been finished.
    if (newImageLayout == vk::ImageLayout::eTransferSrcOptimal)
    {
        imageMemoryBarrier.srcAccessMask() |= vk::AccessFlagBits::eTransferRead;
        imageMemoryBarrier.dstAccessMask(vk::AccessFlagBits::eTransferRead);
    }

    // New layout is color attachment:
    // make sure any writes to the color buffer have been finished.
    if (newImageLayout == vk::ImageLayout::eColorAttachmentOptimal)
    {
        imageMemoryBarrier.dstAccessMask(vk::AccessFlagBits::eColorAttachmentWrite);
        imageMemoryBarrier.srcAccessMask() |= vk::AccessFlagBits::eTransferRead;
    }

    // New layout is depth attachment:
    // make sure any writes to the depth/stencil buffer have been finished.
    if (newImageLayout == vk::ImageLayout::eDepthStencilAttachmentOptimal)
    {
        imageMemoryBarrier.dstAccessMask() |= vk::AccessFlagBits::eDepthStencilAttachmentWrite;
    }

    // New layout is shader read (sampler, input attachment):
    // make sure any writes to the image have been finished.
    // NOTE(review): this unconditionally OVERWRITES any srcAccessMask set by
    // the old-layout branches above; the upstream sample guards this with
    // `if (srcAccessMask == 0)` — confirm the clobber is intended.
    if (newImageLayout == vk::ImageLayout::eShaderReadOnlyOptimal)
    {
        imageMemoryBarrier.srcAccessMask(vk::AccessFlagBits::eHostWrite | vk::AccessFlagBits::eTransferWrite);
        imageMemoryBarrier.dstAccessMask(vk::AccessFlagBits::eShaderRead);
    }

    // NOTE(review): eTopOfPipe as the *source* stage performs no memory
    // accesses, so the srcAccessMask bits above have no effect per the
    // Vulkan spec (and validation layers flag this) — consider eAllCommands.
    cmdbuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTopOfPipe, vk::PipelineStageFlagBits::eTopOfPipe, {}, {}, {}, { imageMemoryBarrier });
}
// Zero-fills the entire buffer on the GPU, bracketed by barriers so the
// clear is ordered against surrounding shader accesses.
void GenericBuffer::Clear(vk::CommandBuffer commandBuffer)
{
    // Wait for in-flight shader reads before overwriting the contents.
    Barrier(commandBuffer, vk::AccessFlagBits::eShaderRead, vk::AccessFlagBits::eTransferWrite);

    // fillBuffer(buffer, offset, size, data): write zeros over [0, mSize).
    commandBuffer.fillBuffer(mBuffer, 0, mSize, 0);

    // Make the cleared contents visible to subsequent shader reads.
    Barrier(commandBuffer, vk::AccessFlagBits::eTransferWrite, vk::AccessFlagBits::eShaderRead);
}
// Transitions `image` between the given layouts by emitting one image memory
// barrier, deriving src/dst access masks from the old and new layouts.
// NOTE(review): `aspectMask` is accepted but never read — the caller's
// `subresourceRange` is used directly; confirm the parameter is vestigial.
static void setImageLayout(
    vk::CommandBuffer cmdbuffer,
    vk::Image image,
    vk::ImageAspectFlags aspectMask,
    vk::ImageLayout oldImageLayout,
    vk::ImageLayout newImageLayout,
    vk::ImageSubresourceRange subresourceRange)
{
    // Create an image barrier object (queue family indices left at their
    // defaults; access masks filled in by the switches below).
    vk::ImageMemoryBarrier imageMemoryBarrier;
    imageMemoryBarrier.oldLayout = oldImageLayout;
    imageMemoryBarrier.newLayout = newImageLayout;
    imageMemoryBarrier.image = image;
    imageMemoryBarrier.subresourceRange = subresourceRange;

    // Source layouts (old)
    // Source access mask controls actions that have to be finished on the old
    // layout before it will be transitioned to the new layout.
    // NOTE(review): no default case — layouts not listed (eGeneral, ...) fall
    // through with an empty srcAccessMask; confirm that is intended.
    switch (oldImageLayout)
    {
    case vk::ImageLayout::eUndefined:
        // Image layout is undefined (or does not matter).
        // Only valid as initial layout; no flags required, listed only for
        // completeness.
        //imageMemoryBarrier.srcAccessMask = 0;
        break;

    case vk::ImageLayout::ePreinitialized:
        // Image is preinitialized.
        // Only valid as initial layout for linear images; preserves memory
        // contents. Make sure host writes have been finished.
        imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eHostWrite;
        break;

    case vk::ImageLayout::eColorAttachmentOptimal:
        // Image is a color attachment.
        // Make sure any writes to the color buffer have been finished.
        imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite;
        break;

    case vk::ImageLayout::eDepthStencilAttachmentOptimal:
        // Image is a depth/stencil attachment.
        // Make sure any writes to the depth/stencil buffer have been finished.
        imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eDepthStencilAttachmentWrite;
        break;

    case vk::ImageLayout::eTransferSrcOptimal:
        // Image is a transfer source.
        // Make sure any reads from the image have been finished.
        imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
        break;

    case vk::ImageLayout::eTransferDstOptimal:
        // Image is a transfer destination.
        // Make sure any writes to the image have been finished.
        imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
        break;

    case vk::ImageLayout::eShaderReadOnlyOptimal:
        // Image is read by a shader.
        // Make sure any shader reads from the image have been finished.
        imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eShaderRead;
        break;
    }

    // Target layouts (new)
    // Destination access mask controls the dependency for the new image
    // layout.
    switch (newImageLayout)
    {
    case vk::ImageLayout::eTransferDstOptimal:
        // Image will be used as a transfer destination.
        // Make sure any writes to the image have been finished.
        imageMemoryBarrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite;
        break;

    case vk::ImageLayout::eTransferSrcOptimal:
        // Image will be used as a transfer source.
        // Make sure any reads from and writes to the image have been finished.
        imageMemoryBarrier.srcAccessMask = imageMemoryBarrier.srcAccessMask | vk::AccessFlagBits::eTransferRead;
        imageMemoryBarrier.dstAccessMask = vk::AccessFlagBits::eTransferRead;
        break;

    case vk::ImageLayout::eColorAttachmentOptimal:
        // Image will be used as a color attachment.
        // Make sure any writes to the color buffer have been finished.
        // NOTE(review): this overwrites (not ORs) any srcAccessMask set by
        // the old-layout switch above — confirm the clobber is intended.
        imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eTransferRead;
        imageMemoryBarrier.dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite;
        break;

    case vk::ImageLayout::eDepthStencilAttachmentOptimal:
        // Image layout will be used as a depth/stencil attachment.
        // Make sure any writes to depth/stencil buffer have been finished.
        imageMemoryBarrier.dstAccessMask = imageMemoryBarrier.dstAccessMask | vk::AccessFlagBits::eDepthStencilAttachmentWrite;
        break;

    case vk::ImageLayout::eShaderReadOnlyOptimal:
        // Image will be read in a shader (sampler, input attachment).
        // Make sure any writes to the image have been finished.
        // NOTE(review): the guard below is commented out, so this braced
        // block runs UNCONDITIONALLY and clobbers srcAccessMask from the
        // old-layout switch — confirm against the upstream sample, which
        // applies it only when srcAccessMask == 0.
        //if (imageMemoryBarrier.srcAccessMask == 0)
        {
            imageMemoryBarrier.srcAccessMask = vk::AccessFlagBits::eHostWrite | vk::AccessFlagBits::eTransferWrite;
        }
        imageMemoryBarrier.dstAccessMask = vk::AccessFlagBits::eShaderRead;
        break;
    }

    // Put barrier on top.
    // NOTE(review): eTopOfPipe as the *source* stage performs no memory
    // accesses, so the srcAccessMask computed above is ineffective per the
    // Vulkan spec — validation layers flag this; consider eAllCommands.
    vk::PipelineStageFlags srcStageFlags = vk::PipelineStageFlagBits::eTopOfPipe;
    vk::PipelineStageFlags destStageFlags = vk::PipelineStageFlagBits::eTopOfPipe;

    // Put barrier inside setup command buffer.
    cmdbuffer.pipelineBarrier(srcStageFlags, destStageFlags, vk::DependencyFlags(), nullptr, nullptr, imageMemoryBarrier);
}