// Acquires the next swapchain image, records a barrier transitioning it to
// COLOR_ATTACHMENT_OPTIMAL (and transferring queue-family ownership from the
// present queue to the graphics queue), submits that barrier gated on the
// acquire semaphore, and returns the SkSurface wrapping the image.
// Returns nullptr if the surface is lost or the swapchain cannot be rebuilt.
sk_sp<SkSurface> VulkanWindowContext::getBackbufferSurface() {
    BackbufferInfo* backbuffer = this->getAvailableBackbuffer();
    SkASSERT(backbuffer);

    // reset the fence — both usage fences are reset so this frame's submits can
    // re-signal them
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        ResetFences(fBackendContext->fDevice, 2, backbuffer->fUsageFences));
    // semaphores should be in unsignaled state

    // acquire the image; fAcquireSemaphore signals once the image is actually usable
    VkResult res = fAcquireNextImageKHR(fBackendContext->fDevice, fSwapchain, UINT64_MAX,
                                        backbuffer->fAcquireSemaphore, VK_NULL_HANDLE,
                                        &backbuffer->fImageIndex);
    if (VK_ERROR_SURFACE_LOST_KHR == res) {
        // Unrecoverable here: we lack the platform data needed to rebuild the VkSurface.
        // need to figure out how to create a new vkSurface without the platformData*
        // maybe use attach somehow? but need a Window
        return nullptr;
    }
    if (VK_ERROR_OUT_OF_DATE_KHR == res) {
        // tear swapchain down and try again
        if (!this->createSwapchain(0, 0, fDisplayParams)) {
            return nullptr;
        }
        // NOTE(review): createSwapchain() may reallocate the backbuffer array, in
        // which case 'backbuffer' would be stale here — confirm whether
        // getAvailableBackbuffer() needs to be re-called after recreation.

        // acquire the image
        res = fAcquireNextImageKHR(fBackendContext->fDevice, fSwapchain, UINT64_MAX,
                                   backbuffer->fAcquireSemaphore, VK_NULL_HANDLE,
                                   &backbuffer->fImageIndex);
        if (VK_SUCCESS != res) {
            return nullptr;
        }
    }

    // set up layout transfer from initial to color attachment
    VkImageLayout layout = fImageLayouts[backbuffer->fImageIndex];
    // A never-used image is UNDEFINED: there is no prior work to wait on, so the
    // barrier can start at top-of-pipe with no source access.
    VkPipelineStageFlags srcStageMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
                                        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT :
                                        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkAccessFlags srcAccessMask = (VK_IMAGE_LAYOUT_UNDEFINED == layout) ?
                                  0 : VK_ACCESS_MEMORY_READ_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        NULL,                                        // pNext
        srcAccessMask,                               // srcAccessMask
        dstAccessMask,                               // dstAccessMask
        layout,                                      // oldLayout
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,    // newLayout
        fPresentQueueIndex,                          // srcQueueFamilyIndex
        fBackendContext->fGraphicsQueueIndex,        // dstQueueFamilyIndex
        fImages[backbuffer->fImageIndex],            // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }    // subresourceRange
    };

    // Record the barrier into this backbuffer's dedicated transition command buffer.
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        ResetCommandBuffer(backbuffer->fTransitionCmdBuffers[0], 0));
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        BeginCommandBuffer(backbuffer->fTransitionCmdBuffers[0], &info));
    GR_VK_CALL(fBackendContext->fInterface,
               CmdPipelineBarrier(backbuffer->fTransitionCmdBuffers[0],
                                  srcStageMask, dstStageMask, 0,
                                  0, nullptr,
                                  0, nullptr,
                                  1, &imageMemoryBarrier));
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        EndCommandBuffer(backbuffer->fTransitionCmdBuffers[0]));

    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    // insert the layout transfer into the queue and wait on the acquire;
    // fUsageFences[0] is signaled when the transition finishes executing
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    submitInfo.pWaitSemaphores = &backbuffer->fAcquireSemaphore;
    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->fTransitionCmdBuffers[0];
    submitInfo.signalSemaphoreCount = 0;
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        QueueSubmit(fBackendContext->fQueue, 1, &submitInfo,
                                    backbuffer->fUsageFences[0]));

    return sk_ref_sp(fSurfaces[backbuffer->fImageIndex].get());
}
// Compiles the accumulated vertex/fragment shaders, builds the pipeline layout
// (uniform-buffer DS layout + sampler DS layout), creates the VkPipeline, and
// wraps everything in a new GrVkPipelineState owned by the caller.
// Returns nullptr (after cleaning up the layouts) if pipeline creation fails.
GrVkPipelineState* GrVkPipelineStateBuilder::finalize(GrPrimitiveType primitiveType,
                                                      const GrVkRenderPass& renderPass,
                                                      const GrVkPipelineState::Desc& desc) {
    VkDescriptorSetLayout dsLayout[2];
    VkPipelineLayout pipelineLayout;
    VkShaderModule vertShaderModule;
    VkShaderModule fragShaderModule;

    GrVkResourceProvider& resourceProvider = fGpu->resourceProvider();

    // This layout is not owned by the PipelineStateBuilder and thus should not be destroyed
    dsLayout[GrVkUniformHandler::kUniformBufferDescSet] = resourceProvider.getUniformDSLayout();

    GrVkDescriptorSetManager::Handle samplerDSHandle;
    resourceProvider.getSamplerDescriptorSetHandle(fUniformHandler, &samplerDSHandle);
    dsLayout[GrVkUniformHandler::kSamplerDescSet] =
            resourceProvider.getSamplerDSLayout(samplerDSHandle);

    // Create the VkPipelineLayout
    VkPipelineLayoutCreateInfo layoutCreateInfo;
    // BUG FIX: this previously zeroed only sizeof(VkPipelineLayoutCreateFlags)
    // (4 bytes), leaving the rest of the struct uninitialized until the explicit
    // assignments below. Zero the whole struct.
    memset(&layoutCreateInfo, 0, sizeof(layoutCreateInfo));
    layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    layoutCreateInfo.pNext = nullptr;
    layoutCreateInfo.flags = 0;
    layoutCreateInfo.setLayoutCount = 2;
    layoutCreateInfo.pSetLayouts = dsLayout;
    layoutCreateInfo.pushConstantRangeCount = 0;
    layoutCreateInfo.pPushConstantRanges = nullptr;

    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreatePipelineLayout(fGpu->device(),
                                                                  &layoutCreateInfo,
                                                                  nullptr,
                                                                  &pipelineLayout));

    // We need to enable the following extensions so that the compiler can correctly make spir-v
    // from our glsl shaders.
    fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
    fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
    fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
    fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");

    this->finalizeShaders();

    VkPipelineShaderStageCreateInfo shaderStageInfo[2];
    SkAssertResult(CreateVkShaderModule(fGpu,
                                        VK_SHADER_STAGE_VERTEX_BIT,
                                        fVS,
                                        &vertShaderModule,
                                        &shaderStageInfo[0]));

    SkAssertResult(CreateVkShaderModule(fGpu,
                                        VK_SHADER_STAGE_FRAGMENT_BIT,
                                        fFS,
                                        &fragShaderModule,
                                        &shaderStageInfo[1]));

    GrVkPipeline* pipeline = resourceProvider.createPipeline(fPipeline,
                                                             fPrimProc,
                                                             shaderStageInfo,
                                                             2,
                                                             primitiveType,
                                                             renderPass,
                                                             pipelineLayout);
    // The pipeline holds the compiled code; the modules are no longer needed.
    GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), vertShaderModule,
                                                        nullptr));
    GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), fragShaderModule,
                                                        nullptr));

    if (!pipeline) {
        // Only the sampler DS layout is ours to destroy; the uniform DS layout is
        // owned by the resource provider (see comment above).
        GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(),
                                                              pipelineLayout,
                                                              nullptr));
        GR_VK_CALL(fGpu->vkInterface(),
                   DestroyDescriptorSetLayout(fGpu->device(),
                                              dsLayout[GrVkUniformHandler::kSamplerDescSet],
                                              nullptr));
        this->cleanupFragmentProcessors();
        return nullptr;
    }

    return new GrVkPipelineState(fGpu,
                                 desc,
                                 pipeline,
                                 pipelineLayout,
                                 samplerDSHandle,
                                 fUniformHandles,
                                 fUniformHandler.fUniforms,
                                 fUniformHandler.fCurrentVertexUBOOffset,
                                 fUniformHandler.fCurrentFragmentUBOOffset,
                                 (uint32_t)fUniformHandler.numSamplers(),
                                 fGeometryProcessor,
                                 fXferProcessor,
                                 fFragmentProcessors);
}
// Concatenates 'builder''s shader source strings, compiles them (to SPIR-V via
// SkSL or shaderc, or passes raw GLSL through when the driver supports that),
// creates a VkShaderModule, and fills out 'stageInfo' for pipeline creation.
// Returns false on compile or module-creation failure.
bool GrVkPipelineStateBuilder::CreateVkShaderModule(const GrVkGpu* gpu,
                                                    VkShaderStageFlagBits stage,
                                                    const GrGLSLShaderBuilder& builder,
                                                    VkShaderModule* shaderModule,
                                                    VkPipelineShaderStageCreateInfo* stageInfo) {
    // Join the builder's compiler strings (skipping nulls) into one source blob.
    SkString shaderString;
    for (int i = 0; i < builder.fCompilerStrings.count(); ++i) {
        if (builder.fCompilerStrings[i]) {
            shaderString.append(builder.fCompilerStrings[i]);
            shaderString.append("\n");
        }
    }
    VkShaderModuleCreateInfo moduleCreateInfo;
    memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
    moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    moduleCreateInfo.pNext = nullptr;
    moduleCreateInfo.flags = 0;
#if USE_SKSL
    std::string code;
#else
    // Declared at function scope so it can be released after CreateShaderModule;
    // the SPIR-V bytes it owns must stay alive until then.
    shaderc_compilation_result_t result = nullptr;
#endif
    if (gpu->vkCaps().canUseGLSLForShaderModule()) {
        // Driver accepts GLSL text directly — no SPIR-V compile step.
        moduleCreateInfo.codeSize = strlen(shaderString.c_str());
        moduleCreateInfo.pCode = (const uint32_t*)shaderString.c_str();
    } else {
#if USE_SKSL
        bool result = gpu->shaderCompiler()->toSPIRV(vk_shader_stage_to_skiasl_kind(stage),
                                                     std::string(shaderString.c_str()),
                                                     &code);
        if (!result) {
            SkDebugf("%s\n", gpu->shaderCompiler()->errorText().c_str());
            return false;
        }
        moduleCreateInfo.codeSize = code.size();
        moduleCreateInfo.pCode = (const uint32_t*) code.c_str();
#else
        shaderc_compiler_t compiler = gpu->shadercCompiler();

        shaderc_compile_options_t options = shaderc_compile_options_initialize();

        shaderc_shader_kind shadercStage = vk_shader_stage_to_shaderc_kind(stage);
        result = shaderc_compile_into_spv(compiler,
                                          shaderString.c_str(),
                                          strlen(shaderString.c_str()),
                                          shadercStage,
                                          "shader", "main", options);
        shaderc_compile_options_release(options);
#ifdef SK_DEBUG
        if (shaderc_result_get_num_errors(result)) {
            SkDebugf("%s\n", shaderString.c_str());
            SkDebugf("%s\n", shaderc_result_get_error_message(result));
            // NOTE(review): 'result' is not released on this early return
            // (shaderc_result_release below is skipped) — looks like a small
            // leak on the debug error path; confirm and free before returning.
            return false;
        }
#endif // SK_DEBUG
        moduleCreateInfo.codeSize = shaderc_result_get_length(result);
        moduleCreateInfo.pCode = (const uint32_t*)shaderc_result_get_bytes(result);
#endif // USE_SKSL
    }

    VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
                                                                     &moduleCreateInfo,
                                                                     nullptr,
                                                                     shaderModule));
    // The module now owns a copy of the code, so the shaderc result can be freed.
    if (!gpu->vkCaps().canUseGLSLForShaderModule()) {
#if !USE_SKSL
        shaderc_result_release(result);
#endif
    }
    if (err) {
        return false;
    }

    memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
    stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    stageInfo->pNext = nullptr;
    stageInfo->flags = 0;
    stageInfo->stage = stage;
    stageInfo->module = *shaderModule;
    stageInfo->pName = "main";
    stageInfo->pSpecializationInfo = nullptr;

    return true;
}
// Releases the wrapped VkSampler once the GPU resource system decides this
// object's device memory can go away.
void GrVkSampler::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(fSampler);
    const auto* iface = gpu->vkInterface();
    GR_VK_CALL(iface, DestroySampler(gpu->device(), fSampler, nullptr));
}
// Releases the wrapped VkRenderPass once the GPU resource system decides this
// object's device memory can go away.
void GrVkRenderPass::freeGPUData(const GrVkGpu* gpu) const {
    const auto* iface = gpu->vkInterface();
    GR_VK_CALL(iface, DestroyRenderPass(gpu->device(), fRenderPass, nullptr));
}
// Builds the fixed-function pipeline used by the copy-as-draw path: a two-stage
// (vert/frag) pipeline drawing a triangle strip of 2-float positions, no blending,
// no depth/stencil tests, with viewport and scissor left dynamic. Most state is
// constant, so it lives in function-local static const tables. The caller owns
// the returned GrVkCopyPipeline; returns nullptr if creation fails.
GrVkCopyPipeline* GrVkCopyPipeline::Create(GrVkGpu* gpu,
                                           VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                           VkPipelineLayout pipelineLayout,
                                           int numSamples,
                                           const GrVkRenderPass& renderPass,
                                           VkPipelineCache cache) {
    // Single vec2 position attribute at location 0, binding 0.
    static const VkVertexInputAttributeDescription attributeDesc = {
        0,                          // location
        0,                          // binding
        VK_FORMAT_R32G32_SFLOAT,    // format
        0,                          // offset
    };

    static const VkVertexInputBindingDescription bindingDesc = {
        0,                              // binding
        2 * sizeof(float),              // stride
        VK_VERTEX_INPUT_RATE_VERTEX     // inputRate
    };

    static const VkPipelineVertexInputStateCreateInfo vertexInputInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,    // sType
        nullptr,                                                      // pNext
        0,                                                            // flags
        1,                                                            // vertexBindingDescriptionCount
        &bindingDesc,                                                 // pVertexBindingDescriptions
        1,                                                            // vertexAttributeDescriptionCnt
        &attributeDesc,                                               // pVertexAttributeDescriptions
    };

    static const VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,    // sType
        nullptr,                                                        // pNext
        0,                                                              // flags
        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,                           // topology
        VK_FALSE                                                        // primitiveRestartEnable
    };

    // Stencil testing is disabled below; this op state is just a placeholder.
    static const VkStencilOpState dummyStencilState = {
        VK_STENCIL_OP_KEEP,     // failOp
        VK_STENCIL_OP_KEEP,     // passOp
        VK_STENCIL_OP_KEEP,     // depthFailOp
        VK_COMPARE_OP_NEVER,    // compareOp
        0,                      // compareMask
        0,                      // writeMask
        0                       // reference
    };

    static const VkPipelineDepthStencilStateCreateInfo stencilInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,    // sType
        nullptr,                                                       // pNext
        0,                                                             // flags
        VK_FALSE,                                                      // depthTestEnable
        VK_FALSE,                                                      // depthWriteEnable
        VK_COMPARE_OP_ALWAYS,                                          // depthCompareOp
        VK_FALSE,                                                      // depthBoundsTestEnable
        VK_FALSE,                                                      // stencilTestEnable
        dummyStencilState,                                             // front
        dummyStencilState,                                             // back
        0.0f,                                                          // minDepthBounds
        1.0f                                                           // maxDepthBounds
    };

    // Viewport and scissor pointers are null because both are dynamic state (see
    // dynamicStates below).
    static const VkPipelineViewportStateCreateInfo viewportInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,    // sType
        nullptr,                                                  // pNext
        0,                                                        // flags
        1,                                                        // viewportCount
        nullptr,                                                  // pViewports
        1,                                                        // scissorCount
        nullptr                                                   // pScissors
    };

    // Blending disabled; write straight to all four channels.
    static const VkPipelineColorBlendAttachmentState attachmentState = {
        VK_FALSE,                                              // blendEnable
        VK_BLEND_FACTOR_ONE,                                   // srcColorBlendFactor
        VK_BLEND_FACTOR_ZERO,                                  // dstColorBlendFactor
        VK_BLEND_OP_ADD,                                       // colorBlendOp
        VK_BLEND_FACTOR_ONE,                                   // srcAlphaBlendFactor
        VK_BLEND_FACTOR_ZERO,                                  // dstAlphaBlendFactor
        VK_BLEND_OP_ADD,                                       // alphaBlendOp
        VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |  // colorWriteMask
        VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT    // colorWriteMask
    };

    static const VkPipelineColorBlendStateCreateInfo colorBlendInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,    // sType
        nullptr,                                                     // pNext
        0,                                                           // flags
        VK_FALSE,                                                    // logicOpEnable
        VK_LOGIC_OP_CLEAR,                                           // logicOp
        1,                                                           // attachmentCount
        &attachmentState,                                            // pAttachments
        { 0.f, 0.f, 0.f, 0.f }                                       // blendConstants[4]
    };

    static const VkPipelineRasterizationStateCreateInfo rasterInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,    // sType
        nullptr,                                                       // pNext
        0,                                                             // flags
        VK_FALSE,                                                      // depthClampEnable
        VK_FALSE,                                                      // rasterizerDiscardEnabled
        VK_POLYGON_MODE_FILL,                                          // polygonMode
        VK_CULL_MODE_NONE,                                             // cullMode
        VK_FRONT_FACE_COUNTER_CLOCKWISE,                               // frontFace
        VK_FALSE,                                                      // depthBiasEnable
        0.0f,                                                          // depthBiasConstantFactor
        0.0f,                                                          // depthBiasClamp
        0.0f,                                                          // depthBiasSlopeFactor
        1.0f                                                           // lineWidth
    };

    static const VkDynamicState dynamicStates[2] = { VK_DYNAMIC_STATE_VIEWPORT,
                                                     VK_DYNAMIC_STATE_SCISSOR };
    static const VkPipelineDynamicStateCreateInfo dynamicInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,    // sType
        nullptr,                                                 // pNext
        0,                                                       // flags
        2,                                                       // dynamicStateCount
        dynamicStates                                            // pDynamicStates
    };

    // Multisample state is the only piece that varies per render target.
    VkPipelineMultisampleStateCreateInfo multisampleInfo;
    setup_multisample_state(numSamples, &multisampleInfo);

    VkGraphicsPipelineCreateInfo pipelineCreateInfo;
    memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
    pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineCreateInfo.pNext = nullptr;
    pipelineCreateInfo.flags = 0;
    pipelineCreateInfo.stageCount = 2;
    pipelineCreateInfo.pStages = shaderStageInfo;
    pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
    pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
    pipelineCreateInfo.pTessellationState = nullptr;
    pipelineCreateInfo.pViewportState = &viewportInfo;
    pipelineCreateInfo.pRasterizationState = &rasterInfo;
    pipelineCreateInfo.pMultisampleState = &multisampleInfo;
    pipelineCreateInfo.pDepthStencilState = &stencilInfo;
    pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
    pipelineCreateInfo.pDynamicState = &dynamicInfo;
    pipelineCreateInfo.layout = pipelineLayout;
    pipelineCreateInfo.renderPass = renderPass.vkRenderPass();
    pipelineCreateInfo.subpass = 0;
    pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
    pipelineCreateInfo.basePipelineIndex = -1;

    VkPipeline vkPipeline;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
                                                                          cache, 1,
                                                                          &pipelineCreateInfo,
                                                                          nullptr, &vkPipeline));
    if (err) {
        SkDebugf("Failed to create copy pipeline. Error: %d\n", err);
        return nullptr;
    }

    return new GrVkCopyPipeline(vkPipeline, &renderPass);
}
// Assembles all fixed-function state from the Ganesh pipeline description and
// creates a VkPipeline for it. The caller owns the returned GrVkPipeline;
// returns nullptr if vkCreateGraphicsPipelines fails.
GrVkPipeline* GrVkPipeline::Create(GrVkGpu* gpu, const GrPipeline& pipeline,
                                   const GrPrimitiveProcessor& primProc,
                                   VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                   int shaderStageCount,
                                   GrPrimitiveType primitiveType,
                                   const GrVkRenderPass& renderPass,
                                   VkPipelineLayout layout,
                                   VkPipelineCache cache) {
    VkPipelineVertexInputStateCreateInfo vertexInputInfo;
    VkVertexInputBindingDescription bindingDesc;
    // TODO: allocate this based on VkPhysicalDeviceLimits::maxVertexInputAttributes
    static const int kMaxVertexAttributes = 16;
    // BUG FIX: this scratch array was previously declared 'static', making it
    // shared mutable state — a data race if pipelines are ever created from more
    // than one thread/context concurrently. It is only 16 small structs, so keep
    // it on the stack instead.
    VkVertexInputAttributeDescription attributeDesc[kMaxVertexAttributes];
    setup_vertex_input_state(primProc, &vertexInputInfo, &bindingDesc, 1,
                             attributeDesc, kMaxVertexAttributes);

    VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
    setup_input_assembly_state(primitiveType, &inputAssemblyInfo);

    VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
    setup_depth_stencil_state(gpu, pipeline.getStencil(), &depthStencilInfo);

    GrRenderTarget* rt = pipeline.getRenderTarget();
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
    VkPipelineViewportStateCreateInfo viewportInfo;
    setup_viewport_scissor_state(gpu, pipeline, vkRT, &viewportInfo);

    VkPipelineMultisampleStateCreateInfo multisampleInfo;
    setup_multisample_state(pipeline, &multisampleInfo);

    // We will only have one color attachment per pipeline.
    VkPipelineColorBlendAttachmentState attachmentStates[1];
    VkPipelineColorBlendStateCreateInfo colorBlendInfo;
    setup_color_blend_state(gpu, pipeline, &colorBlendInfo, attachmentStates);

    VkPipelineRasterizationStateCreateInfo rasterInfo;
    setup_raster_state(gpu, pipeline, &rasterInfo);

    VkDynamicState dynamicStates[3];
    VkPipelineDynamicStateCreateInfo dynamicInfo;
    setup_dynamic_state(gpu, pipeline, &dynamicInfo, dynamicStates);

    VkGraphicsPipelineCreateInfo pipelineCreateInfo;
    memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
    pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineCreateInfo.pNext = nullptr;
    pipelineCreateInfo.flags = 0;
    pipelineCreateInfo.stageCount = shaderStageCount;
    pipelineCreateInfo.pStages = shaderStageInfo;
    pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
    pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
    pipelineCreateInfo.pTessellationState = nullptr;
    pipelineCreateInfo.pViewportState = &viewportInfo;
    pipelineCreateInfo.pRasterizationState = &rasterInfo;
    pipelineCreateInfo.pMultisampleState = &multisampleInfo;
    pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
    pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
    pipelineCreateInfo.pDynamicState = &dynamicInfo;
    pipelineCreateInfo.layout = layout;
    pipelineCreateInfo.renderPass = renderPass.vkRenderPass();
    pipelineCreateInfo.subpass = 0;
    pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
    pipelineCreateInfo.basePipelineIndex = -1;

    VkPipeline vkPipeline;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
                                                                          cache, 1,
                                                                          &pipelineCreateInfo,
                                                                          nullptr, &vkPipeline));
    if (err) {
        return nullptr;
    }

    return new GrVkPipeline(vkPipeline);
}
// Gathers every sampler binding used by this draw (primitive-processor textures,
// fragment-processor textures, and the optional dst-copy texture), writes them
// into a freshly-acquired sampler descriptor set, and binds that set on
// 'commandBuffer'. Also pushes processor uniform data via fGeometryProcessor.
void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
                                           const GrPrimitiveProcessor& primProc,
                                           const GrPipeline& pipeline,
                                           const GrTextureProxy* const primProcTextures[],
                                           GrVkCommandBuffer* commandBuffer) {
    SkASSERT(primProcTextures || !primProc.numTextureSamplers());

    struct SamplerBindings {
        GrSamplerState fState;
        GrVkTexture* fTexture;
    };
    // fNumSamplers was computed at pipeline-state build time; asserted below to
    // match the count collected here.
    SkAutoSTMalloc<8, SamplerBindings> samplerBindings(fNumSamplers);
    int currTextureBinding = 0;

    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    // Primitive-processor textures come first, in sampler order.
    for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
        const auto& sampler = primProc.textureSampler(i);
        auto texture = static_cast<GrVkTexture*>(primProcTextures[i]->peekTexture());
        samplerBindings[currTextureBinding++] = {sampler.samplerState(), texture};
    }

    // Walk the fragment processors and their GLSL counterparts in lockstep; both
    // iterators must exhaust together (asserted below).
    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const auto& sampler = fp->textureSampler(i);
            samplerBindings[currTextureBinding++] =
                    {sampler.samplerState(), static_cast<GrVkTexture*>(sampler.peekTexture())};
        }
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    // The dst-copy texture (if any) occupies the last binding.
    if (GrTextureProxy* dstTextureProxy = pipeline.dstTextureProxy()) {
        samplerBindings[currTextureBinding++] = {
                GrSamplerState::ClampNearest(),
                static_cast<GrVkTexture*>(dstTextureProxy->peekTexture())};
    }

    // Get new descriptor set
    SkASSERT(fNumSamplers == currTextureBinding);
    if (fNumSamplers) {
        if (fSamplerDescriptorSet) {
            // Return the previous frame's set to the recycler before grabbing a new one.
            fSamplerDescriptorSet->recycle(gpu);
        }
        fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
        int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
        fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
        for (int i = 0; i < fNumSamplers; ++i) {
            const GrSamplerState& state = samplerBindings[i].fState;
            GrVkTexture* texture = samplerBindings[i].fTexture;

            const GrVkImageView* textureView = texture->textureView();
            const GrVkSampler* sampler = nullptr;
            if (fImmutableSamplers[i]) {
                // Immutable (e.g. ycbcr) samplers are owned by the pipeline state.
                sampler = fImmutableSamplers[i];
            } else {
                sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
                        state, texture->ycbcrConversionInfo());
            }
            SkASSERT(sampler);

            VkDescriptorImageInfo imageInfo;
            memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
            imageInfo.sampler = sampler->sampler();
            imageInfo.imageView = textureView->imageView();
            imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            VkWriteDescriptorSet writeInfo;
            memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
            writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeInfo.pNext = nullptr;
            writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
            writeInfo.dstBinding = i;
            writeInfo.dstArrayElement = 0;
            writeInfo.descriptorCount = 1;
            writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            writeInfo.pImageInfo = &imageInfo;
            writeInfo.pBufferInfo = nullptr;
            writeInfo.pTexelBufferView = nullptr;

            GR_VK_CALL(gpu->vkInterface(),
                       UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
            // addResource() keeps the sampler alive for the command buffer's lifetime;
            // for looked-up (non-immutable) samplers we then drop the ref that
            // findOrCreateCompatibleSampler handed us.
            commandBuffer->addResource(sampler);
            if (!fImmutableSamplers[i]) {
                sampler->unref(gpu);
            }
            commandBuffer->addResource(samplerBindings[i].fTexture->textureView());
            commandBuffer->addResource(samplerBindings[i].fTexture->resource());
        }

        commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, samplerDSIdx, 1,
                                          &fDescriptorSets[samplerDSIdx], 0, nullptr);
        commandBuffer->addRecycledResource(fSamplerDescriptorSet);
    }
}
// Blocks until the given fence signals. With an effectively-infinite timeout the
// VK_TIMEOUT check is defensive; any other result reads as "done waiting".
bool waitFence(sk_gpu_test::PlatformFence opaqueFence) const override {
    auto fence = (VkFence)opaqueFence;
    static constexpr uint64_t kNoTimeout = ~((uint64_t)0);
    const auto status = GR_VK_CALL(fVk, WaitForFences(fDevice, 1, &fence, true, kNoTimeout));
    return VK_TIMEOUT != status;
}
// Copies 'srcRect' of 'src' into 'dst' at 'dstPoint' by drawing a textured quad
// with the dedicated copy program: updates the uniform (pos/texcoord transforms)
// and sampler descriptor sets, transitions both images to the needed layouts,
// then records the quad draw into a secondary command buffer inside a render
// pass on dst. Returns false if any prerequisite (render target, texture,
// program, pipeline, secondary command buffer) is unavailable.
bool GrVkCopyManager::copySurfaceAsDraw(GrVkGpu* gpu,
                                        GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                        GrSurface* src, GrSurfaceOrigin srcOrigin,
                                        const SkIRect& srcRect, const SkIPoint& dstPoint,
                                        bool canDiscardOutsideDstRect) {
    // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the
    // swizzle.
    if (gpu->caps()->shaderCaps()->configOutputSwizzle(src->config()) !=
        gpu->caps()->shaderCaps()->configOutputSwizzle(dst->config())) {
        return false;
    }

    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
    if (!rt) {
        return false;
    }

    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());
    if (!srcTex) {
        return false;
    }

    // Lazily build the copy program the first time through; a null vert module
    // implies all copy-program state is null (asserted).
    if (VK_NULL_HANDLE == fVertShaderModule) {
        SkASSERT(VK_NULL_HANDLE == fFragShaderModule &&
                 nullptr == fPipelineLayout &&
                 nullptr == fVertexBuffer.get() &&
                 nullptr == fUniformBuffer.get());
        if (!this->createCopyProgram(gpu)) {
            SkDebugf("Failed to create copy program.\n");
            return false;
        }
    }
    SkASSERT(fPipelineLayout);

    GrVkResourceProvider& resourceProv = gpu->resourceProvider();

    GrVkCopyPipeline* pipeline = resourceProv.findOrCreateCopyPipeline(rt,
                                                                       fShaderStageInfo,
                                                                       fPipelineLayout->layout());
    if (!pipeline) {
        return false;
    }

    // UPDATE UNIFORM DESCRIPTOR SET
    int w = srcRect.width();
    int h = srcRect.height();

    // dst rect edges in NDC (-1 to 1)
    int dw = dst->width();
    int dh = dst->height();
    float dx0 = 2.f * dstPoint.fX / dw - 1.f;
    float dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
    float dy0 = 2.f * dstPoint.fY / dh - 1.f;
    float dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
    if (kBottomLeft_GrSurfaceOrigin == dstOrigin) {
        // Flip Y for bottom-left-origin destinations.
        dy0 = -dy0;
        dy1 = -dy1;
    }

    float sx0 = (float)srcRect.fLeft;
    float sx1 = (float)(srcRect.fLeft + w);
    float sy0 = (float)srcRect.fTop;
    float sy1 = (float)(srcRect.fTop + h);
    int sh = src->height();
    if (kBottomLeft_GrSurfaceOrigin == srcOrigin) {
        sy0 = sh - sy0;
        sy1 = sh - sy1;
    }
    // src rect edges in normalized texture space (0 to 1).
    int sw = src->width();
    sx0 /= sw;
    sx1 /= sw;
    sy0 /= sh;
    sy1 /= sh;

    // Matches the copy program's vertexUniformBuffer: uPosXform then uTexCoordXform,
    // each as (scale.x, scale.y, translate.x, translate.y).
    float uniData[] = { dx1 - dx0, dy1 - dy0, dx0, dy0,    // posXform
                        sx1 - sx0, sy1 - sy0, sx0, sy0 };  // texCoordXform

    fUniformBuffer->updateData(gpu, uniData, sizeof(uniData), nullptr);

    const GrVkDescriptorSet* uniformDS = resourceProv.getUniformDescriptorSet();
    SkASSERT(uniformDS);

    VkDescriptorBufferInfo uniBufferInfo;
    uniBufferInfo.buffer = fUniformBuffer->buffer();
    uniBufferInfo.offset = fUniformBuffer->offset();
    uniBufferInfo.range = fUniformBuffer->size();

    VkWriteDescriptorSet descriptorWrites;
    descriptorWrites.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrites.pNext = nullptr;
    descriptorWrites.dstSet = uniformDS->descriptorSet();
    descriptorWrites.dstBinding = GrVkUniformHandler::kGeometryBinding;
    descriptorWrites.dstArrayElement = 0;
    descriptorWrites.descriptorCount = 1;
    descriptorWrites.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrites.pImageInfo = nullptr;
    descriptorWrites.pBufferInfo = &uniBufferInfo;
    descriptorWrites.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrites, 0, nullptr));

    // UPDATE SAMPLER DESCRIPTOR SET
    const GrVkDescriptorSet* samplerDS =
        gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);

    GrSamplerState samplerState = GrSamplerState::ClampNearest();

    GrVkSampler* sampler = resourceProv.findOrCreateCompatibleSampler(
            samplerState, GrVkYcbcrConversionInfo());

    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = sampler->sampler();
    imageInfo.imageView = srcTex->textureView()->imageView();
    imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = samplerDS->descriptorSet();
    writeInfo.dstBinding = 0;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));

    VkDescriptorSet vkDescSets[] = { uniformDS->descriptorSet(), samplerDS->descriptorSet() };

    // If the source is itself a multisampled render target, resolve it before sampling.
    GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(srcTex->asRenderTarget());
    if (texRT) {
        gpu->resolveRenderTargetNoFlush(texRT);
    }

    // TODO: Make tighter bounds and then adjust bounds for origin and granularity if we see
    // any perf issues with using the whole bounds
    SkIRect bounds = SkIRect::MakeWH(rt->width(), rt->height());

    // Change layouts of rt and texture. We aren't blending so we don't need color attachment read
    // access for blending.
    GrVkImage* targetImage = rt->msaaImage() ? rt->msaaImage() : rt;
    VkAccessFlags dstAccessFlags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    if (!canDiscardOutsideDstRect) {
        // We need to load the color attachment so need to be able to read it.
        dstAccessFlags |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
    }
    targetImage->setImageLayout(gpu,
                                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                dstAccessFlags,
                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                false);

    srcTex->setImageLayout(gpu,
                           VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                           VK_ACCESS_SHADER_READ_BIT,
                           VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                           false);

    GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
    if (stencil) {
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        // We aren't actually using the stencil but we still load and store it so we need
        // appropriate barriers.
        // TODO: Once we refactor surface and how we conntect stencil to RTs, we should not even
        // have the stencil on this render pass if possible.
        vkStencil->setImageLayout(gpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }

    VkAttachmentLoadOp loadOp = canDiscardOutsideDstRect ? VK_ATTACHMENT_LOAD_OP_DONT_CARE
                                                         : VK_ATTACHMENT_LOAD_OP_LOAD;
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    const GrVkRenderPass* renderPass;
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle = rt->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        renderPass = gpu->resourceProvider().findRenderPass(rpHandle,
                                                            vkColorOps,
                                                            vkStencilOps);
    } else {
        renderPass = gpu->resourceProvider().findRenderPass(*rt,
                                                            vkColorOps,
                                                            vkStencilOps);
    }

    SkASSERT(renderPass->isCompatible(*rt->simpleRenderPass()));

    GrVkPrimaryCommandBuffer* cmdBuffer = gpu->currentCommandBuffer();
    cmdBuffer->beginRenderPass(gpu, renderPass, nullptr, *rt, bounds, true);

    GrVkSecondaryCommandBuffer* secondary =
        gpu->cmdPool()->findOrCreateSecondaryCommandBuffer(gpu);
    if (!secondary) {
        // NOTE(review): this early return appears to leak the refs acquired above
        // (pipeline, uniformDS, samplerDS, sampler, renderPass are only unreffed on
        // the success path at the bottom) — confirm and unref before returning.
        return false;
    }
    secondary->begin(gpu, rt->framebuffer(), renderPass);

    secondary->bindPipeline(gpu, pipeline);

    // Uniform DescriptorSet, Sampler DescriptorSet, and vertex shader uniformBuffer
    SkSTArray<3, const GrVkRecycledResource*> descriptorRecycledResources;
    descriptorRecycledResources.push_back(uniformDS);
    descriptorRecycledResources.push_back(samplerDS);
    descriptorRecycledResources.push_back(fUniformBuffer->resource());

    // One sampler, texture view, and texture
    SkSTArray<3, const GrVkResource*> descriptorResources;
    descriptorResources.push_back(sampler);
    descriptorResources.push_back(srcTex->textureView());
    descriptorResources.push_back(srcTex->resource());

    secondary->bindDescriptorSets(gpu,
                                  descriptorRecycledResources,
                                  descriptorResources,
                                  fPipelineLayout,
                                  0,
                                  2,
                                  vkDescSets,
                                  0,
                                  nullptr);

    // Set Dynamic viewport and stencil
    // We always use one viewport the size of the RT
    VkViewport viewport;
    viewport.x = 0.0f;
    viewport.y = 0.0f;
    viewport.width = SkIntToScalar(rt->width());
    viewport.height = SkIntToScalar(rt->height());
    viewport.minDepth = 0.0f;
    viewport.maxDepth = 1.0f;
    secondary->setViewport(gpu, 0, 1, &viewport);

    // We assume the scissor is not enabled so just set it to the whole RT
    VkRect2D scissor;
    scissor.extent.width = rt->width();
    scissor.extent.height = rt->height();
    scissor.offset.x = 0;
    scissor.offset.y = 0;
    secondary->setScissor(gpu, 0, 1, &scissor);

    secondary->bindInputBuffer(gpu, 0, fVertexBuffer.get());
    // 4-vertex triangle strip = the unit quad from the shared vertex buffer.
    secondary->draw(gpu, 4, 1, 0, 0);
    secondary->end(gpu);
    cmdBuffer->executeCommands(gpu, secondary);
    cmdBuffer->endRenderPass(gpu);
    secondary->unref(gpu);

    // Release all temp resources which should now be reffed by the cmd buffer
    pipeline->unref(gpu);
    uniformDS->unref(gpu);
    samplerDS->unref(gpu);
    sampler->unref(gpu);
    renderPass->unref(gpu);

    return true;
}
// Builds the reusable "copy" program: vertex/fragment shader modules, the
// pipeline layout (uniform-buffer set 0 + sampler set 1), a unit-quad vertex
// buffer, and a uniform buffer holding the two float4 transform uniforms.
// Returns false (after releasing partial state) on any failure.
bool GrVkCopyManager::createCopyProgram(GrVkGpu* gpu) {
    TRACE_EVENT0("skia", TRACE_FUNC);

    const GrShaderCaps* shaderCaps = gpu->caps()->shaderCaps();
    const char* version = shaderCaps->versionDeclString();

    SkSL::String vertShaderText(version);
    vertShaderText.append(
        "#extension GL_ARB_separate_shader_objects : enable\n"
        "#extension GL_ARB_shading_language_420pack : enable\n"

        "layout(set = 0, binding = 0) uniform vertexUniformBuffer {"
        "half4 uPosXform;"
        "half4 uTexCoordXform;"
        "};"
        "layout(location = 0) in float2 inPosition;"
        "layout(location = 1) out half2 vTexCoord;"

        "// Copy Program VS\n"
        "void main() {"
        "vTexCoord = half2(inPosition * uTexCoordXform.xy + uTexCoordXform.zw);"
        "sk_Position.xy = inPosition * uPosXform.xy + uPosXform.zw;"
        "sk_Position.zw = half2(0, 1);"
        "}"
    );

    SkSL::String fragShaderText(version);
    fragShaderText.append(
        "#extension GL_ARB_separate_shader_objects : enable\n"
        "#extension GL_ARB_shading_language_420pack : enable\n"

        "layout(set = 1, binding = 0) uniform sampler2D uTextureSampler;"
        "layout(location = 1) in half2 vTexCoord;"

        "// Copy Program FS\n"
        "void main() {"
        "sk_FragColor = texture(uTextureSampler, vTexCoord);"
        "}"
    );

    SkSL::Program::Settings settings;
    SkSL::String spirv;
    SkSL::Program::Inputs inputs;
    if (!GrCompileVkShaderModule(gpu, vertShaderText, VK_SHADER_STAGE_VERTEX_BIT,
                                 &fVertShaderModule, &fShaderStageInfo[0], settings, &spirv,
                                 &inputs)) {
        this->destroyResources(gpu);
        return false;
    }
    SkASSERT(inputs.isEmpty());

    if (!GrCompileVkShaderModule(gpu, fragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT,
                                 &fFragShaderModule, &fShaderStageInfo[1], settings, &spirv,
                                 &inputs)) {
        this->destroyResources(gpu);
        return false;
    }
    SkASSERT(inputs.isEmpty());

    VkDescriptorSetLayout dsLayout[2];

    GrVkResourceProvider& resourceProvider = gpu->resourceProvider();

    dsLayout[GrVkUniformHandler::kUniformBufferDescSet] = resourceProvider.getUniformDSLayout();

    uint32_t samplerVisibility = kFragment_GrShaderFlag;
    SkTArray<uint32_t> visibilityArray(&samplerVisibility, 1);

    resourceProvider.getSamplerDescriptorSetHandle(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                                                   visibilityArray, &fSamplerDSHandle);
    dsLayout[GrVkUniformHandler::kSamplerDescSet] =
            resourceProvider.getSamplerDSLayout(fSamplerDSHandle);

    // Create the VkPipelineLayout
    VkPipelineLayoutCreateInfo layoutCreateInfo;
    // BUG FIX: previously this zeroed only sizeof(VkPipelineLayoutCreateFlags) (4 bytes),
    // leaving most of the create-info struct uninitialized.
    memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
    layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    layoutCreateInfo.pNext = nullptr;
    layoutCreateInfo.flags = 0;
    layoutCreateInfo.setLayoutCount = 2;
    layoutCreateInfo.pSetLayouts = dsLayout;
    layoutCreateInfo.pushConstantRangeCount = 0;
    layoutCreateInfo.pPushConstantRanges = nullptr;

    VkPipelineLayout pipelineLayout;
    VkResult err = GR_VK_CALL(gpu->vkInterface(),
                              CreatePipelineLayout(gpu->device(), &layoutCreateInfo, nullptr,
                                                   &pipelineLayout));
    if (err) {
        this->destroyResources(gpu);
        return false;
    }

    fPipelineLayout = new GrVkPipelineLayout(pipelineLayout);

    // Unit quad as a triangle strip: (0,0), (0,1), (1,0), (1,1).
    static const float vdata[] = {
        0, 0,
        0, 1,
        1, 0,
        1, 1
    };
    fVertexBuffer = GrVkVertexBuffer::Make(gpu, sizeof(vdata), false);
    // Fail cleanly if the vertex buffer couldn't be created instead of
    // dereferencing null in release builds.
    if (!fVertexBuffer.get()) {
        this->destroyResources(gpu);
        return false;
    }
    fVertexBuffer->updateData(vdata, sizeof(vdata));

    // We use 2 float4's for uniforms
    fUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, 8 * sizeof(float)));
    if (!fUniformBuffer.get()) {
        this->destroyResources(gpu);
        return false;
    }

    return true;
}
bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment, uint32_t memoryTypeIndex, GrVkAlloc* alloc) { VkDeviceSize alignedSize = align_size(size, alignment); // if requested is larger than our subheap allocation, just alloc directly if (alignedSize > fSubHeapSize) { VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType NULL, // pNext size, // allocationSize memoryTypeIndex, // memoryTypeIndex }; VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(), &allocInfo, nullptr, &alloc->fMemory)); if (VK_SUCCESS != err) { return false; } alloc->fOffset = 0; alloc->fSize = 0; // hint that this is not a subheap allocation return true; } // first try to find a subheap that fits our allocation request int bestFitIndex = -1; VkDeviceSize bestFitSize = 0x7FFFFFFF; for (auto i = 0; i < fSubHeaps.count(); ++i) { if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex) { VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize(); if (heapSize >= alignedSize && heapSize < bestFitSize) { bestFitIndex = i; bestFitSize = heapSize; } } } if (bestFitIndex >= 0) { SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { fUsedSize += alloc->fSize; return true; } return false; } // need to allocate a new subheap SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment)); // try to recover from failed allocation by only allocating what we need if (subHeap->size() == 0) { VkDeviceSize alignedSize = align_size(size, alignment); subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment)); if (subHeap->size() == 0) { return false; } } fAllocSize += fSubHeapSize; if (subHeap->alloc(size, alloc)) { fUsedSize += alloc->fSize; return true; } return false; }
GrVkSubHeap::~GrVkSubHeap() { const GrVkInterface* iface = fGpu->vkInterface(); GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr)); }
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu, VkImage image, bool linearTiling, GrVkAlloc* alloc) { const GrVkInterface* iface = gpu->vkInterface(); VkDevice device = gpu->device(); VkMemoryRequirements memReqs; GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs)); uint32_t typeIndex = 0; GrVkHeap* heap; if (linearTiling) { VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(), memReqs.memoryTypeBits, desiredMemProps, &typeIndex)) { // this memory type should always be available SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(), memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &typeIndex)); } heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap); } else { // this memory type should always be available SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(), memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &typeIndex)); if (memReqs.size <= kMaxSmallImageSize) { heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap); } else { heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap); } } if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) { SkDebugf("Failed to alloc image\n"); return false; } // Bind Memory to device VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, alloc->fMemory, alloc->fOffset)); if (err) { SkASSERT_RELEASE(heap->free(*alloc)); return false; } gTotalImageMemory += alloc->fSize; VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize); gTotalImageMemoryFullPage += pageAlignedSize; return true; }
// Presents the current backbuffer: flushes rendering, records a barrier that
// transitions the swapchain image to PRESENT_SRC and transfers its queue
// ownership from the graphics queue to the present queue, submits that barrier
// (signaling fRenderSemaphore), then queues the present which waits on it.
void VulkanWindowContext::swapBuffers() {
    BackbufferInfo* backbuffer = fBackbuffers + fCurrentBackbufferIndex;

    // Flush pending GPU work for this image before presenting.
    // NOTE(review): the trailing 24 looks like a stencil-bit count — confirm
    // against presentRenderSurface's signature.
    this->presentRenderSurface(fSurfaces[backbuffer->fImageIndex],
                               fRenderTargets[backbuffer->fImageIndex], 24);

    // Transition from color-attachment (where rendering left the image) to
    // present-src, and hand ownership to the present queue family.
    VkImageLayout layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // sType
        NULL,                                        // pNext
        srcAccessMask,                               // outputMask
        dstAccessMask,                               // inputMask
        layout,                                      // oldLayout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,             // newLayout
        fBackendContext->fGraphicsQueueIndex,        // srcQueueFamilyIndex
        fPresentQueueIndex,                          // dstQueueFamilyIndex
        fImages[backbuffer->fImageIndex],            // image
        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }    // subresourceRange
    };

    // Record the barrier into this backbuffer's dedicated "present" command
    // buffer (index 1; index 0 is the acquire-side transition).
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        ResetCommandBuffer(backbuffer->fTransitionCmdBuffers[1], 0));
    VkCommandBufferBeginInfo info;
    memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.flags = 0;
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        BeginCommandBuffer(backbuffer->fTransitionCmdBuffers[1], &info));
    GR_VK_CALL(fBackendContext->fInterface,
               CmdPipelineBarrier(backbuffer->fTransitionCmdBuffers[1],
                                  srcStageMask, dstStageMask, 0,
                                  0, nullptr,
                                  0, nullptr,
                                  1, &imageMemoryBarrier));
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        EndCommandBuffer(backbuffer->fTransitionCmdBuffers[1]));

    // Remember the layout so the next acquire knows what to transition from.
    fImageLayouts[backbuffer->fImageIndex] = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    // insert the layout transfer into the queue and wait on the acquire
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 0;   // nothing to wait on; ordering comes from the queue
    submitInfo.pWaitDstStageMask = 0;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &backbuffer->fTransitionCmdBuffers[1];
    submitInfo.signalSemaphoreCount = 1;
    submitInfo.pSignalSemaphores = &backbuffer->fRenderSemaphore;

    // Fence [1] lets getAvailableBackbuffer know when this submission is done.
    GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                        QueueSubmit(fBackendContext->fQueue, 1, &submitInfo,
                                    backbuffer->fUsageFences[1]));

    // Submit present operation to present queue
    const VkPresentInfoKHR presentInfo = {
        VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,    // sType
        NULL,                                  // pNext
        1,                                     // waitSemaphoreCount
        &backbuffer->fRenderSemaphore,         // pWaitSemaphores
        1,                                     // swapchainCount
        &fSwapchain,                           // pSwapchains
        &backbuffer->fImageIndex,              // pImageIndices
        NULL                                   // pResults
    };

    fQueuePresentKHR(fPresentQueue, &presentInfo);
}
void deleteFence(sk_gpu_test::PlatformFence opaqueFence) const override { VkFence fence = (VkFence)opaqueFence; GR_VK_CALL(fVk, DestroyFence(fDevice, fence, nullptr)); SkDEBUGCODE(--fUnfinishedSyncs;) }
bool VulkanWindowContext::createSwapchain(uint32_t width, uint32_t height, const DisplayParams& params) { // check for capabilities VkSurfaceCapabilitiesKHR caps; VkResult res = fGetPhysicalDeviceSurfaceCapabilitiesKHR(fBackendContext->fPhysicalDevice, fSurface, &caps); if (VK_SUCCESS != res) { return false; } uint32_t surfaceFormatCount; res = fGetPhysicalDeviceSurfaceFormatsKHR(fBackendContext->fPhysicalDevice, fSurface, &surfaceFormatCount, nullptr); if (VK_SUCCESS != res) { return false; } SkAutoMalloc surfaceFormatAlloc(surfaceFormatCount * sizeof(VkSurfaceFormatKHR)); VkSurfaceFormatKHR* surfaceFormats = (VkSurfaceFormatKHR*)surfaceFormatAlloc.get(); res = fGetPhysicalDeviceSurfaceFormatsKHR(fBackendContext->fPhysicalDevice, fSurface, &surfaceFormatCount, surfaceFormats); if (VK_SUCCESS != res) { return false; } uint32_t presentModeCount; res = fGetPhysicalDeviceSurfacePresentModesKHR(fBackendContext->fPhysicalDevice, fSurface, &presentModeCount, nullptr); if (VK_SUCCESS != res) { return false; } SkAutoMalloc presentModeAlloc(presentModeCount * sizeof(VkPresentModeKHR)); VkPresentModeKHR* presentModes = (VkPresentModeKHR*)presentModeAlloc.get(); res = fGetPhysicalDeviceSurfacePresentModesKHR(fBackendContext->fPhysicalDevice, fSurface, &presentModeCount, presentModes); if (VK_SUCCESS != res) { return false; } VkExtent2D extent = caps.currentExtent; // use the hints if (extent.width == (uint32_t)-1) { extent.width = width; extent.height = height; } // clamp width; to protect us from broken hints if (extent.width < caps.minImageExtent.width) { extent.width = caps.minImageExtent.width; } else if (extent.width > caps.maxImageExtent.width) { extent.width = caps.maxImageExtent.width; } // clamp height if (extent.height < caps.minImageExtent.height) { extent.height = caps.minImageExtent.height; } else if (extent.height > caps.maxImageExtent.height) { extent.height = caps.maxImageExtent.height; } fWidth = (int)extent.width; fHeight = (int)extent.height; uint32_t 
imageCount = caps.minImageCount + 2; if (caps.maxImageCount > 0 && imageCount > caps.maxImageCount) { // Application must settle for fewer images than desired: imageCount = caps.maxImageCount; } VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; SkASSERT((caps.supportedUsageFlags & usageFlags) == usageFlags); SkASSERT(caps.supportedTransforms & caps.currentTransform); SkASSERT(caps.supportedCompositeAlpha & (VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR | VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR)); VkCompositeAlphaFlagBitsKHR composite_alpha = (caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) ? VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR : VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; // Pick our surface format. For now, just make sure it matches our sRGB request: VkFormat surfaceFormat = VK_FORMAT_UNDEFINED; VkColorSpaceKHR colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR; bool wantSRGB = kSRGB_SkColorProfileType == params.fProfileType; for (uint32_t i = 0; i < surfaceFormatCount; ++i) { GrPixelConfig config; if (GrVkFormatToPixelConfig(surfaceFormats[i].format, &config) && GrPixelConfigIsSRGB(config) == wantSRGB) { surfaceFormat = surfaceFormats[i].format; colorSpace = surfaceFormats[i].colorSpace; break; } } fDisplayParams = params; if (VK_FORMAT_UNDEFINED == surfaceFormat) { return false; } // If mailbox mode is available, use it, as it is the lowest-latency non- // tearing mode. If not, fall back to FIFO which is always available. 
VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR; for (uint32_t i = 0; i < presentModeCount; ++i) { // use mailbox if (VK_PRESENT_MODE_MAILBOX_KHR == presentModes[i]) { mode = presentModes[i]; break; } } VkSwapchainCreateInfoKHR swapchainCreateInfo; memset(&swapchainCreateInfo, 0, sizeof(VkSwapchainCreateInfoKHR)); swapchainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swapchainCreateInfo.surface = fSurface; swapchainCreateInfo.minImageCount = imageCount; swapchainCreateInfo.imageFormat = surfaceFormat; swapchainCreateInfo.imageColorSpace = colorSpace; swapchainCreateInfo.imageExtent = extent; swapchainCreateInfo.imageArrayLayers = 1; swapchainCreateInfo.imageUsage = usageFlags; uint32_t queueFamilies[] = { fBackendContext->fGraphicsQueueIndex, fPresentQueueIndex }; if (fBackendContext->fGraphicsQueueIndex != fPresentQueueIndex) { swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT; swapchainCreateInfo.queueFamilyIndexCount = 2; swapchainCreateInfo.pQueueFamilyIndices = queueFamilies; } else { swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; swapchainCreateInfo.queueFamilyIndexCount = 0; swapchainCreateInfo.pQueueFamilyIndices = nullptr; } swapchainCreateInfo.preTransform = caps.currentTransform;; swapchainCreateInfo.compositeAlpha = composite_alpha; swapchainCreateInfo.presentMode = mode; swapchainCreateInfo.clipped = true; swapchainCreateInfo.oldSwapchain = fSwapchain; res = fCreateSwapchainKHR(fBackendContext->fDevice, &swapchainCreateInfo, nullptr, &fSwapchain); if (VK_SUCCESS != res) { return false; } // destroy the old swapchain if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) { GR_VK_CALL(fBackendContext->fInterface, DeviceWaitIdle(fBackendContext->fDevice)); this->destroyBuffers(); fDestroySwapchainKHR(fBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr); } this->createBuffers(swapchainCreateInfo.imageFormat); return true; }
// Finalizes the builder into a GrVkPipelineState: builds the sampler and
// uniform-buffer descriptor set layouts, the pipeline layout, compiles the
// shader modules, and creates the pipeline. Shader modules are destroyed once
// the pipeline is created; on pipeline-creation failure all Vulkan objects
// created here are destroyed and nullptr is returned.
GrVkPipelineState* GrVkPipelineStateBuilder::finalize(GrPrimitiveType primitiveType,
                                                      const GrVkRenderPass& renderPass,
                                                      const GrVkPipelineState::Desc& desc) {
    VkDescriptorSetLayout dsLayout[2];
    VkPipelineLayout pipelineLayout;
    VkShaderModule vertShaderModule;
    VkShaderModule fragShaderModule;

    uint32_t numSamplers = fSamplerUniforms.count();

    SkAutoTDeleteArray<VkDescriptorSetLayoutBinding> dsSamplerBindings(
            new VkDescriptorSetLayoutBinding[numSamplers]);
    for (uint32_t i = 0; i < numSamplers; ++i) {
        UniformHandle uniHandle = fSamplerUniforms[i];
        GrVkUniformHandler::UniformInfo uniformInfo = fUniformHandler.getUniformInfo(uniHandle);
        SkASSERT(kSampler2D_GrSLType == uniformInfo.fVariable.getType());
        SkASSERT(0 == uniformInfo.fSetNumber);
        SkASSERT(uniformInfo.fBinding == i);
        dsSamplerBindings[i].binding = uniformInfo.fBinding;
        dsSamplerBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        dsSamplerBindings[i].descriptorCount = 1;
        dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(uniformInfo.fVisibility);
        dsSamplerBindings[i].pImmutableSamplers = nullptr;
    }

    VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
    memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
    dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    dsSamplerLayoutCreateInfo.pNext = nullptr;
    dsSamplerLayoutCreateInfo.flags = 0;
    dsSamplerLayoutCreateInfo.bindingCount = fSamplerUniforms.count();
    // Setting to nullptr fixes an error in the param checker validation layer. Even though
    // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is null.
    dsSamplerLayoutCreateInfo.pBindings = fSamplerUniforms.count() ? dsSamplerBindings.get()
                                                                   : nullptr;

    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(),
                        CreateDescriptorSetLayout(fGpu->device(),
                                                  &dsSamplerLayoutCreateInfo,
                                                  nullptr,
                                                  &dsLayout[GrVkUniformHandler::kSamplerDescSet]));

    // Create Uniform Buffer Descriptor
    // We always attach uniform buffers to descriptor set 1. The vertex uniform buffer will have
    // binding 0 and the fragment binding 1.
    VkDescriptorSetLayoutBinding dsUniBindings[2];
    memset(&dsUniBindings, 0, 2 * sizeof(VkDescriptorSetLayoutBinding));
    dsUniBindings[0].binding = GrVkUniformHandler::kVertexBinding;
    dsUniBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsUniBindings[0].descriptorCount = fUniformHandler.hasVertexUniforms() ? 1 : 0;
    dsUniBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    dsUniBindings[0].pImmutableSamplers = nullptr;
    dsUniBindings[1].binding = GrVkUniformHandler::kFragBinding;
    dsUniBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsUniBindings[1].descriptorCount = fUniformHandler.hasFragmentUniforms() ? 1 : 0;
    dsUniBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dsUniBindings[1].pImmutableSamplers = nullptr;

    VkDescriptorSetLayoutCreateInfo dsUniformLayoutCreateInfo;
    memset(&dsUniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
    dsUniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    dsUniformLayoutCreateInfo.pNext = nullptr;
    dsUniformLayoutCreateInfo.flags = 0;
    dsUniformLayoutCreateInfo.bindingCount = 2;
    dsUniformLayoutCreateInfo.pBindings = dsUniBindings;

    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(),
                        CreateDescriptorSetLayout(
                                fGpu->device(),
                                &dsUniformLayoutCreateInfo,
                                nullptr,
                                &dsLayout[GrVkUniformHandler::kUniformBufferDescSet]));

    // Create the VkPipelineLayout
    VkPipelineLayoutCreateInfo layoutCreateInfo;
    // BUG FIX: previously this zeroed only sizeof(VkPipelineLayoutCreateFlags) (4 bytes),
    // leaving most of the create-info struct uninitialized.
    memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
    layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    layoutCreateInfo.pNext = nullptr;
    layoutCreateInfo.flags = 0;
    layoutCreateInfo.setLayoutCount = 2;
    layoutCreateInfo.pSetLayouts = dsLayout;
    layoutCreateInfo.pushConstantRangeCount = 0;
    layoutCreateInfo.pPushConstantRanges = nullptr;

    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreatePipelineLayout(fGpu->device(),
                                                                  &layoutCreateInfo,
                                                                  nullptr,
                                                                  &pipelineLayout));

    // We need to enable the following extensions so that the compiler can correctly make spir-v
    // from our glsl shaders.
    fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
    fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
    fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
    fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");

    this->finalizeShaders();

    VkPipelineShaderStageCreateInfo shaderStageInfo[2];
    SkAssertResult(CreateVkShaderModule(fGpu,
                                        VK_SHADER_STAGE_VERTEX_BIT,
                                        fVS,
                                        &vertShaderModule,
                                        &shaderStageInfo[0]));

    SkAssertResult(CreateVkShaderModule(fGpu,
                                        VK_SHADER_STAGE_FRAGMENT_BIT,
                                        fFS,
                                        &fragShaderModule,
                                        &shaderStageInfo[1]));

    GrVkResourceProvider& resourceProvider = fGpu->resourceProvider();
    GrVkPipeline* pipeline = resourceProvider.createPipeline(fPipeline,
                                                             fPrimProc,
                                                             shaderStageInfo,
                                                             2,
                                                             primitiveType,
                                                             renderPass,
                                                             pipelineLayout);
    // The modules are compiled into the pipeline; they are no longer needed.
    GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), vertShaderModule,
                                                        nullptr));
    GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), fragShaderModule,
                                                        nullptr));

    if (!pipeline) {
        // Clean up everything created above before bailing.
        GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
                                                              nullptr));
        GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[0],
                                                                   nullptr));
        GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[1],
                                                                   nullptr));
        return nullptr;
    }

    return new GrVkPipelineState(fGpu,
                                 desc,
                                 pipeline,
                                 pipelineLayout,
                                 dsLayout,
                                 fUniformHandles,
                                 fUniformHandler.fUniforms,
                                 fUniformHandler.fCurrentVertexUBOOffset,
                                 fUniformHandler.fCurrentFragmentUBOOffset,
                                 numSamplers,
                                 fGeometryProcessor,
                                 fXferProcessor,
                                 fFragmentProcessors);
}
void GrVkProgram::writeUniformBuffers(const GrVkGpu* gpu) { fProgramDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer); VkWriteDescriptorSet descriptorWrites[2]; memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet)); uint32_t firstUniformWrite = 0; uint32_t uniformBindingUpdateCount = 0; VkDescriptorBufferInfo vertBufferInfo; // Vertex Uniform Buffer if (fVertexUniformBuffer.get()) { ++uniformBindingUpdateCount; memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo)); vertBufferInfo.buffer = fVertexUniformBuffer->buffer(); vertBufferInfo.offset = 0; vertBufferInfo.range = fVertexUniformBuffer->size(); descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptorWrites[0].pNext = nullptr; descriptorWrites[0].dstSet = fDescriptorSets[1]; descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding; descriptorWrites[0].dstArrayElement = 0; descriptorWrites[0].descriptorCount = 1; descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptorWrites[0].pImageInfo = nullptr; descriptorWrites[0].pBufferInfo = &vertBufferInfo; descriptorWrites[0].pTexelBufferView = nullptr; } VkDescriptorBufferInfo fragBufferInfo; // Fragment Uniform Buffer if (fFragmentUniformBuffer.get()) { if (0 == uniformBindingUpdateCount) { firstUniformWrite = 1; } ++uniformBindingUpdateCount; memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo)); fragBufferInfo.buffer = fFragmentUniformBuffer->buffer(); fragBufferInfo.offset = 0; fragBufferInfo.range = fFragmentUniformBuffer->size(); descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptorWrites[1].pNext = nullptr; descriptorWrites[1].dstSet = fDescriptorSets[1]; descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding;; descriptorWrites[1].dstArrayElement = 0; descriptorWrites[1].descriptorCount = 1; descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptorWrites[1].pImageInfo = nullptr; 
descriptorWrites[1].pBufferInfo = &fragBufferInfo; descriptorWrites[1].pTexelBufferView = nullptr; } if (uniformBindingUpdateCount) { GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), uniformBindingUpdateCount, &descriptorWrites[firstUniformWrite], 0, nullptr)); } }
// Finalizes the builder into a GrVkPipelineState. The sampler descriptor set
// layout is created here (and owned by the resulting state); the uniform-buffer
// layout is borrowed from the resource provider and must NOT be destroyed.
// Shader modules are destroyed once the pipeline is created; on failure all
// locally-created Vulkan objects are destroyed and nullptr is returned.
GrVkPipelineState* GrVkPipelineStateBuilder::finalize(GrPrimitiveType primitiveType,
                                                      const GrVkRenderPass& renderPass,
                                                      const GrVkPipelineState::Desc& desc) {
    VkDescriptorSetLayout dsLayout[2];
    VkPipelineLayout pipelineLayout;
    VkShaderModule vertShaderModule;
    VkShaderModule fragShaderModule;

    uint32_t numSamplers = (uint32_t)fUniformHandler.numSamplers();

    SkAutoTDeleteArray<VkDescriptorSetLayoutBinding> dsSamplerBindings(
            new VkDescriptorSetLayoutBinding[numSamplers]);
    for (uint32_t i = 0; i < numSamplers; ++i) {
        const GrVkGLSLSampler& sampler =
                static_cast<const GrVkGLSLSampler&>(fUniformHandler.getSampler(i));
        SkASSERT(sampler.binding() == i);
        dsSamplerBindings[i].binding = sampler.binding();
        dsSamplerBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        dsSamplerBindings[i].descriptorCount = 1;
        dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(sampler.visibility());
        dsSamplerBindings[i].pImmutableSamplers = nullptr;
    }

    VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
    memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
    dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    dsSamplerLayoutCreateInfo.pNext = nullptr;
    dsSamplerLayoutCreateInfo.flags = 0;
    dsSamplerLayoutCreateInfo.bindingCount = numSamplers;
    // Setting to nullptr fixes an error in the param checker validation layer. Even though
    // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is null.
    dsSamplerLayoutCreateInfo.pBindings = numSamplers ? dsSamplerBindings.get() : nullptr;

    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(),
                        CreateDescriptorSetLayout(fGpu->device(),
                                                  &dsSamplerLayoutCreateInfo,
                                                  nullptr,
                                                  &dsLayout[GrVkUniformHandler::kSamplerDescSet]));

    // This layout is not owned by the PipelineStateBuilder and thus should no be destroyed
    dsLayout[GrVkUniformHandler::kUniformBufferDescSet] = fGpu->resourceProvider().getUniDSLayout();

    // Create the VkPipelineLayout
    VkPipelineLayoutCreateInfo layoutCreateInfo;
    // BUG FIX: previously this zeroed only sizeof(VkPipelineLayoutCreateFlags) (4 bytes),
    // leaving most of the create-info struct uninitialized.
    memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
    layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    layoutCreateInfo.pNext = nullptr;
    layoutCreateInfo.flags = 0;
    layoutCreateInfo.setLayoutCount = 2;
    layoutCreateInfo.pSetLayouts = dsLayout;
    layoutCreateInfo.pushConstantRangeCount = 0;
    layoutCreateInfo.pPushConstantRanges = nullptr;

    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreatePipelineLayout(fGpu->device(),
                                                                  &layoutCreateInfo,
                                                                  nullptr,
                                                                  &pipelineLayout));

    // We need to enable the following extensions so that the compiler can correctly make spir-v
    // from our glsl shaders.
    fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
    fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
    fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
    fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");

    this->finalizeShaders();

    VkPipelineShaderStageCreateInfo shaderStageInfo[2];
    SkAssertResult(CreateVkShaderModule(fGpu,
                                        VK_SHADER_STAGE_VERTEX_BIT,
                                        fVS,
                                        &vertShaderModule,
                                        &shaderStageInfo[0]));

    SkAssertResult(CreateVkShaderModule(fGpu,
                                        VK_SHADER_STAGE_FRAGMENT_BIT,
                                        fFS,
                                        &fragShaderModule,
                                        &shaderStageInfo[1]));

    GrVkResourceProvider& resourceProvider = fGpu->resourceProvider();
    GrVkPipeline* pipeline = resourceProvider.createPipeline(fPipeline,
                                                             fPrimProc,
                                                             shaderStageInfo,
                                                             2,
                                                             primitiveType,
                                                             renderPass,
                                                             pipelineLayout);
    // The modules are compiled into the pipeline; they are no longer needed.
    GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), vertShaderModule,
                                                        nullptr));
    GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), fragShaderModule,
                                                        nullptr));

    if (!pipeline) {
        // Only the sampler layout is owned here; the uniform layout belongs to
        // the resource provider.
        GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
                                                              nullptr));
        GR_VK_CALL(fGpu->vkInterface(),
                   DestroyDescriptorSetLayout(fGpu->device(),
                                              dsLayout[GrVkUniformHandler::kSamplerDescSet],
                                              nullptr));
        this->cleanupFragmentProcessors();
        return nullptr;
    }

    return new GrVkPipelineState(fGpu,
                                 desc,
                                 pipeline,
                                 pipelineLayout,
                                 dsLayout[GrVkUniformHandler::kSamplerDescSet],
                                 fUniformHandles,
                                 fUniformHandler.fUniforms,
                                 fUniformHandler.fCurrentVertexUBOOffset,
                                 fUniformHandler.fCurrentFragmentUBOOffset,
                                 numSamplers,
                                 fGeometryProcessor,
                                 fXferProcessor,
                                 fFragmentProcessors);
}
void GrVkPipeline::freeGPUData(const GrVkGpu* gpu) const { GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr)); }
void GrVkCommandBuffer::endRenderPass(const GrVkGpu* gpu) { SkASSERT(fIsActive); SkASSERT(fActiveRenderPass); GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer)); fActiveRenderPass = nullptr; }
void GrVkImageView::freeGPUData(const GrVkGpu* gpu) const { GR_VK_CALL(gpu->vkInterface(), DestroyImageView(gpu->device(), fImageView, nullptr)); }
// Binds one combined-image-sampler descriptor per texture access: regenerates
// dirty mipmaps, refs the sampler/texture/view so they outlive the command
// buffer, transitions each texture to SHADER_READ_ONLY, and writes the
// descriptor into the sampler descriptor set.
void GrVkPipelineState::writeSamplers(GrVkGpu* gpu,
                                      const SkTArray<const GrTextureAccess*>& textureBindings) {
    SkASSERT(fNumSamplers == textureBindings.count());

    for (int i = 0; i < textureBindings.count(); ++i) {
        const GrTextureParams& params = textureBindings[i]->getParams();

        GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture());

        // Regenerate mipmaps first if this binding samples with mip filtering.
        if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) {
            if (texture->texturePriv().mipMapsAreDirty()) {
                gpu->generateMipmap(texture);
                texture->texturePriv().dirtyMipMaps(false);
            }
        }

        fSamplers.push(gpu->resourceProvider().findOrCreateCompatibleSampler(params,
                                                  texture->texturePriv().maxMipMapLevel()));

        // Ref the backing resource and view so they stay alive while the
        // descriptor set references them.
        const GrVkImage::Resource* textureResource = texture->resource();
        textureResource->ref();
        fTextures.push(textureResource);

        const GrVkImageView* textureView = texture->textureView();
        textureView->ref();
        fTextureViews.push(textureView);

        // Change texture layout so it can be read in shader
        VkImageLayout layout = texture->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        texture->setImageLayout(gpu,
                                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                srcAccessMask,
                                dstAccessMask,
                                srcStageMask,
                                dstStageMask,
                                false);

        VkDescriptorImageInfo imageInfo;
        memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
        imageInfo.sampler = fSamplers[i]->sampler();
        imageInfo.imageView = texture->textureView()->imageView();
        imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        // One descriptor write per sampler; dstBinding matches the loop index.
        VkWriteDescriptorSet writeInfo;
        memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
        writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writeInfo.pNext = nullptr;
        writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
        writeInfo.dstBinding = i;
        writeInfo.dstArrayElement = 0;
        writeInfo.descriptorCount = 1;
        writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writeInfo.pImageInfo = &imageInfo;
        writeInfo.pBufferInfo = nullptr;
        writeInfo.pTexelBufferView = nullptr;

        GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
                                                            1,
                                                            &writeInfo,
                                                            0,
                                                            nullptr));
    }
}