static void prepare_sampled_images(const GrProcessor& processor, GrVkGpu* gpu) {
    for (int i = 0; i < processor.numTextures(); ++i) {
        const GrTextureAccess& texAccess = processor.textureAccess(i);
        GrVkTexture* vkTexture = static_cast<GrVkTexture*>(processor.texture(i));
        SkASSERT(vkTexture);

        // We may need to resolve the texture first if it is also a render target
        GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
        if (texRT) {
            gpu->onResolveRenderTarget(texRT);
        }

        const GrTextureParams& params = texAccess.getParams();
        // Check if we need to regenerate any mip maps
        if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) {
            if (vkTexture->texturePriv().mipMapsAreDirty()) {
                gpu->generateMipmap(vkTexture);
                vkTexture->texturePriv().dirtyMipMaps(false);
            }
        }

        // TODO: If we ever decide to create the secondary command buffers ahead of time before we
        // are actually going to submit them, we will need to track the sampled images and delay
        // adding the layout change/barrier until we are ready to submit.
        vkTexture->setImageLayout(gpu,
                                  VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                  VK_ACCESS_SHADER_READ_BIT,
                                  VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                  false);
    }
}
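For context, here is a minimal sketch of what a layout transition like the setImageLayout() call above is assumed to boil down to: a single VkImageMemoryBarrier recorded with vkCmdPipelineBarrier so that later graphics stages can sample the image. The helper name and its parameters are illustrative, not Skia API.

#include <vulkan/vulkan.h>

// Hypothetical helper (not Skia code): transition an image so it can be sampled in shaders.
static void transition_for_sampling(VkCommandBuffer cmd, VkImage image,
                                    VkImageLayout oldLayout, VkAccessFlags srcAccess,
                                    VkPipelineStageFlags srcStage) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = oldLayout;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcAccessMask = srcAccess;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    // Transition every mip level and layer of the color image.
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS,
                                0, VK_REMAINING_ARRAY_LAYERS};
    vkCmdPipelineBarrier(cmd, srcStage, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                         0, 0, nullptr, 0, nullptr, 1, &barrier);
}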
void GrVkPipelineState::writeSamplers(GrVkGpu* gpu,
                                      const SkTArray<const GrTextureAccess*>& textureBindings,
                                      bool allowSRGBInputs) {
    SkASSERT(fNumSamplers == textureBindings.count());

    for (int i = 0; i < textureBindings.count(); ++i) {
        const GrTextureParams& params = textureBindings[i]->getParams();

        GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture());

        if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) {
            if (texture->texturePriv().mipMapsAreDirty()) {
                gpu->generateMipmap(texture);
                texture->texturePriv().dirtyMipMaps(false);
            }
        }

        fSamplers.push(gpu->resourceProvider().findOrCreateCompatibleSampler(
                params, texture->texturePriv().maxMipMapLevel()));

        const GrVkResource* textureResource = texture->resource();
        textureResource->ref();
        fTextures.push(textureResource);

        const GrVkImageView* textureView = texture->textureView(allowSRGBInputs);
        textureView->ref();
        fTextureViews.push(textureView);

        // Change texture layout so it can be read in shader
        texture->setImageLayout(gpu,
                                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                VK_ACCESS_SHADER_READ_BIT,
                                VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                false);

        VkDescriptorImageInfo imageInfo;
        memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
        imageInfo.sampler = fSamplers[i]->sampler();
        imageInfo.imageView = textureView->imageView();
        imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        VkWriteDescriptorSet writeInfo;
        memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
        writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writeInfo.pNext = nullptr;
        writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
        writeInfo.dstBinding = i;
        writeInfo.dstArrayElement = 0;
        writeInfo.descriptorCount = 1;
        writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writeInfo.pImageInfo = &imageInfo;
        writeInfo.pBufferInfo = nullptr;
        writeInfo.pTexelBufferView = nullptr;

        GR_VK_CALL(gpu->vkInterface(),
                   UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
    }
}
void GrVkProgram::writeSamplers(const GrVkGpu* gpu,
                                const SkTArray<const GrTextureAccess*>& textureBindings) {
    SkASSERT(fNumSamplers == textureBindings.count());

    for (int i = 0; i < textureBindings.count(); ++i) {
        fSamplers.push(GrVkSampler::Create(gpu, *textureBindings[i]));

        GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture());

        const GrVkImage::Resource* textureResource = texture->resource();
        textureResource->ref();
        fTextures.push(textureResource);

        const GrVkImageView* textureView = texture->textureView();
        textureView->ref();
        fTextureViews.push(textureView);

        // Change texture layout so it can be read in shader
        VkImageLayout layout = texture->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        texture->setImageLayout(gpu,
                                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                srcAccessMask,
                                dstAccessMask,
                                srcStageMask,
                                dstStageMask,
                                false);

        VkDescriptorImageInfo imageInfo;
        memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
        imageInfo.sampler = fSamplers[i]->sampler();
        imageInfo.imageView = texture->textureView()->imageView();
        imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        VkWriteDescriptorSet writeInfo;
        memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
        writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writeInfo.pNext = nullptr;
        writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
        writeInfo.dstBinding = i;
        writeInfo.dstArrayElement = 0;
        writeInfo.descriptorCount = 1;
        writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writeInfo.pImageInfo = &imageInfo;
        writeInfo.pBufferInfo = nullptr;
        writeInfo.pTexelBufferView = nullptr;

        GR_VK_CALL(gpu->vkInterface(),
                   UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
    }
}
void GrVkGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
                                    const GrPipeline& pipeline,
                                    const GrPipeline::FixedDynamicState* fixedDynamicState,
                                    const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                                    const GrMesh meshes[],
                                    int meshCount,
                                    const SkRect& bounds) {
    if (!meshCount) {
        return;
    }

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    auto prepareSampledImage = [&](GrTexture* texture, GrSamplerState::Filter filter) {
        GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
        // We may need to resolve the texture first if it is also a render target
        GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
        if (texRT) {
            fGpu->resolveRenderTargetNoFlush(texRT);
        }

        // Check if we need to regenerate any mip maps
        if (GrSamplerState::Filter::kMipMap == filter &&
            (vkTexture->width() != 1 || vkTexture->height() != 1)) {
            SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);
            if (vkTexture->texturePriv().mipMapsAreDirty()) {
                fGpu->regenerateMipMapLevels(vkTexture);
            }
        }
        cbInfo.fSampledTextures.push_back(vkTexture);
    };

    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        for (int m = 0, i = 0; m < meshCount; ++m) {
            for (int s = 0; s < primProc.numTextureSamplers(); ++s, ++i) {
                auto texture = dynamicStateArrays->fPrimitiveProcessorTextures[i]->peekTexture();
                prepareSampledImage(texture, primProc.textureSampler(s).samplerState().filter());
            }
        }
    } else {
        for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
            auto texture = fixedDynamicState->fPrimitiveProcessorTextures[i]->peekTexture();
            prepareSampledImage(texture, primProc.textureSampler(i).samplerState().filter());
        }
    }

    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const GrFragmentProcessor::TextureSampler& sampler = fp->textureSampler(i);
            prepareSampledImage(sampler.peekTexture(), sampler.samplerState().filter());
        }
    }
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        cbInfo.fSampledTextures.push_back(sk_ref_sp(static_cast<GrVkTexture*>(dstTexture)));
    }

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    GrVkPipelineState* pipelineState = this->prepareDrawState(primProc, pipeline,
                                                              fixedDynamicState,
                                                              dynamicStateArrays, primitiveType);
    if (!pipelineState) {
        return;
    }

    bool dynamicScissor =
            pipeline.isScissorEnabled() && dynamicStateArrays && dynamicStateArrays->fScissorRects;
    bool dynamicTextures = dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures;

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        if (mesh.primitiveType() != primitiveType) {
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                   dynamicStateArrays, primitiveType);
            if (!pipelineState) {
                return;
            }
        }

        if (dynamicScissor) {
            GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(), fRenderTarget,
                                                     fOrigin,
                                                     dynamicStateArrays->fScissorRects[i]);
        }
        if (dynamicTextures) {
            GrTextureProxy* const* meshProxies = dynamicStateArrays->fPrimitiveProcessorTextures +
                                                 primProc.numTextureSamplers() * i;
            pipelineState->setAndBindTextures(fGpu, primProc, pipeline, meshProxies,
                                              cbInfo.currentCmdBuf());
        }
        SkASSERT(pipelineState);
        mesh.sendToGpu(this);
    }

    cbInfo.fBounds.join(bounds);
    cbInfo.fIsEmpty = false;
}
void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
                                           const GrPrimitiveProcessor& primProc,
                                           const GrPipeline& pipeline,
                                           const GrTextureProxy* const primProcTextures[],
                                           GrVkCommandBuffer* commandBuffer) {
    SkASSERT(primProcTextures || !primProc.numTextureSamplers());

    struct SamplerBindings {
        GrSamplerState fState;
        GrVkTexture* fTexture;
    };
    SkAutoSTMalloc<8, SamplerBindings> samplerBindings(fNumSamplers);
    int currTextureBinding = 0;

    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
        const auto& sampler = primProc.textureSampler(i);
        auto texture = static_cast<GrVkTexture*>(primProcTextures[i]->peekTexture());
        samplerBindings[currTextureBinding++] = {sampler.samplerState(), texture};
    }

    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const auto& sampler = fp->textureSampler(i);
            samplerBindings[currTextureBinding++] = {
                    sampler.samplerState(), static_cast<GrVkTexture*>(sampler.peekTexture())};
        }
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    if (GrTextureProxy* dstTextureProxy = pipeline.dstTextureProxy()) {
        samplerBindings[currTextureBinding++] = {
                GrSamplerState::ClampNearest(),
                static_cast<GrVkTexture*>(dstTextureProxy->peekTexture())};
    }

    // Get new descriptor set
    SkASSERT(fNumSamplers == currTextureBinding);
    if (fNumSamplers) {
        if (fSamplerDescriptorSet) {
            fSamplerDescriptorSet->recycle(gpu);
        }
        fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
        int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
        fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
        for (int i = 0; i < fNumSamplers; ++i) {
            const GrSamplerState& state = samplerBindings[i].fState;
            GrVkTexture* texture = samplerBindings[i].fTexture;

            const GrVkImageView* textureView = texture->textureView();
            const GrVkSampler* sampler = nullptr;
            if (fImmutableSamplers[i]) {
                sampler = fImmutableSamplers[i];
            } else {
                sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
                        state, texture->ycbcrConversionInfo());
            }
            SkASSERT(sampler);

            VkDescriptorImageInfo imageInfo;
            memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
            imageInfo.sampler = sampler->sampler();
            imageInfo.imageView = textureView->imageView();
            imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            VkWriteDescriptorSet writeInfo;
            memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
            writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeInfo.pNext = nullptr;
            writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
            writeInfo.dstBinding = i;
            writeInfo.dstArrayElement = 0;
            writeInfo.descriptorCount = 1;
            writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            writeInfo.pImageInfo = &imageInfo;
            writeInfo.pBufferInfo = nullptr;
            writeInfo.pTexelBufferView = nullptr;

            GR_VK_CALL(gpu->vkInterface(),
                       UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
            commandBuffer->addResource(sampler);
            if (!fImmutableSamplers[i]) {
                sampler->unref(gpu);
            }
            commandBuffer->addResource(samplerBindings[i].fTexture->textureView());
            commandBuffer->addResource(samplerBindings[i].fTexture->resource());
        }

        commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, samplerDSIdx, 1,
                                          &fDescriptorSets[samplerDSIdx], 0, nullptr);
        commandBuffer->addRecycledResource(fSamplerDescriptorSet);
    }
}
bool GrVkCopyManager::copySurfaceAsDraw(GrVkGpu* gpu,
                                        GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                        GrSurface* src, GrSurfaceOrigin srcOrigin,
                                        const SkIRect& srcRect, const SkIPoint& dstPoint,
                                        bool canDiscardOutsideDstRect) {
    // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the
    // swizzle.
    if (gpu->caps()->shaderCaps()->configOutputSwizzle(src->config()) !=
        gpu->caps()->shaderCaps()->configOutputSwizzle(dst->config())) {
        return false;
    }

    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(dst->asRenderTarget());
    if (!rt) {
        return false;
    }

    GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());
    if (!srcTex) {
        return false;
    }

    if (VK_NULL_HANDLE == fVertShaderModule) {
        SkASSERT(VK_NULL_HANDLE == fFragShaderModule &&
                 nullptr == fPipelineLayout &&
                 nullptr == fVertexBuffer.get() &&
                 nullptr == fUniformBuffer.get());
        if (!this->createCopyProgram(gpu)) {
            SkDebugf("Failed to create copy program.\n");
            return false;
        }
    }
    SkASSERT(fPipelineLayout);

    GrVkResourceProvider& resourceProv = gpu->resourceProvider();

    GrVkCopyPipeline* pipeline = resourceProv.findOrCreateCopyPipeline(rt,
                                                                       fShaderStageInfo,
                                                                       fPipelineLayout->layout());
    if (!pipeline) {
        return false;
    }

    // UPDATE UNIFORM DESCRIPTOR SET
    int w = srcRect.width();
    int h = srcRect.height();

    // dst rect edges in NDC (-1 to 1)
    int dw = dst->width();
    int dh = dst->height();
    float dx0 = 2.f * dstPoint.fX / dw - 1.f;
    float dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
    float dy0 = 2.f * dstPoint.fY / dh - 1.f;
    float dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
    if (kBottomLeft_GrSurfaceOrigin == dstOrigin) {
        dy0 = -dy0;
        dy1 = -dy1;
    }

    float sx0 = (float)srcRect.fLeft;
    float sx1 = (float)(srcRect.fLeft + w);
    float sy0 = (float)srcRect.fTop;
    float sy1 = (float)(srcRect.fTop + h);
    int sh = src->height();
    if (kBottomLeft_GrSurfaceOrigin == srcOrigin) {
        sy0 = sh - sy0;
        sy1 = sh - sy1;
    }
    // src rect edges in normalized texture space (0 to 1).
    int sw = src->width();
    sx0 /= sw;
    sx1 /= sw;
    sy0 /= sh;
    sy1 /= sh;

    float uniData[] = { dx1 - dx0, dy1 - dy0, dx0, dy0,    // posXform
                        sx1 - sx0, sy1 - sy0, sx0, sy0 };  // texCoordXform

    fUniformBuffer->updateData(gpu, uniData, sizeof(uniData), nullptr);

    const GrVkDescriptorSet* uniformDS = resourceProv.getUniformDescriptorSet();
    SkASSERT(uniformDS);

    VkDescriptorBufferInfo uniBufferInfo;
    uniBufferInfo.buffer = fUniformBuffer->buffer();
    uniBufferInfo.offset = fUniformBuffer->offset();
    uniBufferInfo.range = fUniformBuffer->size();

    VkWriteDescriptorSet descriptorWrites;
    descriptorWrites.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrites.pNext = nullptr;
    descriptorWrites.dstSet = uniformDS->descriptorSet();
    descriptorWrites.dstBinding = GrVkUniformHandler::kGeometryBinding;
    descriptorWrites.dstArrayElement = 0;
    descriptorWrites.descriptorCount = 1;
    descriptorWrites.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrites.pImageInfo = nullptr;
    descriptorWrites.pBufferInfo = &uniBufferInfo;
    descriptorWrites.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrites, 0, nullptr));

    // UPDATE SAMPLER DESCRIPTOR SET
    const GrVkDescriptorSet* samplerDS =
            gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);

    GrSamplerState samplerState = GrSamplerState::ClampNearest();

    GrVkSampler* sampler = resourceProv.findOrCreateCompatibleSampler(
            samplerState, GrVkYcbcrConversionInfo());

    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = sampler->sampler();
    imageInfo.imageView = srcTex->textureView()->imageView();
    imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = samplerDS->descriptorSet();
    writeInfo.dstBinding = 0;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));

    VkDescriptorSet vkDescSets[] = { uniformDS->descriptorSet(), samplerDS->descriptorSet() };

    GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(srcTex->asRenderTarget());
    if (texRT) {
        gpu->resolveRenderTargetNoFlush(texRT);
    }

    // TODO: Make tighter bounds and then adjust bounds for origin and granularity if we see
    // any perf issues with using the whole bounds
    SkIRect bounds = SkIRect::MakeWH(rt->width(), rt->height());

    // Change layouts of rt and texture. We aren't blending so we don't need color attachment read
    // access for blending.
    GrVkImage* targetImage = rt->msaaImage() ? rt->msaaImage() : rt;
    VkAccessFlags dstAccessFlags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    if (!canDiscardOutsideDstRect) {
        // We need to load the color attachment so need to be able to read it.
        dstAccessFlags |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
    }
    targetImage->setImageLayout(gpu,
                                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                dstAccessFlags,
                                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                false);

    srcTex->setImageLayout(gpu,
                           VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                           VK_ACCESS_SHADER_READ_BIT,
                           VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                           false);

    GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
    if (stencil) {
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        // We aren't actually using the stencil but we still load and store it so we need
        // appropriate barriers.
        // TODO: Once we refactor surface and how we connect stencil to RTs, we should not even
        // have the stencil on this render pass if possible.
        vkStencil->setImageLayout(gpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }

    VkAttachmentLoadOp loadOp = canDiscardOutsideDstRect ? VK_ATTACHMENT_LOAD_OP_DONT_CARE
                                                         : VK_ATTACHMENT_LOAD_OP_LOAD;
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    const GrVkRenderPass* renderPass;
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle = rt->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        renderPass = gpu->resourceProvider().findRenderPass(rpHandle, vkColorOps, vkStencilOps);
    } else {
        renderPass = gpu->resourceProvider().findRenderPass(*rt, vkColorOps, vkStencilOps);
    }
    SkASSERT(renderPass->isCompatible(*rt->simpleRenderPass()));

    GrVkPrimaryCommandBuffer* cmdBuffer = gpu->currentCommandBuffer();
    cmdBuffer->beginRenderPass(gpu, renderPass, nullptr, *rt, bounds, true);

    GrVkSecondaryCommandBuffer* secondary =
            gpu->cmdPool()->findOrCreateSecondaryCommandBuffer(gpu);
    if (!secondary) {
        return false;
    }
    secondary->begin(gpu, rt->framebuffer(), renderPass);

    secondary->bindPipeline(gpu, pipeline);

    // Uniform DescriptorSet, Sampler DescriptorSet, and vertex shader uniformBuffer
    SkSTArray<3, const GrVkRecycledResource*> descriptorRecycledResources;
    descriptorRecycledResources.push_back(uniformDS);
    descriptorRecycledResources.push_back(samplerDS);
    descriptorRecycledResources.push_back(fUniformBuffer->resource());

    // One sampler, texture view, and texture
    SkSTArray<3, const GrVkResource*> descriptorResources;
    descriptorResources.push_back(sampler);
    descriptorResources.push_back(srcTex->textureView());
    descriptorResources.push_back(srcTex->resource());

    secondary->bindDescriptorSets(gpu,
                                  descriptorRecycledResources,
                                  descriptorResources,
                                  fPipelineLayout,
                                  0,
                                  2,
                                  vkDescSets,
                                  0,
                                  nullptr);

    // Set Dynamic viewport and stencil
    // We always use one viewport the size of the RT
    VkViewport viewport;
    viewport.x = 0.0f;
    viewport.y = 0.0f;
    viewport.width = SkIntToScalar(rt->width());
    viewport.height = SkIntToScalar(rt->height());
    viewport.minDepth = 0.0f;
    viewport.maxDepth = 1.0f;
    secondary->setViewport(gpu, 0, 1, &viewport);

    // We assume the scissor is not enabled so just set it to the whole RT
    VkRect2D scissor;
    scissor.extent.width = rt->width();
    scissor.extent.height = rt->height();
    scissor.offset.x = 0;
    scissor.offset.y = 0;
    secondary->setScissor(gpu, 0, 1, &scissor);

    secondary->bindInputBuffer(gpu, 0, fVertexBuffer.get());
    secondary->draw(gpu, 4, 1, 0, 0);
    secondary->end(gpu);
    cmdBuffer->executeCommands(gpu, secondary);
    cmdBuffer->endRenderPass(gpu);
    secondary->unref(gpu);

    // Release all temp resources which should now be reffed by the cmd buffer
    pipeline->unref(gpu);
    uniformDS->unref(gpu);
    samplerDS->unref(gpu);
    sampler->unref(gpu);
    renderPass->unref(gpu);

    return true;
}
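A side note on the uniform data above: posXform and texCoordXform are packed as (scale.x, scale.y, translate.x, translate.y) pairs, on the assumption that the copy program's vertex shader maps a unit quad via p' = p * xform.xy + xform.zw. The following standalone sketch (hypothetical values, not Skia code) just checks that this packing sends the quad corners to the destination NDC rect and the normalized source rect:

#include <cstdio>

struct Xform { float sx, sy, tx, ty; };  // (scale.x, scale.y, translate.x, translate.y)

// Apply the assumed vertex-shader transform: scale, then translate.
static void apply(const Xform& x, float px, float py, float* ox, float* oy) {
    *ox = px * x.sx + x.tx;
    *oy = py * x.sy + x.ty;
}

int main() {
    // Hypothetical copy: a 32x32 src rect at (8,8) in a 128x128 src, pasted at (0,0) in a 64x64 dst.
    float dx0 = -1.f, dy0 = -1.f, dx1 = 0.f, dy1 = 0.f;                  // dst rect edges in NDC
    float sx0 = 8.f / 128, sy0 = 8.f / 128;                              // src rect edges,
    float sx1 = 40.f / 128, sy1 = 40.f / 128;                            // normalized to (0,1)
    Xform pos = { dx1 - dx0, dy1 - dy0, dx0, dy0 };                      // posXform packing
    Xform tex = { sx1 - sx0, sy1 - sy0, sx0, sy0 };                      // texCoordXform packing

    float x, y;
    apply(pos, 1.f, 1.f, &x, &y);   // unit-quad corner (1,1) -> (dx1, dy1) = (0, 0)
    printf("pos (1,1) -> (%g, %g)\n", x, y);
    apply(tex, 0.f, 0.f, &x, &y);   // unit-quad corner (0,0) -> (sx0, sy0) = (0.0625, 0.0625)
    printf("tex (0,0) -> (%g, %g)\n", x, y);
    return 0;
}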
DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkImageLayoutTest, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->contextPriv().getGpu());

    GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(nullptr, 1, 1,
                                                                       kRGBA_8888_GrPixelConfig,
                                                                       false,
                                                                       GrMipMapped::kNo);
    REPORTER_ASSERT(reporter, backendTex.isValid());

    GrVkImageInfo info;
    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    VkImageLayout initLayout = info.fImageLayout;

    // Verify that setting the layout via a copy of a backendTexture is reflected in all the
    // backendTextures.
    GrBackendTexture backendTexCopy = backendTex;
    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    backendTexCopy.setVkImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == info.fImageLayout);

    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == info.fImageLayout);

    // Setting back the layout since we didn't actually change it
    backendTex.setVkImageLayout(initLayout);

    sk_sp<SkImage> wrappedImage = SkImage::MakeFromTexture(context, backendTex,
                                                           kTopLeft_GrSurfaceOrigin,
                                                           kRGBA_8888_SkColorType,
                                                           kPremul_SkAlphaType, nullptr);
    REPORTER_ASSERT(reporter, wrappedImage.get());

    sk_sp<GrTextureProxy> texProxy = as_IB(wrappedImage)->asTextureProxyRef();
    REPORTER_ASSERT(reporter, texProxy.get());
    REPORTER_ASSERT(reporter, texProxy->priv().isInstantiated());
    GrTexture* texture = texProxy->priv().peekTexture();
    REPORTER_ASSERT(reporter, texture);

    // Verify that modifying the layout via the GrVkTexture is reflected in the GrBackendTexture
    GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
    REPORTER_ASSERT(reporter, initLayout == vkTexture->currentLayout());
    vkTexture->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == info.fImageLayout);

    GrBackendTexture backendTexImage = wrappedImage->getBackendTexture(false);
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == info.fImageLayout);

    // Verify that modifying the layout via the GrBackendTexture is reflected in the GrVkTexture
    backendTexImage.setVkImageLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == vkTexture->currentLayout());

#ifdef SK_SUPPORT_LEGACY_BACKEND_OBJECTS
    // Verify that modifying the layout via the old textureHandle still works and is reflected in
    // the GrVkTexture and GrBackendTexture.
    GrVkImageInfo* backendInfo = (GrVkImageInfo*)wrappedImage->getTextureHandle(false);
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == backendInfo->fImageLayout);

    backendInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
    REPORTER_ASSERT(reporter,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == vkTexture->currentLayout());
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == info.fImageLayout);
#endif

    vkTexture->updateImageLayout(initLayout);

    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    // Check that we can do things like assigning the backend texture to an invalid one, assigning
    // an invalid one, assigning a backend texture to itself, etc. Success here is that we don't
    // hit any of our ref counting asserts.
    REPORTER_ASSERT(reporter, GrBackendTexture::TestingOnly_Equals(backendTex, backendTexCopy));

    GrBackendTexture invalidTexture;
    REPORTER_ASSERT(reporter, !invalidTexture.isValid());
    REPORTER_ASSERT(reporter,
                    !GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTexCopy));

    backendTexCopy = invalidTexture;
    REPORTER_ASSERT(reporter, !backendTexCopy.isValid());
    REPORTER_ASSERT(reporter,
                    !GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTexCopy));

    invalidTexture = backendTex;
    REPORTER_ASSERT(reporter, invalidTexture.isValid());
    REPORTER_ASSERT(reporter, GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTex));

    invalidTexture = static_cast<decltype(invalidTexture)&>(invalidTexture);
    REPORTER_ASSERT(reporter, invalidTexture.isValid());
    REPORTER_ASSERT(reporter,
                    GrBackendTexture::TestingOnly_Equals(invalidTexture, invalidTexture));

    gpu->deleteTestingOnlyBackendTexture(backendTex);
}