// Finishes a flush: creates/wraps one GrSemaphore per requested backend
// semaphore, signals each, and reports whether semaphores were submitted.
// Slots in 'backendSemaphores' that arrive uninitialized are filled with the
// handles of the semaphores created here.
GrSemaphoresSubmitted GrGpu::finishFlush(int numSemaphores, GrBackendSemaphore backendSemaphores[]) {
    // Provider used to create or wrap the semaphores signaled by this flush.
    GrResourceProvider* provider = fContext->contextPriv().resourceProvider();

    if (this->caps()->fenceSyncSupport()) {
        for (int idx = 0; idx < numSemaphores; ++idx) {
            const bool clientSupplied = backendSemaphores[idx].isInitialized();
            sk_sp<GrSemaphore> sem;
            if (!clientSupplied) {
                // Caller passed an empty slot: make a fresh semaphore for them.
                sem = provider->makeSemaphore(false);
            } else {
                // Borrow the client's semaphore; it will be signaled by this flush.
                sem = provider->wrapBackendSemaphore(
                        backendSemaphores[idx],
                        GrResourceProvider::SemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
            }
            this->insertSemaphore(sem, false);
            if (!clientSupplied) {
                // Hand the newly created backend handle back to the caller.
                sem->setBackendSemaphore(&backendSemaphores[idx]);
            }
        }
    }

    this->onFinishFlush(numSemaphores > 0 && this->caps()->fenceSyncSupport());
    return this->caps()->fenceSyncSupport() ? GrSemaphoresSubmitted::kYes
                                            : GrSemaphoresSubmitted::kNo;
}
// Tessellates the shape's path into triangles and draws them, caching the
// resulting GPU vertex buffer under a unique key built from the shape's
// unstyled key plus (for inverse fills) the clip bounds.
void draw(Target* target, const GrGeometryProcessor* gp) const {
    GrResourceProvider* rp = target->resourceProvider();
    SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
    // Convert the screen-space tolerance into source space using the view matrix.
    SkScalar tol = GrPathUtils::scaleToleranceToSrc(screenSpaceTol, fViewMatrix, fShape.bounds());

    SkPath path;
    fShape.asPath(&path);
    bool inverseFill = path.isInverseFillType();

    // construct a cache key from the path's genID and the view matrix
    static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
    GrUniqueKey key;
    static constexpr int kClipBoundsCnt = sizeof(fClipBounds) / sizeof(uint32_t);
    int shapeKeyDataCnt = fShape.unstyledKeySize();
    SkASSERT(shapeKeyDataCnt >= 0);
    GrUniqueKey::Builder builder(&key, kDomain, shapeKeyDataCnt + kClipBoundsCnt);
    fShape.writeUnstyledKey(&builder[0]);
    // For inverse fills, the tessellation is dependent on clip bounds.
    if (inverseFill) {
        memcpy(&builder[shapeKeyDataCnt], &fClipBounds, sizeof(fClipBounds));
    } else {
        // Non-inverse fills don't depend on the clip, so zero that part of the key.
        memset(&builder[shapeKeyDataCnt], 0, sizeof(fClipBounds));
    }
    builder.finish();

    // Reuse a cached tessellation if one exists at a compatible tolerance.
    SkAutoTUnref<GrBuffer> cachedVertexBuffer(rp->findAndRefTByUniqueKey<GrBuffer>(key));
    int actualCount;
    if (cache_match(cachedVertexBuffer.get(), tol, &actualCount)) {
        this->drawVertices(target, gp, cachedVertexBuffer.get(), 0, actualCount);
        return;
    }

    bool isLinear;
    bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
    StaticVertexAllocator allocator(rp, canMapVB);
    int count = GrTessellator::PathToTriangles(path, tol, fClipBounds, &allocator, &isLinear);
    if (count == 0) {
        return;
    }
    this->drawVertices(target, gp, allocator.vertexBuffer(), 0, count);

    // Store the tolerance and vertex count alongside the cached buffer so
    // future lookups can validate it. A tolerance of 0 marks a fully linear
    // path, which never needs re-tessellation at a finer tolerance.
    TessInfo info;
    info.fTolerance = isLinear ? 0 : tol;
    info.fCount = count;
    SkAutoTUnref<SkData> data(SkData::NewWithCopy(&info, sizeof(info)));
    key.setCustomData(data.get());
    rp->assignUniqueKeyToResource(key, allocator.vertexBuffer());
}
// Creates a non-budgeted backing texture and returns a borrowed, read-only
// proxy wrapping its backend handle. The backing texture is handed back via
// 'backingSurface' so the caller controls its lifetime.
// NOTE(review): 'fit' is accepted for signature parity but is not used here.
static sk_sp<GrTextureProxy> create_wrapped_backend(GrContext* context, SkBackingFit fit,
                                                    sk_sp<GrTexture>* backingSurface) {
    GrProxyProvider* pp = context->priv().proxyProvider();
    GrResourceProvider* rp = context->priv().resourceProvider();

    const GrSurfaceDesc desc = make_desc(kNone_GrSurfaceFlags);

    *backingSurface = rp->createTexture(desc, SkBudgeted::kNo,
                                        GrResourceProvider::Flags::kNoPendingIO);
    if (!*backingSurface) {
        return nullptr;
    }

    GrBackendTexture handle = (*backingSurface)->getBackendTexture();
    handle.setPixelConfig(desc.fConfig);

    return pp->wrapBackendTexture(handle, kBottomLeft_GrSurfaceOrigin, kBorrow_GrWrapOwnership,
                                  GrWrapCacheable::kYes, kRead_GrIOType);
}
// Exercises the resource allocator's decisions about whether two proxies may
// share a backing GrSurface, for overlapping and non-overlapping intervals.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
    GrProxyProvider* proxyProvider = ctxInfo.grContext()->contextPriv().proxyProvider();
    GrResourceProvider* resourceProvider = ctxInfo.grContext()->contextPriv().resourceProvider();
    // Force explicit allocation so the allocator's sharing logic runs; the
    // prior setting is restored at the bottom of the test.
    bool orig = resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(true);

    struct TestCase {
        ProxyParams fP1;
        ProxyParams fP2;
        bool        fExpectation;
    };

    constexpr bool kRT = true;
    constexpr bool kNotRT = false;

    constexpr bool kShare = true;
    constexpr bool kDontShare = false;
    // Non-RT GrSurfaces are never recycled on some platforms.
    bool kConditionallyShare = resourceProvider->caps()->reuseScratchTextures();

    const GrPixelConfig kRGBA = kRGBA_8888_GrPixelConfig;
    const GrPixelConfig kBGRA = kBGRA_8888_GrPixelConfig;

    const SkBackingFit kE = SkBackingFit::kExact;
    const SkBackingFit kA = SkBackingFit::kApprox;

    const GrSurfaceOrigin kTL = kTopLeft_GrSurfaceOrigin;
    const GrSurfaceOrigin kBL = kBottomLeft_GrSurfaceOrigin;

    //--------------------------------------------------------------------------------------------
    TestCase gOverlappingTests[] = {
        //----------------------------------------------------------------------------------------
        // Two proxies with overlapping intervals and compatible descriptors should never share
        // RT version
        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
        // non-RT version
        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
    };

    for (auto test : gOverlappingTests) {
        sk_sp<GrSurfaceProxy> p1 = make_deferred(proxyProvider, test.fP1);
        sk_sp<GrSurfaceProxy> p2 = make_deferred(proxyProvider, test.fP2);
        overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2), test.fExpectation);
    }

    // Sample counts the context actually grants for 2x and 4x MSAA requests.
    int k2 = ctxInfo.grContext()->caps()->getRenderTargetSampleCount(2, kRGBA);
    int k4 = ctxInfo.grContext()->caps()->getRenderTargetSampleCount(4, kRGBA);

    //--------------------------------------------------------------------------------------------
    TestCase gNonOverlappingTests[] = {
        //----------------------------------------------------------------------------------------
        // Two non-overlapping intervals w/ compatible proxies should share
        // both same size & approx
        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kShare },
        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL },
          kConditionallyShare },
        // diffs sizes but still approx
        { { 64, kRT, kRGBA, kA, 0, kTL }, { 50, kRT, kRGBA, kA, 0, kTL }, kShare },
        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 50, kNotRT, kRGBA, kA, 0, kTL },
          kConditionallyShare },
        // sames sizes but exact
        { { 64, kRT, kRGBA, kE, 0, kTL }, { 64, kRT, kRGBA, kE, 0, kTL }, kShare },
        { { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL },
          kConditionallyShare },
        //----------------------------------------------------------------------------------------
        // Two non-overlapping intervals w/ different exact sizes should not share
        { { 56, kRT, kRGBA, kE, 0, kTL }, { 54, kRT, kRGBA, kE, 0, kTL }, kDontShare },
        // Two non-overlapping intervals w/ _very different_ approx sizes should not share
        { { 255, kRT, kRGBA, kA, 0, kTL }, { 127, kRT, kRGBA, kA, 0, kTL }, kDontShare },
        // Two non-overlapping intervals w/ different MSAA sample counts should not share
        { { 64, kRT, kRGBA, kA, k2, kTL },{ 64, kRT, kRGBA, kA, k4, kTL}, k2 == k4 },
        // Two non-overlapping intervals w/ different configs should not share
        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kBGRA, kA, 0, kTL }, kDontShare },
        // Two non-overlapping intervals w/ different RT classifications should never share
        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
        // Two non-overlapping intervals w/ different origins should share
        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kBL }, kShare },
    };

    for (auto test : gNonOverlappingTests) {
        sk_sp<GrSurfaceProxy> p1 = make_deferred(proxyProvider, test.fP1);
        sk_sp<GrSurfaceProxy> p2 = make_deferred(proxyProvider, test.fP2);
        if (!p1 || !p2) {
            continue; // creation can fail (i.e., for msaa4 on iOS)
        }
        non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
                         test.fExpectation);
    }

    {
        // Wrapped backend textures should never be reused
        TestCase t[1] = {
            { { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL }, kDontShare }
        };

        GrBackendTexture backEndTex;
        sk_sp<GrSurfaceProxy> p1 = make_backend(ctxInfo.grContext(), t[0].fP1, &backEndTex);
        sk_sp<GrSurfaceProxy> p2 = make_deferred(proxyProvider, t[0].fP2);
        non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
                         t[0].fExpectation);
        cleanup_backend(ctxInfo.grContext(), &backEndTex);
    }

    // Restore the allocation mode saved at the top of the test.
    resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(orig);
}
bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState) { SkASSERT(startIndex <= stopIndex && stopIndex <= fOpLists.count()); GrResourceProvider* resourceProvider = fContext->contextPriv().resourceProvider(); bool anyOpListsExecuted = false; for (int i = startIndex; i < stopIndex; ++i) { if (!fOpLists[i]) { continue; } if (resourceProvider->explicitlyAllocateGPUResources()) { if (!fOpLists[i]->isInstantiated()) { // If the backing surface wasn't allocated drop the draw of the entire opList. fOpLists[i] = nullptr; continue; } } else { if (!fOpLists[i]->instantiate(resourceProvider)) { SkDebugf("OpList failed to instantiate.\n"); fOpLists[i] = nullptr; continue; } } // TODO: handle this instantiation via lazy surface proxies? // Instantiate all deferred proxies (being built on worker threads) so we can upload them fOpLists[i]->instantiateDeferredProxies(fContext->contextPriv().resourceProvider()); fOpLists[i]->prepare(flushState); } // Upload all data to the GPU flushState->preExecuteDraws(); // Execute the onFlush op lists first, if any. for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) { if (!onFlushOpList->execute(flushState)) { SkDebugf("WARNING: onFlushOpList failed to execute.\n"); } SkASSERT(onFlushOpList->unique()); onFlushOpList = nullptr; } fOnFlushCBOpLists.reset(); // Execute the normal op lists. for (int i = startIndex; i < stopIndex; ++i) { if (!fOpLists[i]) { continue; } if (fOpLists[i]->execute(flushState)) { anyOpListsExecuted = true; } } SkASSERT(!flushState->commandBuffer()); SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush()); // We reset the flush state before the OpLists so that the last resources to be freed are those // that are written to in the OpLists. This helps to make sure the most recently used resources // are the last to be purged by the resource cache. 
flushState->reset(); for (int i = startIndex; i < stopIndex; ++i) { if (!fOpLists[i]) { continue; } if (!fOpLists[i]->unique()) { // TODO: Eventually this should be guaranteed unique. // https://bugs.chromium.org/p/skia/issues/detail?id=7111 fOpLists[i]->endFlush(); } fOpLists[i] = nullptr; } return anyOpListsExecuted; }
// Renders the reduced clip elements into an alpha mask texture (cached under
// a key derived from the clip stack genID and bounds) and returns it.
sk_sp<GrTexture> GrClipMaskManager::CreateAlphaClipMask(GrContext* context,
                                                        int32_t elementsGenID,
                                                        GrReducedClip::InitialState initialState,
                                                        const GrReducedClip::ElementList& elements,
                                                        const SkVector& clipToMaskOffset,
                                                        const SkIRect& clipSpaceIBounds) {
    GrResourceProvider* resourceProvider = context->resourceProvider();
    GrUniqueKey key;
    GetClipMaskKey(elementsGenID, clipSpaceIBounds, &key);
    // Reuse a previously rasterized mask for this clip stack if one is cached.
    if (GrTexture* texture = resourceProvider->findAndRefTextureByUniqueKey(key)) {
        return sk_sp<GrTexture>(texture);
    }

    // There's no texture in the cache. Let's try to allocate it then.
    // Prefer A8 when it is renderable; otherwise fall back to RGBA8888.
    GrPixelConfig config = kRGBA_8888_GrPixelConfig;
    if (context->caps()->isConfigRenderable(kAlpha_8_GrPixelConfig, false)) {
        config = kAlpha_8_GrPixelConfig;
    }

    sk_sp<GrDrawContext> dc(context->newDrawContext(SkBackingFit::kApprox,
                                                    clipSpaceIBounds.width(),
                                                    clipSpaceIBounds.height(),
                                                    config));
    if (!dc) {
        return nullptr;
    }

    // The texture may be larger than necessary, this rect represents the part of the texture
    // we populate with a rasterization of the clip.
    SkIRect maskSpaceIBounds = SkIRect::MakeWH(clipSpaceIBounds.width(), clipSpaceIBounds.height());

    // The scratch texture that we are drawing into can be substantially larger than the mask. Only
    // clear the part that we care about.
    dc->clear(&maskSpaceIBounds,
              GrReducedClip::kAllIn_InitialState == initialState ? 0xffffffff : 0x00000000,
              true);

    // Set the matrix so that rendered clip elements are transformed to mask space from clip
    // space.
    const SkMatrix translate = SkMatrix::MakeTrans(clipToMaskOffset.fX, clipToMaskOffset.fY);

    // It is important that we use maskSpaceIBounds as the stencil rect in the below loop.
    // The second pass that zeros the stencil buffer renders the rect maskSpaceIBounds so the first
    // pass must not set values outside of this bounds or stencil values outside the rect won't be
    // cleared.

    // walk through each clip element and perform its set op
    for (GrReducedClip::ElementList::Iter iter = elements.headIter(); iter.get(); iter.next()) {
        const Element* element = iter.get();
        SkRegion::Op op = element->getOp();
        bool invert = element->isInverseFilled();
        if (invert || SkRegion::kIntersect_Op == op || SkRegion::kReverseDifference_Op == op) {
            GrFixedClip clip(maskSpaceIBounds);

            // draw directly into the result with the stencil set to make the pixels affected
            // by the clip shape be non-zero.
            static constexpr GrUserStencilSettings kStencilInElement(
                 GrUserStencilSettings::StaticInit<
                         0xffff,
                         GrUserStencilTest::kAlways,
                         0xffff,
                         GrUserStencilOp::kReplace,
                         GrUserStencilOp::kReplace,
                         0xffff>()
            );
            if (!stencil_element(dc.get(), clip, &kStencilInElement, translate, element)) {
                return nullptr;
            }

            // Draw to the exterior pixels (those with a zero stencil value).
            static constexpr GrUserStencilSettings kDrawOutsideElement(
                 GrUserStencilSettings::StaticInit<
                         0x0000,
                         GrUserStencilTest::kEqual,
                         0xffff,
                         GrUserStencilOp::kZero,
                         GrUserStencilOp::kZero,
                         0xffff>()
            );
            if (!dc->drawContextPriv().drawAndStencilRect(clip, &kDrawOutsideElement,
                                                          op, !invert, false, translate,
                                                          SkRect::Make(clipSpaceIBounds))) {
                return nullptr;
            }
        } else {
            // all the remaining ops can just be directly draw into the accumulation buffer
            GrPaint paint;
            paint.setAntiAlias(element->isAA());
            paint.setCoverageSetOpXPFactory(op, false);

            draw_element(dc.get(), GrNoClip(), paint, translate, element);
        }
    }

    sk_sp<GrTexture> texture(dc->asTexture());
    SkASSERT(texture);
    // Cache the finished mask under the clip-stack key for future lookups.
    texture->resourcePriv().setUniqueKey(key);
    return texture;
}
/**
 * Draws a non-volatile mesh, caching its vertex (and index) data in GPU
 * buffers keyed on the SkVertices' uniqueID so subsequent draws can reuse
 * them without re-uploading.
 *
 * Fix: on the index-buffer allocation/map failure path, unmap the already
 * mapped vertex buffer before returning (previously it was left mapped).
 */
void GrDrawVerticesOp::drawNonVolatile(Target* target) {
    static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();

    bool hasColorAttribute;
    bool hasLocalCoordsAttribute;
    bool hasBoneAttribute;
    sk_sp<GrGeometryProcessor> gp = this->makeGP(target->caps().shaderCaps(),
                                                 &hasColorAttribute,
                                                 &hasLocalCoordsAttribute,
                                                 &hasBoneAttribute);

    SkASSERT(fMeshes.count() == 1); // Non-volatile meshes should never combine.

    // Get the resource provider.
    GrResourceProvider* rp = target->resourceProvider();

    // Generate keys for the buffers. Both keys share the vertices' uniqueID;
    // the trailing word distinguishes vertex (0) from index (1) data.
    GrUniqueKey vertexKey, indexKey;
    GrUniqueKey::Builder vertexKeyBuilder(&vertexKey, kDomain, 2);
    GrUniqueKey::Builder indexKeyBuilder(&indexKey, kDomain, 2);
    vertexKeyBuilder[0] = indexKeyBuilder[0] = fMeshes[0].fVertices->uniqueID();
    vertexKeyBuilder[1] = 0;
    indexKeyBuilder[1] = 1;
    vertexKeyBuilder.finish();
    indexKeyBuilder.finish();

    // Try to grab data from the cache.
    sk_sp<GrBuffer> vertexBuffer = rp->findByUniqueKey<GrBuffer>(vertexKey);
    sk_sp<GrBuffer> indexBuffer =
            this->isIndexed() ? rp->findByUniqueKey<GrBuffer>(indexKey) : nullptr;

    // Draw using the cached buffers if possible.
    if (vertexBuffer && (!this->isIndexed() || indexBuffer)) {
        this->drawVertices(target, std::move(gp), vertexBuffer.get(), 0, indexBuffer.get(), 0);
        return;
    }

    // Calculate the stride from the attributes actually present.
    size_t vertexStride = sizeof(SkPoint) +
                          (hasColorAttribute ? sizeof(uint32_t) : 0) +
                          (hasLocalCoordsAttribute ? sizeof(SkPoint) : 0) +
                          (hasBoneAttribute ? 4 * (sizeof(int8_t) + sizeof(uint8_t)) : 0);
    SkASSERT(vertexStride == gp->debugOnly_vertexStride());

    // Allocate vertex buffer.
    vertexBuffer.reset(rp->createBuffer(fVertexCount * vertexStride,
                                        kVertex_GrBufferType,
                                        kStatic_GrAccessPattern,
                                        GrResourceProvider::Flags::kNone));
    void* verts = vertexBuffer ? vertexBuffer->map() : nullptr;
    if (!verts) {
        SkDebugf("Could not allocate vertices\n");
        return;
    }

    // Allocate index buffer.
    uint16_t* indices = nullptr;
    if (this->isIndexed()) {
        indexBuffer.reset(rp->createBuffer(fIndexCount * sizeof(uint16_t),
                                           kIndex_GrBufferType,
                                           kStatic_GrAccessPattern,
                                           GrResourceProvider::Flags::kNone));
        indices = indexBuffer ? static_cast<uint16_t*>(indexBuffer->map()) : nullptr;
        if (!indices) {
            SkDebugf("Could not allocate indices\n");
            // Don't leave the vertex buffer mapped on this failure path.
            vertexBuffer->unmap();
            return;
        }
    }

    // Fill the buffers.
    this->fillBuffers(hasColorAttribute,
                      hasLocalCoordsAttribute,
                      hasBoneAttribute,
                      vertexStride,
                      verts,
                      indices);

    // Unmap the buffers.
    vertexBuffer->unmap();
    if (indexBuffer) {
        indexBuffer->unmap();
    }

    // Cache the buffers.
    rp->assignUniqueKeyToResource(vertexKey, vertexBuffer.get());
    rp->assignUniqueKeyToResource(indexKey, indexBuffer.get());

    // Draw the vertices.
    this->drawVertices(target, std::move(gp), vertexBuffer.get(), 0, indexBuffer.get(), 0);
}
// Renders the reduced clip elements into a cached alpha mask texture using
// the draw target. Elements that can't be stenciled directly are rendered
// into a temporary mask and then merged into the accumulator.
GrTexture* GrClipMaskManager::createAlphaClipMask(int32_t elementsGenID,
                                                  GrReducedClip::InitialState initialState,
                                                  const GrReducedClip::ElementList& elements,
                                                  const SkVector& clipToMaskOffset,
                                                  const SkIRect& clipSpaceIBounds) {
    GrResourceProvider* resourceProvider = fDrawTarget->cmmAccess().resourceProvider();
    GrUniqueKey key;
    GetClipMaskKey(elementsGenID, clipSpaceIBounds, &key);
    // Reuse a previously rasterized mask for this clip stack if one is cached.
    if (GrTexture* texture = resourceProvider->findAndRefTextureByUniqueKey(key)) {
        return texture;
    }

    SkAutoTUnref<GrTexture> texture(this->createCachedMask(
        clipSpaceIBounds.width(), clipSpaceIBounds.height(), key, true));

    // There's no texture in the cache. Let's try to allocate it then.
    if (!texture) {
        return nullptr;
    }

    // Set the matrix so that rendered clip elements are transformed to mask space from clip
    // space.
    SkMatrix translate;
    translate.setTranslate(clipToMaskOffset);

    // The texture may be larger than necessary, this rect represents the part of the texture
    // we populate with a rasterization of the clip.
    SkIRect maskSpaceIBounds = SkIRect::MakeWH(clipSpaceIBounds.width(), clipSpaceIBounds.height());

    // The scratch texture that we are drawing into can be substantially larger than the mask. Only
    // clear the part that we care about.
    fDrawTarget->clear(&maskSpaceIBounds,
                       GrReducedClip::kAllIn_InitialState == initialState ? 0xffffffff : 0x00000000,
                       true,
                       texture->asRenderTarget());

    // When we use the stencil in the below loop it is important to have this clip installed.
    // The second pass that zeros the stencil buffer renders the rect maskSpaceIBounds so the first
    // pass must not set values outside of this bounds or stencil values outside the rect won't be
    // cleared.
    GrClip clip(maskSpaceIBounds);
    // Temporary mask target, created lazily the first time an element needs it.
    SkAutoTUnref<GrTexture> temp;

    // walk through each clip element and perform its set op
    for (GrReducedClip::ElementList::Iter iter = elements.headIter(); iter.get(); iter.next()) {
        const Element* element = iter.get();
        SkRegion::Op op = element->getOp();
        bool invert = element->isInverseFilled();
        if (invert || SkRegion::kIntersect_Op == op || SkRegion::kReverseDifference_Op == op) {
            GrPipelineBuilder pipelineBuilder;

            pipelineBuilder.setClip(clip);
            GrPathRenderer* pr = nullptr;
            bool useTemp = !this->canStencilAndDrawElement(&pipelineBuilder, texture, &pr, element);
            GrTexture* dst;
            // This is the bounds of the clip element in the space of the alpha-mask. The temporary
            // mask buffer can be substantially larger than the actually clip stack element. We
            // touch the minimum number of pixels necessary and use decal mode to combine it with
            // the accumulator.
            SkIRect maskSpaceElementIBounds;

            if (useTemp) {
                if (invert) {
                    maskSpaceElementIBounds = maskSpaceIBounds;
                } else {
                    SkRect elementBounds = element->getBounds();
                    elementBounds.offset(clipToMaskOffset);
                    elementBounds.roundOut(&maskSpaceElementIBounds);
                }

                if (!temp) {
                    temp.reset(this->createTempMask(maskSpaceIBounds.fRight,
                                                    maskSpaceIBounds.fBottom));
                    if (!temp) {
                        // On failure, remove the partially built mask from the cache.
                        texture->resourcePriv().removeUniqueKey();
                        return nullptr;
                    }
                }
                dst = temp;
                // clear the temp target and set blend to replace
                fDrawTarget->clear(&maskSpaceElementIBounds,
                                   invert ? 0xffffffff : 0x00000000,
                                   true,
                                   dst->asRenderTarget());
                set_coverage_drawing_xpf(SkRegion::kReplace_Op, invert, &pipelineBuilder);
            } else {
                // draw directly into the result with the stencil set to make the pixels affected
                // by the clip shape be non-zero.
                dst = texture;
                GR_STATIC_CONST_SAME_STENCIL(kStencilInElement,
                                             kReplace_StencilOp,
                                             kReplace_StencilOp,
                                             kAlways_StencilFunc,
                                             0xffff,
                                             0xffff,
                                             0xffff);
                pipelineBuilder.setStencil(kStencilInElement);
                set_coverage_drawing_xpf(op, invert, &pipelineBuilder);
            }

            if (!this->drawElement(&pipelineBuilder, translate, dst, element, pr)) {
                texture->resourcePriv().removeUniqueKey();
                return nullptr;
            }

            if (useTemp) {
                GrPipelineBuilder backgroundPipelineBuilder;
                backgroundPipelineBuilder.setRenderTarget(texture->asRenderTarget());

                // Now draw into the accumulator using the real operation and the temp buffer as a
                // texture
                this->mergeMask(&backgroundPipelineBuilder,
                                texture,
                                temp,
                                op,
                                maskSpaceIBounds,
                                maskSpaceElementIBounds);
            } else {
                GrPipelineBuilder backgroundPipelineBuilder;
                backgroundPipelineBuilder.setRenderTarget(texture->asRenderTarget());

                set_coverage_drawing_xpf(op, !invert, &backgroundPipelineBuilder);
                // Draw to the exterior pixels (those with a zero stencil value).
                GR_STATIC_CONST_SAME_STENCIL(kDrawOutsideElement,
                                             kZero_StencilOp,
                                             kZero_StencilOp,
                                             kEqual_StencilFunc,
                                             0xffff,
                                             0x0000,
                                             0xffff);
                backgroundPipelineBuilder.setStencil(kDrawOutsideElement);

                // The color passed in here does not matter since the coverageSetOpXP won't read it.
                fDrawTarget->drawNonAARect(backgroundPipelineBuilder,
                                           GrColor_WHITE,
                                           translate,
                                           clipSpaceIBounds);
            }
        } else {
            GrPipelineBuilder pipelineBuilder;

            // all the remaining ops can just be directly draw into the accumulation buffer
            set_coverage_drawing_xpf(op, false, &pipelineBuilder);
            // The color passed in here does not matter since the coverageSetOpXP won't read it.
            this->drawElement(&pipelineBuilder, translate, texture, element);
        }
    }

    return texture.detach();
}
// Rasterizes the reduced clip elements on the CPU (via GrSWMaskHelper) and
// uploads the result into a cached mask texture.
GrTexture* GrClipMaskManager::createSoftwareClipMask(int32_t elementsGenID,
                                                     GrReducedClip::InitialState initialState,
                                                     const GrReducedClip::ElementList& elements,
                                                     const SkVector& clipToMaskOffset,
                                                     const SkIRect& clipSpaceIBounds) {
    GrUniqueKey key;
    GetClipMaskKey(elementsGenID, clipSpaceIBounds, &key);
    GrResourceProvider* resourceProvider = fDrawTarget->cmmAccess().resourceProvider();
    // Reuse a previously rasterized mask for this clip stack if one is cached.
    if (GrTexture* texture = resourceProvider->findAndRefTextureByUniqueKey(key)) {
        return texture;
    }

    // The mask texture may be larger than necessary. We round out the clip space bounds and pin
    // the top left corner of the resulting rect to the top left of the texture.
    SkIRect maskSpaceIBounds = SkIRect::MakeWH(clipSpaceIBounds.width(), clipSpaceIBounds.height());

    GrSWMaskHelper helper(this->getContext());

    // Set the matrix so that rendered clip elements are transformed to mask space from clip
    // space.
    SkMatrix translate;
    translate.setTranslate(clipToMaskOffset);

    helper.init(maskSpaceIBounds, &translate, false);
    helper.clear(GrReducedClip::kAllIn_InitialState == initialState ? 0xFF : 0x00);
    SkStrokeRec stroke(SkStrokeRec::kFill_InitStyle);

    for (GrReducedClip::ElementList::Iter iter(elements.headIter()) ; iter.get(); iter.next()) {
        const Element* element = iter.get();
        SkRegion::Op op = element->getOp();

        if (SkRegion::kIntersect_Op == op || SkRegion::kReverseDifference_Op == op) {
            // Intersect and reverse difference require modifying pixels outside of the geometry
            // that is being "drawn". In both cases we erase all the pixels outside of the geometry
            // but leave the pixels inside the geometry alone. For reverse difference we invert all
            // the pixels before clearing the ones outside the geometry.
            if (SkRegion::kReverseDifference_Op == op) {
                SkRect temp = SkRect::Make(clipSpaceIBounds);
                // invert the entire scene
                helper.draw(temp, SkRegion::kXOR_Op, false, 0xFF);
            }
            SkPath clipPath;
            element->asPath(&clipPath);
            // Erase outside the element: draw its inverse with value 0.
            clipPath.toggleInverseFillType();
            helper.draw(clipPath, stroke, SkRegion::kReplace_Op, element->isAA(), 0x00);
            continue;
        }

        // The other ops (union, xor, diff) only affect pixels inside
        // the geometry so they can just be drawn normally
        if (Element::kRect_Type == element->getType()) {
            helper.draw(element->getRect(), op, element->isAA(), 0xFF);
        } else {
            SkPath path;
            element->asPath(&path);
            helper.draw(path, stroke, op, element->isAA(), 0xFF);
        }
    }

    // Allocate clip mask texture
    GrTexture* result = this->createCachedMask(clipSpaceIBounds.width(),
                                               clipSpaceIBounds.height(),
                                               key, false);
    if (nullptr == result) {
        return nullptr;
    }
    // Upload the CPU-rasterized mask into the cached texture.
    helper.toTexture(result);

    return result;
}
// Tessellates fPath (after applying fStyle, if any) into triangles and draws
// them. For non-volatile paths with a keyable style, the tessellation is
// cached in a GPU vertex buffer keyed on genID, fill type, clip bounds
// (inverse fills only) and style, and invalidated when the path changes.
void draw(Target* target, const GrGeometryProcessor* gp) const {
    GrResourceProvider* rp = target->resourceProvider();
    SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
    // Convert the screen-space tolerance into source space using the view matrix.
    SkScalar tol = GrPathUtils::scaleToleranceToSrc(screenSpaceTol, fViewMatrix,
                                                    fPath.getBounds());

    SkScalar styleScale = SK_Scalar1;
    if (fStyle.applies()) {
        styleScale = GrStyle::MatrixToScaleFactor(fViewMatrix);
    }

    // construct a cache key from the path's genID and the view matrix
    static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
    GrUniqueKey key;
    int clipBoundsCnt =
        fPath.isInverseFillType() ? sizeof(fClipBounds) / sizeof(uint32_t) : 0;
    int styleDataCnt = GrStyle::KeySize(fStyle, GrStyle::Apply::kPathEffectAndStrokeRec);
    // Only build/consult the cache when the style produced a valid (non-negative) key size.
    if (styleDataCnt >= 0) {
        GrUniqueKey::Builder builder(&key, kDomain, 2 + clipBoundsCnt + styleDataCnt);
        builder[0] = fPath.getGenerationID();
        builder[1] = fPath.getFillType();
        // For inverse fills, the tessellation is dependent on clip bounds.
        if (fPath.isInverseFillType()) {
            memcpy(&builder[2], &fClipBounds, sizeof(fClipBounds));
        }
        if (styleDataCnt) {
            GrStyle::WriteKey(&builder[2 + clipBoundsCnt], fStyle,
                              GrStyle::Apply::kPathEffectAndStrokeRec, styleScale);
        }
        builder.finish();
        // Reuse a cached tessellation if one exists at a compatible tolerance.
        SkAutoTUnref<GrBuffer> cachedVertexBuffer(rp->findAndRefTByUniqueKey<GrBuffer>(key));
        int actualCount;
        if (cache_match(cachedVertexBuffer.get(), tol, &actualCount)) {
            this->drawVertices(target, gp, cachedVertexBuffer.get(), 0, actualCount);
            return;
        }
    }

    // Apply the style (if any) to obtain the fill path to tessellate.
    SkPath path;
    if (fStyle.applies()) {
        SkStrokeRec::InitStyle fill;
        SkAssertResult(fStyle.applyToPath(&path, &fill, fPath, styleScale));
        SkASSERT(SkStrokeRec::kFill_InitStyle == fill);
    } else {
        path = fPath;
    }

    bool isLinear;
    bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
    StaticVertexAllocator allocator(rp, canMapVB);
    int count = GrTessellator::PathToTriangles(path, tol, fClipBounds, &allocator, &isLinear);
    if (count == 0) {
        return;
    }
    this->drawVertices(target, gp, allocator.vertexBuffer(), 0, count);

    // Cache the tessellation only for non-volatile paths with a keyable style,
    // and register a listener so the entry is invalidated when the path changes.
    // A tolerance of 0 marks a fully linear path, which never needs re-tessellation.
    if (!fPath.isVolatile() && styleDataCnt >= 0) {
        TessInfo info;
        info.fTolerance = isLinear ? 0 : tol;
        info.fCount = count;
        SkAutoTUnref<SkData> data(SkData::NewWithCopy(&info, sizeof(info)));
        key.setCustomData(data.get());
        rp->assignUniqueKeyToResource(key, allocator.vertexBuffer());
        SkPathPriv::AddGenIDChangeListener(fPath, new PathInvalidator(key));
    }
}
// Renders the reduced clip elements into a cached alpha mask texture.
// NOTE(review): this block appears truncated in this chunk — the trailing
// 'else' branch and the end of the function are not visible here.
GrTexture* GrClipMaskManager::CreateAlphaClipMask(GrContext* context,
                                                  int32_t elementsGenID,
                                                  GrReducedClip::InitialState initialState,
                                                  const GrReducedClip::ElementList& elements,
                                                  const SkVector& clipToMaskOffset,
                                                  const SkIRect& clipSpaceIBounds) {
    GrResourceProvider* resourceProvider = context->resourceProvider();
    GrUniqueKey key;
    GetClipMaskKey(elementsGenID, clipSpaceIBounds, &key);
    // Reuse a previously rasterized mask for this clip stack if one is cached.
    if (GrTexture* texture = resourceProvider->findAndRefTextureByUniqueKey(key)) {
        return texture;
    }

    // There's no texture in the cache. Let's try to allocate it then.
    GrSurfaceDesc desc;
    desc.fWidth = clipSpaceIBounds.width();
    desc.fHeight = clipSpaceIBounds.height();
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    // Prefer A8 when it is renderable; otherwise fall back to RGBA8888.
    if (context->caps()->isConfigRenderable(kAlpha_8_GrPixelConfig, false)) {
        desc.fConfig = kAlpha_8_GrPixelConfig;
    } else {
        desc.fConfig = kRGBA_8888_GrPixelConfig;
    }

    SkAutoTUnref<GrTexture> texture(resourceProvider->createApproxTexture(desc, 0));
    if (!texture) {
        return nullptr;
    }

    texture->resourcePriv().setUniqueKey(key);

    SkAutoTUnref<GrDrawContext> dc(context->drawContext(texture->asRenderTarget()));
    if (!dc) {
        return nullptr;
    }

    // The texture may be larger than necessary, this rect represents the part of the texture
    // we populate with a rasterization of the clip.
    SkIRect maskSpaceIBounds = SkIRect::MakeWH(clipSpaceIBounds.width(), clipSpaceIBounds.height());

    // The scratch texture that we are drawing into can be substantially larger than the mask. Only
    // clear the part that we care about.
    dc->clear(&maskSpaceIBounds,
              GrReducedClip::kAllIn_InitialState == initialState ? 0xffffffff : 0x00000000,
              true);

    // Set the matrix so that rendered clip elements are transformed to mask space from clip
    // space.
    const SkMatrix translate = SkMatrix::MakeTrans(clipToMaskOffset.fX, clipToMaskOffset.fY);

    // It is important that we use maskSpaceIBounds as the stencil rect in the below loop.
    // The second pass that zeros the stencil buffer renders the rect maskSpaceIBounds so the first
    // pass must not set values outside of this bounds or stencil values outside the rect won't be
    // cleared.

    // walk through each clip element and perform its set op
    for (GrReducedClip::ElementList::Iter iter = elements.headIter(); iter.get(); iter.next()) {
        const Element* element = iter.get();
        SkRegion::Op op = element->getOp();
        bool invert = element->isInverseFilled();
        if (invert || SkRegion::kIntersect_Op == op || SkRegion::kReverseDifference_Op == op) {
#ifdef SK_DEBUG
            GrPathRenderer* pr = GetPathRenderer(context, texture, translate, element);
            if (Element::kRect_Type != element->getType() && !pr) {
                // UseSWOnlyPath should now filter out all cases where gpu-side mask merging would
                // be performed (i.e., pr would be NULL for a non-rect path).
                // See https://bug.skia.org/4519 for rationale and details.
                SkASSERT(0);
            }
#endif

            // draw directly into the result with the stencil set to make the pixels affected
            // by the clip shape be non-zero.
            GR_STATIC_CONST_SAME_STENCIL(kStencilInElement,
                                         kReplace_StencilOp,
                                         kReplace_StencilOp,
                                         kAlways_StencilFunc,
                                         0xffff,
                                         0xffff,
                                         0xffff)
            if (!stencil_element(dc, &maskSpaceIBounds, kStencilInElement, translate, element)) {
                // On failure, remove the partially built mask from the cache.
                texture->resourcePriv().removeUniqueKey();
                return nullptr;
            }

            // Draw to the exterior pixels (those with a zero stencil value).
            GR_STATIC_CONST_SAME_STENCIL(kDrawOutsideElement,
                                         kZero_StencilOp,
                                         kZero_StencilOp,
                                         kEqual_StencilFunc,
                                         0xffff,
                                         0x0000,
                                         0xffff);
            if (!dc->drawContextPriv().drawAndStencilRect(&maskSpaceIBounds, kDrawOutsideElement,
                                                          op, !invert, false, translate,
                                                          SkRect::Make(clipSpaceIBounds))) {
                texture->resourcePriv().removeUniqueKey();
                return nullptr;
            }
        } else {
// For each combination of origin/size/config/fit/budget/sample-count, checks
// that deferred proxy creation succeeds exactly when direct texture creation
// does, for both render-target and plain-texture flags.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(DeferredProxyTest, reporter, ctxInfo) {
    GrProxyProvider* proxyProvider = ctxInfo.grContext()->priv().proxyProvider();
    GrResourceProvider* resourceProvider = ctxInfo.grContext()->priv().resourceProvider();
    const GrCaps& caps = *ctxInfo.grContext()->priv().caps();

    int attempt = 0; // useful for debugging

    for (auto origin : { kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin }) {
        for (auto widthHeight : { 100, 128, 1048576 }) {
            for (auto config : { kAlpha_8_GrPixelConfig, kRGB_565_GrPixelConfig,
                                 kRGBA_8888_GrPixelConfig, kRGBA_1010102_GrPixelConfig,
                                 kRGB_ETC1_GrPixelConfig }) {
                for (auto fit : { SkBackingFit::kExact, SkBackingFit::kApprox }) {
                    for (auto budgeted : { SkBudgeted::kYes, SkBudgeted::kNo }) {
                        for (auto numSamples : {1, 4, 16, 128}) {
                            // We don't have recycling support for compressed textures
                            if (GrPixelConfigIsCompressed(config) &&
                                SkBackingFit::kApprox == fit) {
                                continue;
                            }

                            GrSurfaceDesc desc;
                            desc.fFlags = kRenderTarget_GrSurfaceFlag;
                            desc.fWidth = widthHeight;
                            desc.fHeight = widthHeight;
                            desc.fConfig = config;
                            desc.fSampleCnt = numSamples;

                            GrSRGBEncoded srgbEncoded;
                            GrColorType colorType =
                                    GrPixelConfigToColorTypeAndEncoding(config, &srgbEncoded);
                            const GrBackendFormat format =
                                    caps.getBackendFormatFromGrColorType(colorType, srgbEncoded);

                            // Render-target flavored proxy vs. direct texture creation.
                            {
                                sk_sp<GrTexture> tex;
                                if (SkBackingFit::kApprox == fit) {
                                    tex = resourceProvider->createApproxTexture(
                                            desc, GrResourceProvider::Flags::kNoPendingIO);
                                } else {
                                    tex = resourceProvider->createTexture(
                                            desc, budgeted,
                                            GrResourceProvider::Flags::kNoPendingIO);
                                }

                                sk_sp<GrTextureProxy> proxy =
                                        proxyProvider->createProxy(format, desc, origin, fit,
                                                                   budgeted);
                                REPORTER_ASSERT(reporter, SkToBool(tex) == SkToBool(proxy));
                                if (proxy) {
                                    REPORTER_ASSERT(reporter, proxy->asRenderTargetProxy());
                                    // This forces the proxy to compute and cache its
                                    // pre-instantiation size guess. Later, when it is actually
                                    // instantiated, it checks that the instantiated size is <= to
                                    // the pre-computation. If the proxy never computed its
                                    // pre-instantiation size then the check is skipped.
                                    proxy->gpuMemorySize();

                                    check_surface(reporter, proxy.get(), origin, widthHeight,
                                                  widthHeight, config, budgeted);
                                    int supportedSamples =
                                            caps.getRenderTargetSampleCount(numSamples, config);
                                    check_rendertarget(reporter, caps, resourceProvider,
                                                       proxy->asRenderTargetProxy(),
                                                       supportedSamples, fit,
                                                       caps.maxWindowRectangles());
                                }
                            }

                            desc.fFlags = kNone_GrSurfaceFlags;

                            // Plain-texture flavored proxy vs. direct texture creation.
                            {
                                sk_sp<GrTexture> tex;
                                if (SkBackingFit::kApprox == fit) {
                                    tex = resourceProvider->createApproxTexture(
                                            desc, GrResourceProvider::Flags::kNoPendingIO);
                                } else {
                                    tex = resourceProvider->createTexture(
                                            desc, budgeted,
                                            GrResourceProvider::Flags::kNoPendingIO);
                                }

                                sk_sp<GrTextureProxy> proxy(
                                        proxyProvider->createProxy(format, desc, origin, fit,
                                                                   budgeted));
                                REPORTER_ASSERT(reporter, SkToBool(tex) == SkToBool(proxy));
                                if (proxy) {
                                    // This forces the proxy to compute and cache its
                                    // pre-instantiation size guess. Later, when it is actually
                                    // instantiated, it checks that the instantiated size is <= to
                                    // the pre-computation. If the proxy never computed its
                                    // pre-instantiation size then the check is skipped.
                                    proxy->gpuMemorySize();

                                    check_surface(reporter, proxy.get(), origin, widthHeight,
                                                  widthHeight, config, budgeted);
                                    check_texture(reporter, resourceProvider,
                                                  proxy->asTextureProxy(), fit);
                                }
                            }

                            attempt++;
                        }
                    }
                }
            }
        }
    }
}
// Draws four full-screen colored quads through a test op and verifies, by
// reading back pixels, that dynamic scissor state selects the expected mesh
// color per quadrant (scissor enabled) or the last mesh everywhere (disabled).
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrPipelineDynamicStateTest, reporter, ctxInfo) {
    GrContext* const context = ctxInfo.grContext();
    GrResourceProvider* rp = context->resourceProvider();

    sk_sp<GrRenderTargetContext> rtc(
        context->makeDeferredRenderTargetContext(SkBackingFit::kExact, kScreenSize, kScreenSize,
                                                 kRGBA_8888_GrPixelConfig, nullptr));
    if (!rtc) {
        ERRORF(reporter, "could not create render target context.");
        return;
    }

    // One full-screen quad (4 corners) per mesh, each with its own color.
    constexpr float d = (float) kScreenSize;
    Vertex vdata[kNumMeshes * 4] = {
        {0, 0, kMeshColors[0]},
        {0, d, kMeshColors[0]},
        {d, 0, kMeshColors[0]},
        {d, d, kMeshColors[0]},

        {0, 0, kMeshColors[1]},
        {0, d, kMeshColors[1]},
        {d, 0, kMeshColors[1]},
        {d, d, kMeshColors[1]},

        {0, 0, kMeshColors[2]},
        {0, d, kMeshColors[2]},
        {d, 0, kMeshColors[2]},
        {d, d, kMeshColors[2]},

        {0, 0, kMeshColors[3]},
        {0, d, kMeshColors[3]},
        {d, 0, kMeshColors[3]},
        {d, d, kMeshColors[3]}
    };

    sk_sp<const GrBuffer> vbuff(rp->createBuffer(sizeof(vdata), kVertex_GrBufferType,
                                                 kDynamic_GrAccessPattern,
                                                 GrResourceProvider::kNoPendingIO_Flag |
                                                 GrResourceProvider::kRequireGpuMemory_Flag,
                                                 vdata));
    if (!vbuff) {
        ERRORF(reporter, "vbuff is null.");
        return;
    }

    uint32_t resultPx[kScreenSize * kScreenSize];

    for (ScissorState scissorState : {ScissorState::kEnabled, ScissorState::kDisabled}) {
        // Clear to a sentinel color so missed pixels are detectable.
        rtc->clear(nullptr, 0xbaaaaaad, true);
        rtc->priv().testingOnly_addDrawOp(
            skstd::make_unique<GrPipelineDynamicStateTestOp>(scissorState, vbuff));
        rtc->readPixels(SkImageInfo::Make(kScreenSize, kScreenSize,
                                          kRGBA_8888_SkColorType, kPremul_SkAlphaType),
                        resultPx, 4 * kScreenSize, 0, 0, 0);
        // Validate every pixel against the expected mesh color.
        for (int y = 0; y < kScreenSize; ++y) {
            for (int x = 0; x < kScreenSize; ++x) {
                int expectedColorIdx;
                if (ScissorState::kEnabled == scissorState) {
                    // Each screen quadrant should show a different mesh.
                    expectedColorIdx =
                        (x < kScreenSplitX ? 0 : 2) + (y < kScreenSplitY ? 0 : 1);
                } else {
                    // Without scissoring, the last mesh drawn covers everything.
                    expectedColorIdx = kNumMeshes - 1;
                }
                uint32_t expected = kMeshColors[expectedColorIdx];
                uint32_t actual = resultPx[y * kScreenSize + x];
                if (expected != actual) {
                    ERRORF(reporter, "[scissor=%s] pixel (%i,%i): got 0x%x expected 0x%x",
                           ScissorState::kEnabled == scissorState ? "enabled" : "disabled",
                           x, y, actual, expected);
                    return;
                }
            }
        }
    }
}