DEF_GPUTEST(TessellatingPathRendererTests, reporter, factory) {
    // Exercises the tessellating path renderer against a collection of
    // degenerate/edge-case paths (create_path_0..15), drawing each into an
    // offscreen 800x800 render target. The test passes if nothing crashes
    // or asserts.
    GrContext* context = factory->get(static_cast<GrContextFactory::GLContextType>(0));
    if (nullptr == context) {
        return;
    }
    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fWidth = 800;
    desc.fHeight = 800;
    desc.fConfig = kSkia8888_GrPixelConfig;
    desc.fOrigin = kTopLeft_GrSurfaceOrigin;
    SkAutoTUnref<GrTexture> texture(context->textureProvider()->createApproxTexture(desc));
    // Fix: createApproxTexture can fail (e.g. out of GPU memory); the
    // original code dereferenced the result unconditionally.
    if (nullptr == texture) {
        return;
    }
    GrTestTarget tt;
    GrRenderTarget* rt = texture->asRenderTarget();
    context->getTestTarget(&tt, rt);
    GrDrawTarget* dt = tt.target();
    GrResourceProvider* rp = tt.resourceProvider();
    test_path(dt, rt, rp, create_path_0());
    test_path(dt, rt, rp, create_path_1());
    test_path(dt, rt, rp, create_path_2());
    test_path(dt, rt, rp, create_path_3());
    test_path(dt, rt, rp, create_path_4());
    test_path(dt, rt, rp, create_path_5());
    test_path(dt, rt, rp, create_path_6());
    test_path(dt, rt, rp, create_path_7());
    test_path(dt, rt, rp, create_path_8());
    test_path(dt, rt, rp, create_path_9());
    test_path(dt, rt, rp, create_path_10());
    test_path(dt, rt, rp, create_path_11());
    test_path(dt, rt, rp, create_path_12());
    test_path(dt, rt, rp, create_path_13());
    test_path(dt, rt, rp, create_path_14());
    test_path(dt, rt, rp, create_path_15());
}
// Creates a GPU-accelerated SkCanvas of the given size, optionally wrapped in
// a deferred canvas. Returns 0 (null) if the shared GraphicsContext3D, its
// GrContext, or the backing texture cannot be obtained/created. On success the
// ImageBufferData is marked accelerated and (with compositing enabled) wired
// to a Canvas2DLayerChromium.
static SkCanvas* createAcceleratedCanvas(const IntSize& size, ImageBufferData* data, DeferralMode deferralMode)
{
    RefPtr<GraphicsContext3D> context3D = SharedGraphicsContext3D::get();
    if (!context3D)
        return 0;
    GrContext* gr = context3D->grContext();
    if (!gr)
        return 0;
    // GL state may have been touched by other users of the shared context.
    gr->resetContext();
    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit;
    desc.fSampleCnt = 0;
    desc.fWidth = size.width();
    desc.fHeight = size.height();
    desc.fConfig = kSkia8888_PM_GrPixelConfig;
    // Uncached: this texture backs a long-lived canvas, not a scratch resource.
    SkAutoTUnref<GrTexture> texture(gr->createUncachedTexture(desc, 0, 0));
    if (!texture.get())
        return 0;
    SkCanvas* canvas;
    SkAutoTUnref<SkDevice> device(new SkGpuDevice(gr, texture.get()));
    if (deferralMode == Deferred) {
        SkAutoTUnref<AcceleratedDeviceContext> deviceContext(new AcceleratedDeviceContext(context3D.get()));
        canvas = new SkDeferredCanvas(device.get(), deviceContext.get());
    } else
        canvas = new SkCanvas(device.get());
    data->m_platformContext.setAccelerated(true);
#if USE(ACCELERATED_COMPOSITING)
    // Hand the texture to the compositor layer so the canvas contents can be
    // presented. context3D.release() transfers the RefPtr into the layer.
    data->m_platformLayer = Canvas2DLayerChromium::create(context3D.release(), size);
    data->m_platformLayer->setTextureId(texture.get()->getTextureHandle());
    data->m_platformLayer->setCanvas(canvas);
#endif
    return canvas;
}
// GPU path for the bicubic image filter: scales the input texture by fScale
// using a GrBicubicEffect, rendering into a scratch render-target texture.
// Returns false if the upstream input cannot be resolved on the GPU or the
// destination texture cannot be allocated.
bool SkBicubicImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const SkMatrix& ctm, SkBitmap* result, SkIPoint* offset) {
    SkBitmap srcBM;
    if (!SkImageFilterUtils::GetInputResultGPU(getInput(0), proxy, src, ctm, &srcBM, offset)) {
        return false;
    }
    GrTexture* srcTexture = srcBM.getTexture();
    GrContext* context = srcTexture->getContext();
    // Destination covers the scaled source; dimensions are rounded up.
    SkRect dstRect = SkRect::MakeWH(srcBM.width() * fScale.fWidth, srcBM.height() * fScale.fHeight);
    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarCeilToInt(dstRect.width());
    desc.fHeight = SkScalarCeilToInt(dstRect.height());
    desc.fConfig = kSkia8888_GrPixelConfig;
    GrAutoScratchTexture ast(context, desc);
    // detach(): we keep the scratch texture beyond the scope of 'ast'.
    SkAutoTUnref<GrTexture> dst(ast.detach());
    if (!dst) {
        return false;
    }
    GrContext::AutoRenderTarget art(context, dst->asRenderTarget());
    GrPaint paint;
    paint.addColorEffect(GrBicubicEffect::Create(srcTexture, fCoefficients))->unref();
    SkRect srcRect;
    srcBM.getBounds(&srcRect);
    context->drawRectToRect(paint, dstRect, srcRect);
    return SkImageFilterUtils::WrapTexture(dst, desc.fWidth, desc.fHeight, result);
}
// Converts a Skia-texture-backed TextureHolder into a mailbox-backed one:
// produces a CHROMIUM mailbox for the SkImage's GL texture and records a sync
// token so other contexts can consume it safely.
MailboxTextureHolder::MailboxTextureHolder(
    std::unique_ptr<TextureHolder> textureHolder) {
    DCHECK(textureHolder->isSkiaTextureHolder());
    sk_sp<SkImage> image = textureHolder->skImage();
    DCHECK(image);
    gpu::gles2::GLES2Interface* sharedGL = SharedGpuContext::gl();
    GrContext* sharedGrContext = SharedGpuContext::gr();
    if (!sharedGrContext) {
        // Can happen if the context is lost. The SkImage won't be any good now
        // anyway.
        return;
    }
    // getTextureHandle(true) flushes pending Skia work so the texture contents
    // are complete before we share them.
    GLuint imageTextureId =
        skia::GrBackendObjectToGrGLTextureInfo(image->getTextureHandle(true))
            ->fID;
    sharedGL->BindTexture(GL_TEXTURE_2D, imageTextureId);
    sharedGL->GenMailboxCHROMIUM(m_mailbox.name);
    sharedGL->ProduceTextureCHROMIUM(GL_TEXTURE_2D, m_mailbox.name);
    // Fence + flush + sync token: consumers must wait on m_syncToken before
    // reading from the mailbox.
    const GLuint64 fenceSync = sharedGL->InsertFenceSyncCHROMIUM();
    sharedGL->Flush();
    sharedGL->GenSyncTokenCHROMIUM(fenceSync, m_syncToken.GetData());
    sharedGL->BindTexture(GL_TEXTURE_2D, 0);
    // We changed bound textures in this function, so reset the GrContext.
    sharedGrContext->resetContext(kTextureBinding_GrGLBackendState);
    m_size = IntSize(image->width(), image->height());
    m_textureId = imageTextureId;
    m_isConvertedFromSkiaTexture = true;
}
// Creates a GPU-accelerated SkCanvas for an ImageBuffer. With accelerated
// compositing the canvas is obtained from a Canvas2DLayerBridge (threaded or
// single-threaded depending on the compositor configuration); otherwise a
// plain SkCanvas wraps the GPU device. Returns 0 (null) on any failure to
// acquire the shared 3D context, GrContext, or backing texture.
static SkCanvas* createAcceleratedCanvas(const IntSize& size, ImageBufferData* data)
{
    RefPtr<GraphicsContext3D> context3D = SharedGraphicsContext3D::get();
    if (!context3D)
        return 0;
    GrContext* gr = context3D->grContext();
    if (!gr)
        return 0;
    // The shared context's GL state may be stale; reset before use.
    gr->resetContext();
    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit;
    desc.fSampleCnt = 0;
    desc.fWidth = size.width();
    desc.fHeight = size.height();
    desc.fConfig = kSkia8888_GrPixelConfig;
    SkAutoTUnref<GrTexture> texture(gr->createUncachedTexture(desc, 0, 0));
    if (!texture.get())
        return 0;
    SkCanvas* canvas;
    SkAutoTUnref<SkDevice> device(new SkGpuDevice(gr, texture.get()));
#if USE(ACCELERATED_COMPOSITING)
    // Pick the bridge threading mode to match the compositor.
    Canvas2DLayerBridge::ThreadMode threadMode = WebKit::Platform::current()->isThreadedCompositingEnabled() ? Canvas2DLayerBridge::Threaded : Canvas2DLayerBridge::SingleThread;
    data->m_layerBridge = Canvas2DLayerBridge::create(context3D.release(), size, threadMode, texture.get()->getTextureHandle());
    canvas = data->m_layerBridge->skCanvas(device.get());
#else
    canvas = new SkCanvas(device.get());
#endif
    data->m_platformContext.setAccelerated(true);
    return canvas;
}
// Draws rotating/scaling text at a range of sizes while measuring per-frame
// time in a 32-entry ring buffer; also (on GPU) blits the A8 font atlas
// texture for debugging.
void onDrawContent(SkCanvas* canvas) override {
    SkPaint paint;
    // setTypeface refs the typeface; SkSafeUnref balances the creation ref.
    SkSafeUnref(paint.setTypeface(SkTypeface::CreateFromFile("/skimages/samplefont.ttf")));
    paint.setAntiAlias(true);
    paint.setFilterQuality(kMedium_SkFilterQuality);
    SkString outString("fps: ");
    fTimer.end();
    // TODO: generalize this timing code in utils
    // Ring buffer of the last 32 frame wall times; index wraps via & 0x1f.
    fTimes[fCurrentTime] = (float)(fTimer.fWall);
    fCurrentTime = (fCurrentTime + 1) & 0x1f;
    float meanTime = 0.0f;
    for (int i = 0; i < 32; ++i) {
        meanTime += fTimes[i];
    }
    meanTime /= 32.f;
    SkScalar fps = 1000.f / meanTime;
    outString.appendScalar(fps);
    outString.append(" ms: ");
    outString.appendScalar(meanTime);
    SkString modeString("Text scale: ");
    modeString.appendU32(fSizeScale);
    modeString.append("x");
    fTimer.start();
    canvas->save();
#if SK_SUPPORT_GPU
    // Debug view: draw the GPU glyph atlas so its contents can be inspected.
    SkBaseDevice* device = canvas->getDevice_just_for_deprecated_compatibility_testing();
    GrContext* grContext = canvas->getGrContext();
    if (grContext) {
        GrTexture* tex = grContext->getFontAtlasTexture(GrMaskFormat::kA8_GrMaskFormat);
        reinterpret_cast<SkGpuDevice*>(device)->drawTexture(tex, SkRect::MakeXYWH(512, 10, 512, 512), paint);
    }
#endif
    // Rotate/scale about the point (180, 180).
    canvas->translate(180, 180);
    canvas->rotate(fRotation);
    canvas->scale(fScale, fScale);
    canvas->translate(-180, -180);
    const char* text = "Hamburgefons";
    size_t length = strlen(text);
    SkScalar y = SkIntToScalar(0);
    // One line of text per point size from 12 to 26, scaled by fSizeScale.
    for (int i = 12; i <= 26; i++) {
        paint.setTextSize(SkIntToScalar(i*fSizeScale));
        y += paint.getFontSpacing();
        DrawTheText(canvas, text, length, SkIntToScalar(110), y, paint);
    }
    canvas->restore();
    paint.setTextSize(16);
//        canvas->drawText(outString.c_str(), outString.size(), 512.f, 540.f, paint);
    canvas->drawText(modeString.c_str(), modeString.size(), 768.f, 540.f, paint);
}
// The gradient shader will use the texture strip atlas if it has too many colors. Make sure // abandoning the context works. DEF_GPUTEST_FOR_RENDERING_CONTEXTS(TextureStripAtlasManagerGradientTest, reporter, ctxInfo) { GrContext* context = ctxInfo.grContext(); static const SkColor gColors[] = { SK_ColorRED, SK_ColorGREEN, SK_ColorBLUE, SK_ColorCYAN, SK_ColorMAGENTA, SK_ColorYELLOW, SK_ColorBLACK }; static const SkScalar gPos[] = { 0, 0.17f, 0.32f, 0.49f, 0.66f, 0.83f, 1.0f }; SkPaint p; p.setShader(SkGradientShader::MakeTwoPointConical(SkPoint::Make(0, 0), 1.0f, SkPoint::Make(10.0f, 20.0f), 2.0f, gColors, gPos, 7, SkTileMode::kClamp)); SkImageInfo info = SkImageInfo::MakeN32Premul(128, 128); auto surface(SkSurface::MakeRenderTarget(context, SkBudgeted::kNo, info)); SkCanvas* canvas = surface->getCanvas(); SkRect r = SkRect::MakeXYWH(10, 10, 100, 100); canvas->drawRect(r, p); context->abandonContext(); }
// Replays the debug canvas up to command n and returns the GrAuditTrail batch
// list serialized as JSON. Requires the GPU to be enabled; without
// SK_SUPPORT_GPU the returned JSON is empty.
SkData* Request::getJsonBatchList(int n) {
    SkCanvas* canvas = this->getCanvas();
    SkASSERT(fGPUEnabled);
    // TODO if this is inefficient we could add a method to GrAuditTrail which takes
    // a Json::Value and is only compiled in this file
    Json::Value parsedFromString;
#if SK_SUPPORT_GPU
    GrRenderTarget* rt = canvas->internal_private_accessTopLayerRenderTarget();
    SkASSERT(rt);
    GrContext* ctx = rt->getContext();
    SkASSERT(ctx);
    GrAuditTrail* at = ctx->getAuditTrail();
    // Scoped enable: audit-trail batch collection is active only for this draw.
    GrAuditTrail::AutoManageBatchList enable(at);
    fDebugCanvas->drawTo(canvas, n);
    // Round-trip through Json::Reader; the parse result is only checked in
    // debug builds.
    Json::Reader reader;
    SkDEBUGCODE(bool parsingSuccessful = )reader.parse(at->toJson(true).c_str(), parsedFromString);
    SkASSERT(parsingSuccessful);
#endif
    SkDynamicMemoryWStream stream;
    stream.writeText(Json::FastWriter().write(parsedFromString).c_str());
    return stream.copyToData();
}
// Tests SkSpecialImage backed by a deferred GPU texture proxy: creates a full
// image and a padded subset (both directly and via makeSubset) and validates
// each with test_image.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SpecialImage_Gpu, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    SkBitmap bm = create_bm();
    const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bm.info(), *context->caps());
    sk_sp<GrTextureProxy> proxy(GrSurfaceProxy::MakeDeferred(context->resourceProvider(), desc, SkBudgeted::kNo, bm.getPixels(), bm.rowBytes()));
    if (!proxy) {
        return;
    }
    // Note: 'proxy' is shared here (copied) and moved below — order matters.
    sk_sp<SkSpecialImage> fullSImg(SkSpecialImage::MakeDeferredFromGpu(
        context, SkIRect::MakeWH(kFullSize, kFullSize), kNeedNewImageUniqueID_SpecialImage, proxy, nullptr));
    const SkIRect& subset = SkIRect::MakeXYWH(kPad, kPad, kSmallerSize, kSmallerSize);
    {
        // Subset built directly from the proxy (consumes our last reference).
        sk_sp<SkSpecialImage> subSImg1(SkSpecialImage::MakeDeferredFromGpu(
            context, subset, kNeedNewImageUniqueID_SpecialImage, std::move(proxy), nullptr));
        test_image(subSImg1, reporter, context, true, kPad, kFullSize);
    }
    {
        // Subset derived from the full image via makeSubset.
        sk_sp<SkSpecialImage> subSImg2(fullSImg->makeSubset(subset));
        test_image(subSImg2, reporter, context, true, kPad, kFullSize);
    }
}
// Returns the GrGLContext behind the canvas, or NULL when the canvas is not
// GPU-backed, the test target cannot be obtained, or the driver lacks
// glDrawArraysInstanced (which this bench requires).
static const GrGLContext* get_gl_context(SkCanvas* canvas) {
    // This bench exclusively tests GL calls directly
    GrContext* context = canvas->getGrContext();
    if (NULL == context) {
        return NULL;
    }
    GrTestTarget tt;
    context->getTestTarget(&tt);
    if (!tt.target()) {
        SkDebugf("Couldn't get Gr test target.");
        return NULL;
    }
    const GrGLContext* glCtx = tt.glContext();
    if (NULL == glCtx) {
        SkDebugf("Couldn't get an interface\n");
        return NULL;
    }
    // We only care about gpus with drawArraysInstanced support
    return glCtx->interface()->fFunctions.fDrawArraysInstanced ? glCtx : NULL;
}
// Computes how many processor stages the GL-programs test may stack for this
// context, working around per-platform driver limits (Android GLES, iOS
// varying limits, ANGLE D3D output-variable limits).
static int get_glprograms_max_stages(const sk_gpu_test::ContextInfo& ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    GrGLGpu* gpu = static_cast<GrGLGpu*>(context->contextPriv().getGpu());
    int maxStages = 6;
    const bool isGLES = (kGLES_GrGLStandard == gpu->glStandard());
    if (isGLES) {
        // We've had issues with driver crashes and HW limits being exceeded with many effects on
        // Android devices. We have passes on ARM devices with the default number of stages.
        // TODO When we run ES 3.00 GLSL in more places, test again
#ifdef SK_BUILD_FOR_ANDROID
        if (kARM_GrGLVendor != gpu->ctxInfo().vendor()) {
            maxStages = 1;
        }
#endif
        // On iOS we can exceed the maximum number of varyings. http://skbug.com/6627.
#ifdef SK_BUILD_FOR_IOS
        maxStages = 3;
#endif
    }
    const auto type = ctxInfo.type();
    const bool isAngleD3D =
            type == sk_gpu_test::GrContextFactory::kANGLE_D3D9_ES2_ContextType ||
            type == sk_gpu_test::GrContextFactory::kANGLE_D3D11_ES2_ContextType;
    if (isAngleD3D) {
        // On Angle D3D we will hit a limit of out variables if we use too many stages.
        maxStages = 3;
    }
    return maxStages;
}
// Returns a texture (new reference) that is safe to sample with 'params'.
// Either the original texture is returned as-is (with *outOffset set to the
// content area's top-left so the caller can adjust coordinates), or a
// suitably-copied texture is created (offset (0,0)).
GrTexture* GrTextureAdjuster::refTextureSafeForParams(const GrTextureParams& params,
                                                      SkIPoint* outOffset) {
    GrTexture* texture = this->originalTexture();
    GrContext* context = texture->getContext();
    CopyParams copyParams;
    const SkIRect* contentArea = this->contentAreaOrNull();
    if (contentArea && GrTextureParams::kMipMap_FilterMode == params.filterMode()) {
        // If we generate a MIP chain for texture it will read pixel values from outside the content
        // area.
        copyParams.fWidth = contentArea->width();
        copyParams.fHeight = contentArea->height();
        copyParams.fFilter = GrTextureParams::kBilerp_FilterMode;
    } else if (!context->getGpu()->makeCopyForTextureParams(texture, params, &copyParams)) {
        // No copy needed: hand back the original texture plus the offset of
        // its content area.
        if (outOffset) {
            if (contentArea) {
                // Fix: the y component must be the top edge (fTop); the
                // original code passed fRight (the right edge) here.
                outOffset->set(contentArea->fLeft, contentArea->fTop);
            } else {
                outOffset->set(0, 0);
            }
        }
        return SkRef(texture);
    }
    GrTexture* copy = this->refCopy(copyParams);
    if (copy && outOffset) {
        outOffset->set(0, 0);
    }
    return copy;
}
// Exercises texture-proxy creation paths (deferred, deferred render-target,
// wrapped, wrapped-with-key) across exact/approx backing fits, verifying the
// resource cache and unique-key bookkeeping stay balanced, then runs the
// invalidation tests.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(TextureProxyTest, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    GrResourceCache* cache = context->priv().getResourceCache();
    // Preconditions: nothing cached, no keyed proxies.
    REPORTER_ASSERT(reporter, !proxyProvider->numUniqueKeyProxies_TestOnly());
    REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
    for (auto fit : { SkBackingFit::kExact, SkBackingFit::kApprox }) {
        for (auto create : { deferred_tex, deferred_texRT, wrapped, wrapped_with_key }) {
            REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
            basic_test(context, reporter, create(reporter, context, proxyProvider, fit));
        }
        REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
        // A wrapped backend proxy keeps its backing texture alive; release the
        // backing ref and purge so the next fit iteration starts clean.
        sk_sp<GrTexture> backingTex;
        sk_sp<GrTextureProxy> proxy = create_wrapped_backend(context, fit, &backingTex);
        basic_test(context, reporter, std::move(proxy));
        backingTex = nullptr;
        cache->purgeAllUnlocked();
    }
    invalidation_test(context, reporter);
    invalidation_and_instantiation_test(context, reporter);
}
// Draws an empty path into GPU canvases across all GL context types and
// sample counts (0/4/16), checking nothing crashes.
DEF_GPUTEST(GpuDrawPath, reporter, factory) {
    // NOTE(review): this early return disables the entire test — everything
    // below is dead code. Presumably a deliberate temporary disable; confirm
    // before re-enabling.
    return;
    for (int type = 0; type < GrContextFactory::kLastGLContextType; ++type) {
        GrContextFactory::GLContextType glType = static_cast<GrContextFactory::GLContextType>(type);
        GrContext* grContext = factory->get(glType);
        if (NULL == grContext) {
            continue;
        }
        static const int sampleCounts[] = { 0, 4, 16 };
        for (size_t i = 0; i < SK_ARRAY_COUNT(sampleCounts); ++i) {
            const int W = 255;
            const int H = 255;
            GrTextureDesc desc;
            desc.fConfig = kSkia8888_GrPixelConfig;
            desc.fFlags = kRenderTarget_GrTextureFlagBit;
            desc.fWidth = W;
            desc.fHeight = H;
            desc.fSampleCnt = sampleCounts[i];
            SkAutoTUnref<GrTexture> texture(grContext->createUncachedTexture(desc, NULL, 0));
            SkAutoTUnref<SkGpuDevice> device(SkNEW_ARGS(SkGpuDevice, (grContext, texture.get())));
            SkCanvas drawingCanvas(device.get());
            test_drawPathEmpty(reporter, &drawingCanvas);
        }
    }
}
// Produces an image snapshot of this GPU surface. If the render target is not
// texture-backed, or a copy is forced, the contents are copied into a fresh
// (non-render-target) texture so the image is independent of future drawing.
SkImage* SkSurface_Gpu::onNewImageSnapshot(SkBudgeted budgeted, ForceCopyMode forceCopyMode) {
    GrRenderTarget* rt = fDevice->accessRenderTarget();
    SkASSERT(rt);
    GrTexture* tex = rt->asTexture();
    SkAutoTUnref<GrTexture> copy;
    // TODO: Force a copy when the rt is an external resource.
    if (kYes_ForceCopyMode == forceCopyMode || !tex) {
        GrSurfaceDesc desc = fDevice->accessRenderTarget()->desc();
        GrContext* ctx = fDevice->context();
        // The snapshot texture is plain (not a render target).
        desc.fFlags = desc.fFlags & ~kRenderTarget_GrSurfaceFlag;
        copy.reset(ctx->textureProvider()->createTexture(desc, budgeted));
        if (!copy) {
            return nullptr;
        }
        if (!ctx->copySurface(copy, rt)) {
            return nullptr;
        }
        tex = copy;
    }
    const SkImageInfo info = fDevice->imageInfo();
    SkImage* image = nullptr;
    if (tex) {
        image = new SkImage_Gpu(info.width(), info.height(), kNeedNewImageUniqueID,
                                info.alphaType(), tex, budgeted);
    }
    return image;
}
// Produces an image snapshot of this GPU surface (sk_sp variant). A copy into
// a fresh non-render-target texture is made when forced, when the RT is not
// texture-backed, or when the RT wraps a client-owned backend object (to keep
// copy-on-write from ever retargeting onto the client's buffer).
sk_sp<SkImage> SkSurface_Gpu::onNewImageSnapshot(SkBudgeted budgeted, ForceCopyMode forceCopyMode) {
    GrRenderTarget* rt = fDevice->accessDrawContext()->accessRenderTarget();
    SkASSERT(rt);
    GrTexture* tex = rt->asTexture();
    SkAutoTUnref<GrTexture> copy;
    // If the original render target is a buffer originally created by the client, then we don't
    // want to ever retarget the SkSurface at another buffer we create. Force a copy now to avoid
    // copy-on-write.
    if (kYes_ForceCopyMode == forceCopyMode || !tex || rt->resourcePriv().refsWrappedObjects()) {
        GrSurfaceDesc desc = fDevice->accessDrawContext()->desc();
        GrContext* ctx = fDevice->context();
        // The snapshot texture is plain (not a render target).
        desc.fFlags = desc.fFlags & ~kRenderTarget_GrSurfaceFlag;
        copy.reset(ctx->textureProvider()->createTexture(desc, budgeted));
        if (!copy) {
            return nullptr;
        }
        if (!ctx->copySurface(copy, rt)) {
            return nullptr;
        }
        tex = copy;
    }
    const SkImageInfo info = fDevice->imageInfo();
    sk_sp<SkImage> image;
    if (tex) {
        image = sk_make_sp<SkImage_Gpu>(info.width(), info.height(), kNeedNewImageUniqueID,
                                        info.alphaType(), tex, sk_ref_sp(info.colorSpace()),
                                        budgeted);
    }
    return image;
}
// GPU path for the xfermode image filter: composites the foreground input over
// the background input using fMode's fragment processor, rendering into a
// scratch texture. Falls back to onFilterImage if either input cannot be
// resolved on the GPU.
bool SkXfermodeImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const Context& ctx, SkBitmap* result, SkIPoint* offset) const {
    SkBitmap background = src;
    SkIPoint backgroundOffset = SkIPoint::Make(0, 0);
    if (getInput(0) && !getInput(0)->getInputResultGPU(proxy, src, ctx, &background, &backgroundOffset)) {
        return onFilterImage(proxy, src, ctx, result, offset);
    }
    GrTexture* backgroundTex = background.getTexture();
    SkBitmap foreground = src;
    SkIPoint foregroundOffset = SkIPoint::Make(0, 0);
    if (getInput(1) && !getInput(1)->getInputResultGPU(proxy, src, ctx, &foreground, &foregroundOffset)) {
        return onFilterImage(proxy, src, ctx, result, offset);
    }
    GrTexture* foregroundTex = foreground.getTexture();
    GrContext* context = foregroundTex->getContext();
    GrFragmentProcessor* xferProcessor = NULL;
    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag;
    desc.fWidth = src.width();
    desc.fHeight = src.height();
    desc.fConfig = kSkia8888_GrPixelConfig;
    SkAutoTUnref<GrTexture> dst(
        context->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch));
    if (!dst) {
        return false;
    }
    GrContext::AutoRenderTarget art(context, dst->asRenderTarget());
    if (!fMode || !fMode->asFragmentProcessor(&xferProcessor, backgroundTex)) {
        // canFilterImageGPU() should've taken care of this
        SkASSERT(false);
        return false;
    }
    // Map foreground texture coords into the background's space, accounting
    // for the relative offsets of the two inputs.
    SkMatrix foregroundMatrix = GrCoordTransform::MakeDivByTextureWHMatrix(foregroundTex);
    foregroundMatrix.preTranslate(SkIntToScalar(backgroundOffset.fX-foregroundOffset.fX),
                                  SkIntToScalar(backgroundOffset.fY-foregroundOffset.fY));
    SkRect srcRect;
    src.getBounds(&srcRect);
    GrPaint paint;
    paint.addColorTextureProcessor(foregroundTex, foregroundMatrix);
    paint.addColorProcessor(xferProcessor)->unref();
    context->drawRect(paint, srcRect);
    offset->fX = backgroundOffset.fX;
    offset->fY = backgroundOffset.fY;
    WrapTexture(dst, src.width(), src.height(), result);
    return true;
}
void GrRenderTarget::resolve() { // go through context so that all necessary flushing occurs GrContext* context = this->getContext(); if (NULL == context) { return; } context->resolveRenderTarget(this); }
// GM: renders the Y/U/V planes through GrYUVtoRGBEffect for every
// SkYUVColorSpace, one row per color space. GPU-only; emits the standard
// "GPU only" message on raster backends.
void onDraw(SkCanvas* canvas) override {
    GrRenderTargetContext* renderTargetContext = canvas->internal_private_accessTopLayerRenderTargetContext();
    if (!renderTargetContext) {
        skiagm::GM::DrawGpuOnlyMessage(canvas);
        return;
    }
    GrContext* context = canvas->getGrContext();
    if (!context) {
        return;
    }
    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
    sk_sp<GrTextureProxy> proxy[3];
    // Upload the plane bitmaps: index 0 is the Y plane, 1 and 2 share the
    // second bitmap (U and V planes).
    for (int i = 0; i < 3; ++i) {
        int index = (0 == i) ? 0 : 1;
        GrSurfaceDesc desc;
        desc.fWidth = fBmp[index].width();
        desc.fHeight = fBmp[index].height();
        desc.fConfig = SkImageInfo2GrPixelConfig(fBmp[index].info(), *context->caps());
        SkASSERT(kUnknown_GrPixelConfig != desc.fConfig);
        proxy[i] = proxyProvider->createTextureProxy(
            desc, SkBudgeted::kYes, fBmp[index].getPixels(), fBmp[index].rowBytes());
        if (!proxy[i]) {
            return;
        }
    }
    constexpr SkScalar kDrawPad = 10.f;
    constexpr SkScalar kTestPad = 10.f;
    constexpr SkScalar kColorSpaceOffset = 36.f;
    SkISize sizes[3] = {{YSIZE, YSIZE}, {USIZE, USIZE}, {VSIZE, VSIZE}};
    // One output row per YUV color space.
    for (int space = kJPEG_SkYUVColorSpace; space <= kLastEnum_SkYUVColorSpace; ++space) {
        SkRect renderRect = SkRect::MakeWH(SkIntToScalar(fBmp[0].width()), SkIntToScalar(fBmp[0].height()));
        renderRect.outset(kDrawPad, kDrawPad);
        SkScalar y = kDrawPad + kTestPad + space * kColorSpaceOffset;
        SkScalar x = kDrawPad + kTestPad;
        GrPaint grPaint;
        grPaint.setXPFactory(GrPorterDuffXPFactory::Get(SkBlendMode::kSrc));
        auto fp = GrYUVtoRGBEffect::Make(proxy[0], proxy[1], proxy[2], sizes,
                                         static_cast<SkYUVColorSpace>(space), true);
        if (fp) {
            SkMatrix viewMatrix;
            viewMatrix.setTranslate(x, y);
            grPaint.addColorFragmentProcessor(std::move(fp));
            std::unique_ptr<GrDrawOp> op(GrRectOpFactory::MakeNonAAFill(
                std::move(grPaint), viewMatrix, renderRect, GrAAType::kNone));
            renderTargetContext->priv().testingOnly_addDrawOp(std::move(op));
        }
    }
}
bool SkImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const Context& ctx, SkBitmap* result, SkIPoint* offset) const { #if SK_SUPPORT_GPU SkBitmap input = src; SkASSERT(fInputCount == 1); SkIPoint srcOffset = SkIPoint::Make(0, 0); if (!this->filterInputGPU(0, proxy, src, ctx, &input, &srcOffset)) { return false; } GrTexture* srcTexture = input.getTexture(); SkIRect bounds; if (!this->applyCropRect(ctx, proxy, input, &srcOffset, &bounds, &input)) { return false; } SkRect srcRect = SkRect::Make(bounds); SkRect dstRect = SkRect::MakeWH(srcRect.width(), srcRect.height()); GrContext* context = srcTexture->getContext(); GrSurfaceDesc desc; desc.fFlags = kRenderTarget_GrSurfaceFlag, desc.fWidth = bounds.width(); desc.fHeight = bounds.height(); desc.fConfig = kRGBA_8888_GrPixelConfig; SkAutoTUnref<GrTexture> dst(context->textureProvider()->createTexture(desc, GrTextureProvider::FromImageFilter(ctx.sizeConstraint()))); if (!dst) { return false; } // setup new clip GrClip clip(dstRect); GrFragmentProcessor* fp; offset->fX = bounds.left(); offset->fY = bounds.top(); bounds.offset(-srcOffset); SkMatrix matrix(ctx.ctm()); matrix.postTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top())); GrPaint paint; if (this->asFragmentProcessor(&fp, srcTexture, matrix, bounds)) { SkASSERT(fp); paint.addColorFragmentProcessor(fp)->unref(); paint.setPorterDuffXPFactory(SkXfermode::kSrc_Mode); SkAutoTUnref<GrDrawContext> drawContext(context->drawContext(dst->asRenderTarget())); if (drawContext) { drawContext->fillRectToRect(clip, paint, SkMatrix::I(), dstRect, srcRect); WrapTexture(dst, bounds.width(), bounds.height(), result); return true; } } #endif return false; }
// Test out the SkSpecialImage::makeTextureImage entry point DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SpecialImage_MakeTexture, reporter, ctxInfo) { GrContext* context = ctxInfo.grContext(); SkBitmap bm = create_bm(); const SkIRect& subset = SkIRect::MakeXYWH(kPad, kPad, kSmallerSize, kSmallerSize); { // raster sk_sp<SkSpecialImage> rasterImage(SkSpecialImage::MakeFromRaster( SkIRect::MakeWH(kFullSize, kFullSize), bm)); { sk_sp<SkSpecialImage> fromRaster(rasterImage->makeTextureImage(context)); test_texture_backed(reporter, rasterImage, fromRaster); } { sk_sp<SkSpecialImage> subRasterImage(rasterImage->makeSubset(subset)); sk_sp<SkSpecialImage> fromSubRaster(subRasterImage->makeTextureImage(context)); test_texture_backed(reporter, subRasterImage, fromSubRaster); } } { // gpu const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bm.info(), *context->caps()); sk_sp<GrTextureProxy> proxy(GrSurfaceProxy::MakeDeferred(context->resourceProvider(), desc, SkBudgeted::kNo, bm.getPixels(), bm.rowBytes())); if (!proxy) { return; } sk_sp<SkSpecialImage> gpuImage(SkSpecialImage::MakeDeferredFromGpu( context, SkIRect::MakeWH(kFullSize, kFullSize), kNeedNewImageUniqueID_SpecialImage, std::move(proxy), nullptr)); { sk_sp<SkSpecialImage> fromGPU(gpuImage->makeTextureImage(context)); test_texture_backed(reporter, gpuImage, fromGPU); } { sk_sp<SkSpecialImage> subGPUImage(gpuImage->makeSubset(subset)); sk_sp<SkSpecialImage> fromSubGPU(subGPUImage->makeTextureImage(context)); test_texture_backed(reporter, subGPUImage, fromSubGPU); } } }
// Copies 'texture' (or a subset of it) into a new uncached render-target
// texture and wraps that copy in an SkGrPixelRef. Returns NULL on any
// failure (null texture/context, unmappable config, allocation failure).
static SkGrPixelRef* copyToTexturePixelRef(GrTexture* texture, const SkIRect* subset) {
    if (NULL == texture) {
        return NULL;
    }
    GrContext* context = texture->getContext();
    if (NULL == context) {
        return NULL;
    }
    GrTextureDesc desc;
    SkIPoint pointStorage;
    SkIPoint* topLeft;
    if (subset != NULL) {
        SkASSERT(SkIRect::MakeWH(texture->width(), texture->height()).contains(*subset));
        // Create a new texture that is the size of subset.
        desc.fWidth = subset->width();
        desc.fHeight = subset->height();
        pointStorage.set(subset->x(), subset->y());
        topLeft = &pointStorage;
    } else {
        desc.fWidth = texture->width();
        desc.fHeight = texture->height();
        topLeft = NULL;
    }
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fConfig = texture->config();
    SkImageInfo info;
    if (!GrPixelConfig2ColorType(desc.fConfig, &info.fColorType)) {
        return NULL;
    }
    info.fWidth = desc.fWidth;
    info.fHeight = desc.fHeight;
    info.fAlphaType = kPremul_SkAlphaType;
    GrTexture* dst = context->createUncachedTexture(desc, NULL, 0);
    if (NULL == dst) {
        return NULL;
    }
    context->copyTexture(texture, dst->asRenderTarget(), topLeft);
    // TODO: figure out if this is responsible for Chrome canvas errors
#if 0
    // The render texture we have created (to perform the copy) isn't fully
    // functional (since it doesn't have a stencil buffer). Release it here
    // so the caller doesn't try to render to it.
    // TODO: we can undo this release when dynamic stencil buffer attach/
    // detach has been implemented
    dst->releaseRenderTarget();
#endif
    // The pixel ref takes its own ref on dst; drop ours.
    SkGrPixelRef* pixelRef = SkNEW_ARGS(SkGrPixelRef, (info, dst));
    SkSafeUnref(dst);
    return pixelRef;
}
void GrContext::TextBlobCacheOverBudgetCB(void* data) { SkASSERT(data); // Unlike the GrResourceCache, TextBlobs are drawn at the SkGpuDevice level, therefore they // cannot use fFlushTorReduceCacheSize because it uses AutoCheckFlush. The solution is to move // drawText calls to below the GrContext level, but this is not trivial because they call // drawPath on SkGpuDevice GrContext* context = reinterpret_cast<GrContext*>(data); context->flush(); }
void GrRenderTarget::discard() { // go through context so that all necessary flushing occurs GrContext* context = this->getContext(); GrDrawContext* drawContext = context ? context->drawContext() : NULL; if (!drawContext) { return; } drawContext->discard(this); }
// Returns (a new reference to) the GrPath for skPath+stroke, consulting the
// context's resource cache first and creating/caching a fresh path on a miss
// or a stale hit.
static GrPath* get_gr_path(GrGpu* gpu, const SkPath& skPath, const SkStrokeRec& stroke) {
    GrContext* ctx = gpu->getContext();
    GrResourceKey resourceKey = GrPath::ComputeKey(skPath, stroke);
    SkAutoTUnref<GrPath> path(static_cast<GrPath*>(ctx->findAndRefCachedResource(resourceKey)));
    const bool cacheMiss = (NULL == path) || !path->isEqualTo(skPath, stroke);
    if (cacheMiss) {
        path.reset(gpu->pathRendering()->createPath(skPath, stroke));
        ctx->addResourceToCache(resourceKey, path);
    }
    // detach(): transfer our reference to the caller.
    return path.detach();
}
// Returns the audit trail of the canvas's GrContext, or nullptr when the
// canvas is not GPU-backed (or GPU support is compiled out).
GrAuditTrail* SkDebugCanvas::getAuditTrail(SkCanvas* canvas) {
#if SK_SUPPORT_GPU
    if (GrContext* ctx = canvas->getGrContext()) {
        return ctx->getAuditTrail();
    }
#endif
    return nullptr;
}
// Destructor: returns a locked scratch texture to the context's cache (when
// fUnlock is set) and releases our reference on the backing surface.
SkGrPixelRef::~SkGrPixelRef() {
    if (fUnlock) {
        GrContext* ctx = fSurface->getContext();
        GrTexture* tex = fSurface->asTexture();
        if (ctx && tex) {
            ctx->unlockScratchTexture(tex);
        }
    }
    GrSafeUnref(fSurface);
}
// Creates the FBO (with a packed depth-stencil renderbuffer) and wraps it in
// a Skia GPU canvas for tile painting. Returns false if the required GL
// extension or any GL object creation fails.
bool LayerTextureUpdaterSkPicture::createFrameBuffer()
{
    ASSERT(!m_fbo);
    ASSERT(!m_bufferSize.isEmpty());

    // SKIA only needs color and stencil buffers, not depth buffer.
    // But it is very uncommon for cards to support color + stencil FBO config.
    // The most common config is color + packed-depth-stencil.
    // Instead of iterating through all possible FBO configs, we only try the
    // most common one here.
    // FIXME: Delegate the task of creating frame-buffer to SKIA.
    // It has all necessary code to iterate through all possible configs
    // and choose the one most suitable for its purposes.
    Extensions3D* extensions = context()->getExtensions();
    if (!extensions->supports("GL_OES_packed_depth_stencil"))
        return false;
    extensions->ensureEnabled("GL_OES_packed_depth_stencil");

    // Create and bind a frame-buffer-object.
    m_fbo = context()->createFramebuffer();
    if (!m_fbo)
        return false;
    context()->bindFramebuffer(GraphicsContext3D::FRAMEBUFFER, m_fbo);

    // We just need to create a stencil buffer for FBO.
    // The color buffer (texture) will be provided by tiles.
    // SKIA does not need depth buffer.
    m_depthStencilBuffer = context()->createRenderbuffer();
    if (!m_depthStencilBuffer) {
        deleteFrameBuffer();
        return false;
    }
    context()->bindRenderbuffer(GraphicsContext3D::RENDERBUFFER, m_depthStencilBuffer);
    context()->renderbufferStorage(GraphicsContext3D::RENDERBUFFER, Extensions3D::DEPTH24_STENCIL8, m_bufferSize.width(), m_bufferSize.height());
    // The single packed renderbuffer is attached as both depth and stencil.
    context()->framebufferRenderbuffer(GraphicsContext3D::FRAMEBUFFER, GraphicsContext3D::DEPTH_ATTACHMENT, GraphicsContext3D::RENDERBUFFER, m_depthStencilBuffer);
    context()->framebufferRenderbuffer(GraphicsContext3D::FRAMEBUFFER, GraphicsContext3D::STENCIL_ATTACHMENT, GraphicsContext3D::RENDERBUFFER, m_depthStencilBuffer);

    // Create a skia gpu canvas.
    // Wrap the FBO in a GrRenderTarget so Skia can draw into it.
    GrContext* skiaContext = m_context->grContext();
    GrPlatformSurfaceDesc targetDesc;
    targetDesc.reset();
    targetDesc.fSurfaceType = kRenderTarget_GrPlatformSurfaceType;
    targetDesc.fRenderTargetFlags = kNone_GrPlatformRenderTargetFlagBit;
    targetDesc.fWidth = m_bufferSize.width();
    targetDesc.fHeight = m_bufferSize.height();
    targetDesc.fConfig = kRGBA_8888_GrPixelConfig;
    targetDesc.fStencilBits = 8;
    targetDesc.fPlatformRenderTarget = m_fbo;
    SkAutoTUnref<GrRenderTarget> target(static_cast<GrRenderTarget*>(skiaContext->createPlatformSurface(targetDesc)));
    SkAutoTUnref<SkDevice> device(new SkGpuDevice(skiaContext, target.get()));
    m_canvas = adoptPtr(new SkCanvas(device.get()));

    // Leave the default framebuffer bound when we're done.
    context()->bindFramebuffer(GraphicsContext3D::FRAMEBUFFER, 0);
    return true;
}
// Factory: allocates a GrContext for the given backend and initializes it.
// Returns a fully initialized context, or NULL when init() fails (the
// partially-constructed context is released).
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const GrContextOptions& options) {
    GrContext* context = SkNEW(GrContext);
    if (!context->init(backend, backendContext, options)) {
        context->unref();
        return NULL;
    }
    return context;
}
// Factory: allocates a GrContext for the given backend and initializes it.
// Returns a fully initialized context, or nullptr when init() fails (the
// partially-constructed context is released).
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const GrContextOptions& options) {
    GrContext* context = new GrContext;
    if (!context->init(backend, backendContext, options)) {
        context->unref();
        return nullptr;
    }
    return context;
}