// Creates a new texture. When the caller requested a render target that
// wants stencil support, a stencil buffer is attached up front; failure to
// attach one fails the whole creation.
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc, const void* srcData, size_t rowBytes) {
    this->handleDirtyContext();
    GrTexture* texture = this->onCreateTexture(desc, srcData, rowBytes);
    if (NULL == texture) {
        return NULL;
    }
    bool isRenderTarget = 0 != (kRenderTarget_GrTextureFlagBit & desc.fFlags);
    bool skipStencil = 0 != (kNoStencil_GrTextureFlagBit & desc.fFlags);
    if (isRenderTarget && !skipStencil) {
        GrAssert(NULL != texture->asRenderTarget());
        // TODO: defer this and attach dynamically
        if (!this->attachStencilBufferToRenderTarget(texture->asRenderTarget())) {
            texture->unref();
            return NULL;
        }
    }
    return texture;
}
/**
 * This method allows us to interrupt the normal deletion process and place
 * textures back in the texture cache when their ref count goes to zero.
 */
void GrTexture::internal_dispose() const {
    // Only intercept disposal when the texture was flagged for cache return
    // and it still has a live context to hand it back to.
    if (this->impl()->isSetFlag((GrTextureFlags) GrTextureImpl::kReturnToCache_FlagBit) &&
        this->INHERITED::getContext()) {
        // internal_dispose() is const, but returning the texture to the cache
        // mutates it — hence the const_cast.
        GrTexture* nonConstThis = const_cast<GrTexture *>(this);
        this->ref(); // restore ref count to initial setting

        // Clear the flag so a later dispose (after the cache releases us
        // again) takes the normal deletion path.
        nonConstThis->impl()->resetFlag((GrTextureFlags) GrTextureImpl::kReturnToCache_FlagBit);
        nonConstThis->INHERITED::getContext()->addExistingTextureToCache(nonConstThis);

        // Note: "this" texture might be freed inside addExistingTextureToCache
        // if it is purged.
        return;
    }
    // Normal path: defer to the base class implementation.
    this->INHERITED::internal_dispose();
}
// Wraps a client-supplied backend texture object in a GrTexture. If the
// wrapped texture is renderable, a stencil buffer is attached immediately;
// failure to attach drops the texture and returns NULL.
GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    this->handleDirtyContext();
    GrTexture* texture = this->onWrapBackendTexture(desc);
    if (NULL == texture) {
        return NULL;
    }
    // TODO: defer this and attach dynamically
    GrRenderTarget* renderTarget = texture->asRenderTarget();
    if (NULL == renderTarget) {
        return texture;
    }
    if (!this->attachStencilBufferToRenderTarget(renderTarget)) {
        texture->unref();
        return NULL;
    }
    return texture;
}
static inline void GenKey(const GrGeometryProcessor& gp, const GrGLSLCaps&, GrProcessorKeyBuilder* b) { const GrDistanceFieldA8TextGeoProc& dfTexEffect = gp.cast<GrDistanceFieldA8TextGeoProc>(); uint32_t key = dfTexEffect.getFlags(); key |= dfTexEffect.colorIgnored() << 16; key |= ComputePosKey(dfTexEffect.viewMatrix()) << 25; b->add32(key); // Currently we hardcode numbers to convert atlas coordinates to normalized floating point SkASSERT(gp.numTextures() == 1); GrTexture* atlas = gp.textureAccess(0).getTexture(); SkASSERT(atlas); b->add32(atlas->width()); b->add32(atlas->height()); }
// GPU path for the blur image filter: evaluates the input filter, crops to
// the filter bounds, maps fSigma through the CTM, and runs a two-pass
// Gaussian blur. On success, fills 'result'/'offset' and returns true.
bool SkBlurImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const Context& ctx,
                                       SkBitmap* result, SkIPoint* offset) const {
#if SK_SUPPORT_GPU
    SkBitmap input = src;
    SkIPoint srcOffset = SkIPoint::Make(0, 0);
    // Recursively evaluate input filter 0 on the GPU first.
    if (!this->filterInputGPU(0, proxy, src, ctx, &input, &srcOffset)) {
        return false;
    }
    SkIRect srcBounds = input.bounds();
    srcBounds.offset(srcOffset);
    SkIRect dstBounds;
    if (!this->applyCropRect(this->mapContext(ctx), srcBounds, &dstBounds)) {
        return false;
    }
    if (!srcBounds.intersect(dstBounds)) {
        // Source and destination don't overlap: nothing to draw.
        return false;
    }
    // Scale the blur sigma by the current transform.
    SkVector sigma = map_sigma(fSigma, ctx.ctm());
    if (sigma.x() == 0 && sigma.y() == 0) {
        // Degenerate blur: return the cropped input unmodified.
        input.extractSubset(result, srcBounds);
        offset->fX = srcBounds.x();
        offset->fY = srcBounds.y();
        return true;
    }
    offset->fX = dstBounds.fLeft;
    offset->fY = dstBounds.fTop;
    // Translate bounds into the input texture's coordinate space.
    srcBounds.offset(-srcOffset);
    dstBounds.offset(-srcOffset);
    SkRect srcBoundsF(SkRect::Make(srcBounds));
    GrTexture* inputTexture = input.getTexture();
    SkAutoTUnref<GrTexture> tex(SkGpuBlurUtils::GaussianBlur(inputTexture->getContext(),
                                                             inputTexture,
                                                             false,
                                                             SkRect::Make(dstBounds),
                                                             &srcBoundsF,
                                                             sigma.x(),
                                                             sigma.y()));
    if (!tex) {
        return false;
    }
    GrWrapTextureInBitmap(tex, dstBounds.width(), dstBounds.height(), false, result);
    return true;
#else
    SkDEBUGFAIL("Should not call in GPU-less build");
    return false;
#endif
}
// Create a mask of 'devPath' and place the result in 'mask'. static GrTexture* create_mask_GPU(GrContext* context, const SkRect& maskRect, const SkPath& devPath, const GrStrokeInfo& strokeInfo, bool doAA, int sampleCnt) { GrSurfaceDesc desc; desc.fFlags = kRenderTarget_GrSurfaceFlag; desc.fWidth = SkScalarCeilToInt(maskRect.width()); desc.fHeight = SkScalarCeilToInt(maskRect.height()); desc.fSampleCnt = doAA ? sampleCnt : 0; // We actually only need A8, but it often isn't supported as a // render target so default to RGBA_8888 desc.fConfig = kRGBA_8888_GrPixelConfig; if (context->caps()->isConfigRenderable(kAlpha_8_GrPixelConfig, desc.fSampleCnt > 0)) { desc.fConfig = kAlpha_8_GrPixelConfig; } GrTexture* mask = context->textureProvider()->createApproxTexture(desc); if (nullptr == mask) { return nullptr; } SkRect clipRect = SkRect::MakeWH(maskRect.width(), maskRect.height()); GrDrawContext* drawContext = context->drawContext(); if (!drawContext) { return nullptr; } drawContext->clear(mask->asRenderTarget(), nullptr, 0x0, true); GrPaint tempPaint; tempPaint.setAntiAlias(doAA); tempPaint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op); // setup new clip GrClip clip(clipRect); // Draw the mask into maskTexture with the path's top-left at the origin using tempPaint. SkMatrix translate; translate.setTranslate(-maskRect.fLeft, -maskRect.fTop); drawContext->drawPath(mask->asRenderTarget(), clip, tempPaint, translate, devPath, strokeInfo); return mask; }
static inline void GenKey(const GrGeometryProcessor& proc, const GrGLSLCaps&, GrProcessorKeyBuilder* b) { const GrBitmapTextGeoProc& gp = proc.cast<GrBitmapTextGeoProc>(); uint32_t key = 0; key |= gp.usesLocalCoords() && gp.localMatrix().hasPerspective() ? 0x1 : 0x0; key |= gp.colorIgnored() ? 0x2 : 0x0; key |= gp.maskFormat() << 3; b->add32(key); // Currently we hardcode numbers to convert atlas coordinates to normalized floating point SkASSERT(gp.numTextures() == 1); GrTexture* atlas = gp.textureAccess(0).getTexture(); SkASSERT(atlas); b->add32(atlas->width()); b->add32(atlas->height()); }
// Builds a magnifier effect with randomized source rect and inset, for
// processor unit testing.
GrFragmentProcessor* GrMagnifierEffect::TestCreate(SkRandom* random, GrContext* context, const GrDrawTargetCaps&, GrTexture** textures) {
    GrTexture* texture = textures[0];
    const int kMaxWidth = 200;
    const int kMaxHeight = 200;
    const int kMaxInset = 20;
    const uint32_t w = random->nextULessThan(kMaxWidth);
    const uint32_t h = random->nextULessThan(kMaxHeight);
    const uint32_t x = random->nextULessThan(kMaxWidth - w);
    const uint32_t y = random->nextULessThan(kMaxHeight - h);
    const uint32_t inset = random->nextULessThan(kMaxInset);

    const float xZoom = texture->width() / (float) x;
    const float yZoom = texture->height() / (float) y;
    GrFragmentProcessor* effect = GrMagnifierEffect::Create(texture,
                                                            (float) w / texture->width(),
                                                            (float) h / texture->height(),
                                                            xZoom,
                                                            yZoom,
                                                            (float) inset / texture->width(),
                                                            (float) inset / texture->height());
    SkASSERT(effect);
    return effect;
}
void GrBatchFontCache::dump() const { static int gDumpCount = 0; for (int i = 0; i < kMaskFormatCount; ++i) { if (fAtlases[i]) { GrTexture* texture = fAtlases[i]->getTexture(); if (texture) { SkString filename; #ifdef SK_BUILD_FOR_ANDROID filename.printf("/sdcard/fontcache_%d%d.png", gDumpCount, i); #else filename.printf("fontcache_%d%d.png", gDumpCount, i); #endif texture->surfacePriv().savePixels(filename.c_str()); } } } ++gDumpCount; }
void GrFontCache::dump() const { static int gDumpCount = 0; for (int i = 0; i < kAtlasCount; ++i) { if (NULL != fAtlasMgr[i]) { GrTexture* texture = fAtlasMgr[i]->getTexture(); if (NULL != texture) { SkString filename; #ifdef SK_BUILD_FOR_ANDROID filename.printf("/sdcard/fontcache_%d%d.png", gDumpCount, i); #else filename.printf("fontcache_%d%d.png", gDumpCount, i); #endif texture->savePixels(filename.c_str()); } } } ++gDumpCount; }
// Writes one quad (kVerticesPerQuad vertices) for a path's atlas entry into
// the vertex buffer at byte address 'offset'. Each vertex is laid out as
//   { SkPoint position, GrColor color, SkPoint texCoord }
// with 'vertexStride' bytes between successive vertices.
// NOTE(review): 'target' and 'viewMatrix' are unused here — positions are
// written in the shape's local (inverse-scaled) space.
void writePathVertices(GrDrawBatch::Target* target,
                       GrBatchAtlas* atlas,
                       intptr_t offset,
                       GrColor color,
                       size_t vertexStride,
                       const SkMatrix& viewMatrix,
                       const ShapeData* shapeData) const {
    GrTexture* texture = atlas->getTexture();

    // Bounds were recorded at fScale; divide it back out to get the
    // local-space quad geometry.
    SkScalar dx = shapeData->fBounds.fLeft;
    SkScalar dy = shapeData->fBounds.fTop;
    SkScalar width = shapeData->fBounds.width();
    SkScalar height = shapeData->fBounds.height();

    SkScalar invScale = 1.0f / shapeData->fScale;
    dx *= invScale;
    dy *= invScale;
    width *= invScale;
    height *= invScale;

    SkPoint* positions = reinterpret_cast<SkPoint*>(offset);

    // vertex positions
    // TODO make the vertex attributes a struct
    SkRect r = SkRect::MakeXYWH(dx, dy, width, height);
    positions->setRectFan(r.left(), r.top(), r.right(), r.bottom(), vertexStride);

    // colors: one GrColor per vertex, placed right after the position.
    for (int i = 0; i < kVerticesPerQuad; i++) {
        GrColor* colorPtr = (GrColor*)(offset + sizeof(SkPoint) + i * vertexStride);
        *colorPtr = color;
    }

    const SkScalar tx = SkIntToScalar(shapeData->fAtlasLocation.fX);
    const SkScalar ty = SkIntToScalar(shapeData->fAtlasLocation.fY);

    // vertex texture coords: atlas texel rect normalized by the atlas size.
    // NOTE(review): the unscaled fBounds extent is used here (vs the scaled
    // extent above) — presumably fBounds is already in atlas texels; confirm.
    SkPoint* textureCoords = (SkPoint*)(offset + sizeof(SkPoint) + sizeof(GrColor));
    textureCoords->setRectFan(tx / texture->width(),
                              ty / texture->height(),
                              (tx + shapeData->fBounds.width()) / texture->width(),
                              (ty + shapeData->fBounds.height()) / texture->height(),
                              vertexStride);
}
bool GrDrawTarget::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) { SkASSERT(dst); SkASSERT(src); SkIRect clippedSrcRect; SkIPoint clippedDstPoint; // If the rect is outside the src or dst then we've already succeeded. if (!clip_srcrect_and_dstpoint(dst, src, srcRect, dstPoint, &clippedSrcRect, &clippedDstPoint)) { return true; } if (this->onCopySurface(dst, src, clippedSrcRect, clippedDstPoint)) { return true; } GrRenderTarget* rt = dst->asRenderTarget(); GrTexture* tex = src->asTexture(); if ((dst == src) || !rt || !tex) { return false; } GrPipelineBuilder pipelineBuilder; pipelineBuilder.setRenderTarget(rt); SkMatrix matrix; matrix.setTranslate(SkIntToScalar(clippedSrcRect.fLeft - clippedDstPoint.fX), SkIntToScalar(clippedSrcRect.fTop - clippedDstPoint.fY)); matrix.postIDiv(tex->width(), tex->height()); pipelineBuilder.addColorTextureProcessor(tex, matrix); SkIRect dstRect = SkIRect::MakeXYWH(clippedDstPoint.fX, clippedDstPoint.fY, clippedSrcRect.width(), clippedSrcRect.height()); this->drawSimpleRect(&pipelineBuilder, GrColor_WHITE, SkMatrix::I(), dstRect); return true; }
// Makes a render-target-backed copy of 'texture' (optionally restricted to
// 'subset') in the pixel config implied by dstCT/dstPT and wraps it in a new
// SkGrPixelRef. Returns NULL on any failure.
static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorType dstCT,
                                                  SkColorProfileType dstPT, const SkIRect* subset) {
    if (NULL == texture || kUnknown_SkColorType == dstCT) {
        return NULL;
    }
    GrContext* context = texture->getContext();
    if (NULL == context) {
        // The texture's context was abandoned.
        return NULL;
    }
    GrSurfaceDesc desc;

    SkIRect srcRect;

    if (!subset) {
        // Whole-texture copy.
        desc.fWidth = texture->width();
        desc.fHeight = texture->height();
        srcRect = SkIRect::MakeWH(texture->width(), texture->height());
    } else {
        SkASSERT(SkIRect::MakeWH(texture->width(), texture->height()).contains(*subset));
        // Create a new texture that is the size of subset.
        desc.fWidth = subset->width();
        desc.fHeight = subset->height();
        srcRect = *subset;
    }
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fConfig = SkImageInfo2GrPixelConfig(dstCT, kPremul_SkAlphaType, dstPT);

    GrTexture* dst = context->createTexture(desc, false, NULL, 0);
    if (NULL == dst) {
        return NULL;
    }

    // Blink is relying on the above copy being sent to GL immediately in the case when the source
    // is a WebGL canvas backing store. We could have a TODO to remove this flush flag, but we have
    // a larger TODO to remove SkGrPixelRef entirely.
    context->copySurface(dst->asRenderTarget(), texture, srcRect, SkIPoint::Make(0,0),
                         GrContext::kFlushWrites_PixelOp);

    SkImageInfo info = SkImageInfo::Make(desc.fWidth, desc.fHeight, dstCT, kPremul_SkAlphaType,
                                         dstPT);
    SkGrPixelRef* pixelRef = SkNEW_ARGS(SkGrPixelRef, (info, dst));
    // The pixel ref holds its own reference to 'dst'; release ours.
    SkSafeUnref(dst);
    return pixelRef;
}
// Allocates an approx-fit mask texture — A8 when usable (always for
// non-render-targets, or when A8 is renderable), otherwise RGBA8888 — and
// tags it with 'key' so later lookups can reuse it.
GrTexture* GrClipMaskManager::createCachedMask(int width, int height, const GrUniqueKey& key, bool renderTarget) {
    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fFlags = renderTarget ? kRenderTarget_GrSurfaceFlag : kNone_GrSurfaceFlags;
    bool canUseAlpha8 = !renderTarget ||
                        fDrawTarget->caps()->isConfigRenderable(kAlpha_8_GrPixelConfig, false);
    desc.fConfig = canUseAlpha8 ? kAlpha_8_GrPixelConfig : kRGBA_8888_GrPixelConfig;

    GrTexture* texture = fDrawTarget->cmmAccess().resourceProvider()->createApproxTexture(desc, 0);
    if (!texture) {
        return nullptr;
    }
    texture->resourcePriv().setUniqueKey(key);
    return texture;
}
// Exercises GrVkGpu::wrapBackendTexture: a valid borrow, a NULL VkImage and a
// NULL allocation (each in both borrow and adopt modes must fail), then a
// valid adopt.
void wrap_tex_test(skiatest::Reporter* reporter, GrContext* context) {
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());

    GrBackendObject backendObj = gpu->createTestingOnlyBackendTexture(nullptr, kW, kH, kPixelConfig, false);
    const GrVkImageInfo* backendTex = reinterpret_cast<const GrVkImageInfo*>(backendObj);

    // check basic borrowed creation
    GrBackendTextureDesc desc;
    desc.fConfig = kPixelConfig;
    desc.fWidth = kW;
    desc.fHeight = kH;
    desc.fTextureHandle = backendObj;
    GrTexture* tex = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, tex);
    tex->unref();

    // image is null: wrapping must fail in both ownership modes
    GrVkImageInfo backendCopy = *backendTex;
    backendCopy.fImage = VK_NULL_HANDLE;
    desc.fTextureHandle = (GrBackendObject) &backendCopy;
    tex = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !tex);
    tex = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !tex);

    // alloc is null: wrapping must likewise fail in both modes
    backendCopy.fImage = backendTex->fImage;
    backendCopy.fAlloc = { VK_NULL_HANDLE, 0, 0 };
    tex = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !tex);
    tex = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !tex);

    // check adopt creation
    backendCopy.fAlloc = backendTex->fAlloc;
    tex = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, tex);
    tex->unref();

    // Clean up the test-only backend texture resources.
    gpu->deleteTestingOnlyBackendTexture(backendObj, true);
}
void GrGLMatrixConvolutionEffect::onSetData(const GrGLSLProgramDataManager& pdman, const GrFragmentProcessor& processor) { const GrMatrixConvolutionEffect& conv = processor.cast<GrMatrixConvolutionEffect>(); GrSurfaceProxy* proxy = conv.textureSampler(0).proxy(); GrTexture* texture = proxy->priv().peekTexture(); float imageIncrement[2]; float ySign = proxy->origin() == kTopLeft_GrSurfaceOrigin ? 1.0f : -1.0f; imageIncrement[0] = 1.0f / texture->width(); imageIncrement[1] = ySign / texture->height(); pdman.set2fv(fImageIncrementUni, 1, imageIncrement); pdman.set2fv(fKernelOffsetUni, 1, conv.kernelOffset()); int kernelCount = conv.kernelSize().width() * conv.kernelSize().height(); int arrayCount = (kernelCount + 3) / 4; SkASSERT(4 * arrayCount >= kernelCount); pdman.set4fv(fKernelUni, arrayCount, conv.kernel()); pdman.set1f(fGainUni, conv.gain()); pdman.set1f(fBiasUni, conv.bias()); fDomain.setData(pdman, conv.domain(), proxy); }
// Marks a surface's derived data stale after a write: flags the render
// target's resolve buffer (with bounds converted to top-left space when
// needed) and dirties the texture's MIP chain when only the base level was
// written.
void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds, uint32_t mipLevels) const {
    SkASSERT(surface);
    // An explicitly empty bounds means nothing was actually written.
    if (bounds && bounds->isEmpty()) {
        return;
    }
    GrRenderTarget* target = surface->asRenderTarget();
    if (target) {
        SkIRect flippedBounds;
        if (bounds && kBottomLeft_GrSurfaceOrigin == origin) {
            // Convert to the render target's top-left coordinate space.
            flippedBounds = {bounds->fLeft, surface->height() - bounds->fBottom,
                             bounds->fRight, surface->height() - bounds->fTop};
            bounds = &flippedBounds;
        }
        target->flagAsNeedingResolve(bounds);
    }
    GrTexture* texture = surface->asTexture();
    if (texture && 1 == mipLevels) {
        texture->texturePriv().markMipMapsDirty();
    }
}
bool GrDrawTarget::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) { SkASSERT(dst); SkASSERT(src); SkIRect clippedSrcRect; SkIPoint clippedDstPoint; // If the rect is outside the src or dst then we've already succeeded. if (!clip_srcrect_and_dstpoint(dst, src, srcRect, dstPoint, &clippedSrcRect, &clippedDstPoint)) { SkASSERT(GrDrawTarget::canCopySurface(dst, src, srcRect, dstPoint)); return true; } if (!GrDrawTarget::canCopySurface(dst, src, clippedSrcRect, clippedDstPoint)) { return false; } GrRenderTarget* rt = dst->asRenderTarget(); GrTexture* tex = src->asTexture(); GrDrawTarget::AutoStateRestore asr(this, kReset_ASRInit); this->drawState()->setRenderTarget(rt); SkMatrix matrix; matrix.setTranslate(SkIntToScalar(clippedSrcRect.fLeft - clippedDstPoint.fX), SkIntToScalar(clippedSrcRect.fTop - clippedDstPoint.fY)); matrix.postIDiv(tex->width(), tex->height()); this->drawState()->addColorTextureProcessor(tex, matrix); SkIRect dstRect = SkIRect::MakeXYWH(clippedDstPoint.fX, clippedDstPoint.fY, clippedSrcRect.width(), clippedSrcRect.height()); this->drawSimpleRect(dstRect); return true; }
void setData(const GrGLSLProgramDataManager& pdman, const GrPrimitiveProcessor& proc) override { SkASSERT(fTextureSizeUni.isValid()); GrTexture* texture = proc.texture(0); if (texture->width() != fTextureSize.width() || texture->height() != fTextureSize.height()) { fTextureSize = SkISize::Make(texture->width(), texture->height()); pdman.set2f(fTextureSizeUni, SkIntToScalar(fTextureSize.width()), SkIntToScalar(fTextureSize.height())); } const GrDistanceFieldPathGeoProc& dfpgp = proc.cast<GrDistanceFieldPathGeoProc>(); if (!dfpgp.viewMatrix().isIdentity() && !fViewMatrix.cheapEqualTo(dfpgp.viewMatrix())) { fViewMatrix = dfpgp.viewMatrix(); float viewMatrix[3 * 3]; GrGLSLGetMatrix<3>(viewMatrix, fViewMatrix); pdman.setMatrix3f(fViewMatrixUniform, viewMatrix); } }
// Returns a (ref'd) copy of the original texture made per 'copyParams',
// consulting — and on success populating — the unique-key cache when a valid
// key can be built for these params.
GrTexture* GrTextureAdjuster::refCopy(const CopyParams& copyParams) {
    GrTexture* original = this->originalTexture();
    GrContext* context = original->getContext();
    const SkIRect* contentArea = this->contentAreaOrNull();

    GrUniqueKey key;
    this->makeCopyKey(copyParams, &key);
    if (key.isValid()) {
        // Cache hit: hand back the previously made copy.
        GrTexture* cached = context->textureProvider()->findAndRefTextureByUniqueKey(key);
        if (cached) {
            return cached;
        }
    }

    GrTexture* copy = copy_on_gpu(original, contentArea, copyParams);
    if (copy && key.isValid()) {
        copy->resourcePriv().setUniqueKey(key);
        this->didCacheCopy(key);
    }
    return copy;
}
// Adds one 16-bit key entry per texture sampler: the low byte holds the
// config swizzle key, the high byte the sampler precision. Entries pack two
// to a 32-bit key word; an odd trailing half-word is zeroed so the key is
// deterministic.
static void add_texture_key(GrProcessorKeyBuilder* b, const GrProcessor& proc, const GrGLSLCaps& caps) {
    const int numTextures = proc.numTextures();
    SkASSERT(0 == proc.numBuffers());
    // Need two bytes per texture (swizzle and precision).
    // (Consistency fix: use the cached count instead of re-querying proc;
    // the old comment also claimed "two bytes" while listing three items.)
    int word32Count = (numTextures + 1) / 2;
    if (0 == word32Count) {
        return;
    }
    uint16_t* k16 = SkTCast<uint16_t*>(b->add32n(word32Count));
    for (int i = 0; i < numTextures; ++i) {
        const GrTextureAccess& access = proc.textureAccess(i);
        GrTexture* texture = access.getTexture();
        k16[i] = SkToU16(caps.configTextureSwizzle(texture->config()).asKey() |
                         (caps.samplerPrecision(texture->config(), access.getVisibility()) << 8));
    }
    // zero the last 16 bits if the number of textures is odd.
    if (numTextures & 0x1) {
        k16[numTextures] = 0;
    }
}
// Creates a texture after validating the config against the caps.
// Compressed configs take a separate creation path (and may not be render
// targets); uncompressed render targets get a stencil buffer attached unless
// the no-stencil flag is set.
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc, const void* srcData, size_t rowBytes) {
    if (!this->caps()->isConfigTexturable(desc.fConfig)) {
        return NULL;
    }

    bool isRT = 0 != (desc.fFlags & kRenderTarget_GrTextureFlagBit);
    if (isRT && !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    GrTexture* tex = NULL;
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We shouldn't be rendering into compressed textures.
        SkASSERT(!isRT);

        bool dimsArePow2 = SkIsPow2(desc.fWidth) && SkIsPow2(desc.fHeight);
        if (!dimsArePow2 && !this->caps()->npotTextureTileSupport()) {
            return NULL;
        }

        this->handleDirtyContext();
        tex = this->onCreateCompressedTexture(desc, srcData);
    } else {
        this->handleDirtyContext();
        tex = this->onCreateTexture(desc, srcData, rowBytes);

        bool wantsStencil = !(desc.fFlags & kNoStencil_GrTextureFlagBit);
        if (tex && isRT && wantsStencil) {
            SkASSERT(tex->asRenderTarget());
            // TODO: defer this and attach dynamically
            if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
                tex->unref();
                return NULL;
            }
        }
    }
    return tex;
}
// Looks up a cached texture for this image in 'ctx'; on a miss, asks the
// generator to produce one and caches it under the image-ID key. Returns
// nullptr when GPU support is compiled out or generation fails.
GrTexture* SkImageCacherator::tryLockAsTexture(GrContext* ctx, SkImageUsageType usage) {
#if SK_SUPPORT_GPU
    GrUniqueKey key;
    const SkImageInfo& info = this->info();
    GrMakeKeyFromImageID(&key, fGenerator->uniqueID(), info.width(), info.height(),
                         SkIPoint::Make(0, 0), *ctx->caps(), usage);

    // Cache hit?
    if (GrTexture* cached = ctx->textureProvider()->findAndRefTextureByUniqueKey(key)) {
        return cached;
    }

    // Miss: generate the texture and remember it for next time.
    GrTexture* generated = fGenerator->generateTexture(ctx, usage);
    if (generated) {
        generated->resourcePriv().setUniqueKey(key);
    }
    return generated;
#else
    return nullptr;
#endif
}
// Wraps a client-supplied backend texture after validating its config
// against the caps. A renderable wrap gets a stencil attachment immediately;
// failure to attach fails the whole wrap.
GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc, GrWrapOwnership ownership) {
    this->handleDirtyContext();
    if (!this->caps()->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }
    bool isRT = 0 != (desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    if (isRT && !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return nullptr;
    }
    GrTexture* tex = this->onWrapBackendTexture(desc, ownership);
    if (!tex) {
        return nullptr;
    }
    // TODO: defer this and attach dynamically
    GrRenderTarget* tgt = tex->asRenderTarget();
    if (!tgt) {
        return tex;
    }
    if (!fContext->resourceProvider()->attachStencilAttachment(tgt)) {
        tex->unref();
        return nullptr;
    }
    return tex;
}
unsigned Canvas2DLayerBridge::prepareTexture(WebTextureUpdater& updater) { #if ENABLE(CANVAS_USES_MAILBOX) ASSERT_NOT_REACHED(); return 0; #else m_context->makeContextCurrent(); TRACE_EVENT0("cc", "Canvas2DLayerBridge::SkCanvas::flush"); m_canvas->flush(); m_context->flush(); // Notify skia that the state of the backing store texture object will be touched by the compositor GrRenderTarget* renderTarget = reinterpret_cast<GrRenderTarget*>(m_canvas->getDevice()->accessRenderTarget()); if (renderTarget) { GrTexture* texture = renderTarget->asTexture(); texture->invalidateCachedState(); return texture->getTextureHandle(); } return 0; #endif // !ENABLE(CANVAS_USES_MAILBOX) }
// Creates a texture after validating the config against the caps; compressed
// configs take a separate creation path. On success, strips the scratch key
// when scratch reuse is disabled and updates creation/upload stats.
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc, bool budgeted, const void* srcData, size_t rowBytes) {
    if (!this->caps()->isConfigTexturable(desc.fConfig)) {
        return NULL;
    }

    bool isRT = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
    if (isRT && !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    GrTexture *tex = NULL;
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We shouldn't be rendering into this
        SkASSERT((desc.fFlags & kRenderTarget_GrSurfaceFlag) == 0);

        if (!this->caps()->npotTextureTileSupport() &&
            (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
            return NULL;
        }

        this->handleDirtyContext();
        tex = this->onCreateCompressedTexture(desc, budgeted, srcData);
    } else {
        this->handleDirtyContext();
        tex = this->onCreateTexture(desc, budgeted, srcData, rowBytes);
    }
    // BUG FIX: 'tex' may be NULL when creation fails; the scratch-key removal
    // previously dereferenced it unconditionally.
    if (tex && !this->caps()->reuseScratchTextures() && !isRT) {
        tex->resourcePriv().removeScratchKey();
    }
    if (tex) {
        fStats.incTextureCreates();
        if (srcData) {
            fStats.incTextureUploads();
        }
    }
    return tex;
}
bool SkImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, SkBitmap* result) { #if SK_SUPPORT_GPU SkBitmap input; SkASSERT(fInputCount == 1); if (!SkImageFilterUtils::GetInputResultGPU(this->getInput(0), proxy, src, &input)) { return false; } GrTexture* srcTexture = (GrTexture*) input.getTexture(); SkRect rect; src.getBounds(&rect); GrContext* context = srcTexture->getContext(); GrTextureDesc desc; desc.fFlags = kRenderTarget_GrTextureFlagBit, desc.fWidth = input.width(); desc.fHeight = input.height(); desc.fConfig = kRGBA_8888_GrPixelConfig; GrAutoScratchTexture dst(context, desc); GrContext::AutoMatrix am; am.setIdentity(context); GrContext::AutoRenderTarget art(context, dst.texture()->asRenderTarget()); GrContext::AutoClip acs(context, rect); GrEffectRef* effect; this->asNewEffect(&effect, srcTexture); SkASSERT(effect); SkAutoUnref effectRef(effect); GrPaint paint; paint.colorStage(0)->setEffect(effect); context->drawRect(paint, rect); SkAutoTUnref<GrTexture> resultTex(dst.detach()); SkImageFilterUtils::WrapTexture(resultTex, input.width(), input.height(), result); return true; #else return false; #endif }
// Creates a texture, rejecting unknown configs and non-renderable
// render-target requests up front. A render target that wants stencil gets a
// stencil buffer attached; failure to attach fails the creation.
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc, const void* srcData, size_t rowBytes) {
    if (kUnknown_GrPixelConfig == desc.fConfig) {
        return NULL;
    }

    bool isRT = 0 != (desc.fFlags & kRenderTarget_GrTextureFlagBit);
    if (isRT && !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    this->handleDirtyContext();
    GrTexture* tex = this->onCreateTexture(desc, srcData, rowBytes);
    if (NULL == tex) {
        return NULL;
    }
    if (isRT && !(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
        SkASSERT(NULL != tex->asRenderTarget());
        // TODO: defer this and attach dynamically
        if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
            tex->unref();
            return NULL;
        }
    }
    return tex;
}
// Returns a texture (ref'd) that is safe to sample with 'params'. When the
// original is usable as-is, it is ref'd and returned with the content area's
// offset in 'outOffset'; otherwise a (possibly cached) copy is made and the
// offset is zero.
GrTexture* GrTextureAdjuster::refTextureSafeForParams(const GrTextureParams& params, SkSourceGammaTreatment gammaTreatment, SkIPoint* outOffset) {
    GrTexture* texture = this->originalTexture();
    GrContext* context = texture->getContext();
    CopyParams copyParams;
    const SkIRect* contentArea = this->contentAreaOrNull();

    if (!context) {
        // The texture was abandoned.
        return nullptr;
    }

    if (contentArea && GrTextureParams::kMipMap_FilterMode == params.filterMode()) {
        // If we generate a MIP chain for texture it will read pixel values from outside the content
        // area.
        copyParams.fWidth = contentArea->width();
        copyParams.fHeight = contentArea->height();
        copyParams.fFilter = GrTextureParams::kBilerp_FilterMode;
    } else if (!context->getGpu()->makeCopyForTextureParams(texture, params, &copyParams)) {
        // The original texture is usable with the given params.
        if (outOffset) {
            if (contentArea) {
                // BUG FIX: the y offset was previously taken from fRight
                // instead of fTop.
                outOffset->set(contentArea->fLeft, contentArea->fTop);
            } else {
                outOffset->set(0, 0);
            }
        }
        return SkRef(texture);
    }

    GrTexture* copy = this->refCopy(copyParams);
    if (copy && outOffset) {
        outOffset->set(0, 0);
    }
    return copy;
}
virtual void setData(const GrGLProgramDataManager& pdman, const GrPrimitiveProcessor& proc, const GrBatchTracker& bt) override { SkASSERT(fTextureSizeUni.isValid()); GrTexture* texture = proc.texture(0); if (texture->width() != fTextureSize.width() || texture->height() != fTextureSize.height()) { fTextureSize = SkISize::Make(texture->width(), texture->height()); pdman.set2f(fTextureSizeUni, SkIntToScalar(fTextureSize.width()), SkIntToScalar(fTextureSize.height())); } this->setUniformViewMatrix(pdman, proc.viewMatrix()); const DistanceFieldNoGammaBatchTracker& local = bt.cast<DistanceFieldNoGammaBatchTracker>(); if (kUniform_GrGPInput == local.fInputColorType && local.fColor != fColor) { GrGLfloat c[4]; GrColorToRGBAFloat(local.fColor, c); pdman.set4fv(fColorUniform, 1, c); fColor = local.fColor; } }