bool GrGpu::getReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes, GrPixelConfig readConfig, DrawPreference* drawPreference, ReadPixelTempDrawInfo* tempDrawInfo) { SkASSERT(drawPreference); SkASSERT(tempDrawInfo); SkASSERT(kGpuPrefersDraw_DrawPreference != *drawPreference); // We currently do not support reading into a compressed buffer if (GrPixelConfigIsCompressed(readConfig)) { return false; } if (!this->onGetReadPixelsInfo(srcSurface, width, height, rowBytes, readConfig, drawPreference, tempDrawInfo)) { return false; } // Check to see if we're going to request that the caller draw when drawing is not possible. if (!srcSurface->asTexture() || !this->caps()->isConfigRenderable(tempDrawInfo->fTempSurfaceDesc.fConfig, false)) { // If we don't have a fallback to a straight read then fail. if (kRequireDraw_DrawPreference == *drawPreference) { return false; } *drawPreference = kNoDraw_DrawPreference; } return true; }
bool GrGpu::getWritePixelsInfo(GrSurface* dstSurface, int width, int height, size_t rowBytes, GrPixelConfig srcConfig, DrawPreference* drawPreference, WritePixelTempDrawInfo* tempDrawInfo) { SkASSERT(drawPreference); SkASSERT(tempDrawInfo); SkASSERT(kGpuPrefersDraw_DrawPreference != *drawPreference); if (GrPixelConfigIsCompressed(dstSurface->desc().fConfig) && dstSurface->desc().fConfig != srcConfig) { return false; } if (this->caps()->useDrawInsteadOfPartialRenderTargetWrite() && SkToBool(dstSurface->asRenderTarget()) && (width < dstSurface->width() || height < dstSurface->height())) { ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); } if (!this->onGetWritePixelsInfo(dstSurface, width, height, rowBytes, srcConfig, drawPreference, tempDrawInfo)) { return false; } // Check to see if we're going to request that the caller draw when drawing is not possible. if (!dstSurface->asRenderTarget() || !this->caps()->isConfigTexturable(tempDrawInfo->fTempSurfaceDesc.fConfig)) { // If we don't have a fallback to a straight upload then fail. if (kRequireDraw_DrawPreference == *drawPreference || !this->caps()->isConfigTexturable(srcConfig)) { return false; } *drawPreference = kNoDraw_DrawPreference; } return true; }
// Caches the FBO/renderbuffer IDs from |idDesc|, records the full-surface GL
// viewport from |desc|, and precomputes fGpuMemorySize from the number of color
// values this target owns per pixel.
// NOTE(review): the viewport reads |desc| while the size computation reads the
// fDesc member — presumably the base class has already copied desc into fDesc
// before init() runs; confirm against the constructor.
void GrGLRenderTarget::init(const GrSurfaceDesc& desc, const IDDesc& idDesc) {
    fRTFBOID = idDesc.fRTFBOID;
    fTexFBOID = idDesc.fTexFBOID;
    fMSColorRenderbufferID = idDesc.fMSColorRenderbufferID;
    fIsWrapped = kWrapped_LifeCycle == idDesc.fLifeCycle;

    // The viewport covers the entire surface.
    fViewport.fLeft = 0;
    fViewport.fBottom = 0;
    fViewport.fWidth = desc.fWidth;
    fViewport.fHeight = desc.fHeight;

    // We own one color value for each MSAA sample.
    int colorValuesPerPixel = SkTMax(1, fDesc.fSampleCnt);
    if (fTexFBOID != fRTFBOID) {
        // If we own the resolve buffer then that is one more sample per pixel.
        colorValuesPerPixel += 1;
    } else if (fTexFBOID != 0) {
        // For auto-resolving FBOs, the MSAA buffer is free.
        colorValuesPerPixel = 1;
    }
    SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
    SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
    size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
    SkASSERT(colorBytes > 0);
    fGpuMemorySize = colorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * colorBytes;
}
// Returns the GPU memory footprint: one color value per owned sample per pixel.
size_t GrGLRenderTarget::gpuMemorySize() const {
    SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
    SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
    const size_t bytesPerColorValue = GrBytesPerPixel(fDesc.fConfig);
    SkASSERT(bytesPerColorValue > 0);
    return fColorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * bytesPerColorValue;
}
// Lazy-callback version GrSurfaceProxy::GrSurfaceProxy(LazyInstantiateCallback&& callback, LazyInstantiationType lazyType, const GrBackendFormat& format, const GrSurfaceDesc& desc, GrSurfaceOrigin origin, SkBackingFit fit, SkBudgeted budgeted, GrInternalSurfaceFlags surfaceFlags) : fSurfaceFlags(surfaceFlags) , fFormat(format) , fConfig(desc.fConfig) , fWidth(desc.fWidth) , fHeight(desc.fHeight) , fOrigin(origin) , fFit(fit) , fBudgeted(budgeted) , fLazyInstantiateCallback(std::move(callback)) , fLazyInstantiationType(lazyType) , fNeedsClear(SkToBool(desc.fFlags & kPerformInitialClear_GrSurfaceFlag)) , fGpuMemorySize(kInvalidGpuMemorySize) , fLastOpList(nullptr) { SkASSERT(fFormat.isValid()); // NOTE: the default fUniqueID ctor pulls a value from the same pool as the GrGpuResources. if (fLazyInstantiateCallback) { SkASSERT(is_valid_fully_lazy(desc, fit) || is_valid_partially_lazy(desc)); } else { SkASSERT(is_valid_non_lazy(desc)); } if (GrPixelConfigIsCompressed(desc.fConfig)) { SkASSERT(!SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag)); fSurfaceFlags |= GrInternalSurfaceFlags::kReadOnly; } }
// Creates a texture, preferring to recycle an exactly-matching scratch texture
// from the cache before asking the GPU to allocate a new one. |srcData| (with
// |rowBytes| stride) is uploaded when provided. Returns NULL on failure.
GrTexture* GrTextureProvider::createTexture(const GrSurfaceDesc& desc, bool budgeted,
                                            const void* srcData, size_t rowBytes) {
    if (this->isAbandoned()) {
        return NULL;
    }
    if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
        !fGpu->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    // Scratch reuse applies only to uncompressed configs.
    if (!GrPixelConfigIsCompressed(desc.fConfig)) {
        // Require an exact match and never allocate here; allocation is the fallback below.
        static const uint32_t kFlags = kExact_ScratchTextureFlag | kNoCreate_ScratchTextureFlag;

        GrTexture* scratch = this->internalRefScratchTexture(desc, kFlags);
        if (scratch) {
            bool initialized = !srcData ||
                               scratch->writePixels(0, 0, desc.fWidth, desc.fHeight,
                                                    desc.fConfig, srcData, rowBytes);
            if (initialized) {
                if (!budgeted) {
                    scratch->resourcePriv().makeUnbudgeted();
                }
                return scratch;
            }
            // Upload failed; give the scratch texture back and fall through.
            scratch->unref();
        }
    }
    return fGpu->createTexture(desc, budgeted, srcData, rowBytes);
}
// Builds a layer atlas whose backing texture is tiled into numPlotsX x numPlotsY
// plots; the plot grid must divide the backing texture dimensions evenly.
GrLayerAtlas::GrLayerAtlas(GrTextureProvider* texProvider, GrPixelConfig config,
                           GrSurfaceFlags flags, const SkISize& backingTextureSize,
                           int numPlotsX, int numPlotsY) {
    fTexProvider = texProvider;
    fPixelConfig = config;
    fFlags = flags;
    fBackingTextureSize = backingTextureSize;

    const int texW = fBackingTextureSize.width();
    const int texH = fBackingTextureSize.height();
    const int plotW = texW / numPlotsX;
    const int plotH = texH / numPlotsY;
    // The plots must tile the backing texture with no remainder.
    SkASSERT(plotW * numPlotsX == texW);
    SkASSERT(plotH * numPlotsY == texH);

    // We currently do not support compressed atlases...
    SkASSERT(!GrPixelConfigIsCompressed(config));

    // Allocate every plot up front and push each onto the LRU list as it is
    // initialized; iterating the grid backwards leaves plot id 0 at the head.
    fPlotArray = new Plot[numPlotsX * numPlotsY];

    Plot* plot = fPlotArray;
    for (int row = numPlotsY - 1; row >= 0; --row) {
        for (int col = numPlotsX - 1; col >= 0; --col) {
            plot->init(row * numPlotsX + col, col, row, plotW, plotH);
            fPlotList.addToHead(plot);
            ++plot;
        }
    }
}
// Returns the byte size of a single full-resolution color plane of this target.
size_t GrGLRenderTarget::totalBytesPerSample() const {
    SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
    SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
    const size_t bytesPerPixel = GrBytesPerPixel(fDesc.fConfig);
    SkASSERT(bytesPerPixel > 0);
    return fDesc.fWidth * fDesc.fHeight * bytesPerPixel;
}
// Compresses the software-rendered bitmap (fBM) into fCompressedFormat and
// uploads the result to |texture|. |desc| must describe a compressed config
// matching the helper's compression format.
void GrSWMaskHelper::compressTextureData(GrTexture *texture, const GrSurfaceDesc& desc) {
    SkASSERT(GrPixelConfigIsCompressed(desc.fConfig));
    SkASSERT(fmt_to_config(fCompressedFormat) == desc.fConfig);

    SkAutoDataUnref compressed(SkTextureCompressor::CompressBitmapToFormat(fBM,
                                                                           fCompressedFormat));
    SkASSERT(compressed);

    // Row bytes of 0 means the compressed data is tightly packed.
    this->sendTextureData(texture, desc, compressed->data(), 0);
}
// A texture only receives a scratch key (making it recyclable via the resource
// cache) when it is non-external, uncompressed, and not backed by
// client-supplied storage.
GrTexture::GrTexture(GrGpu* gpu, LifeCycle lifeCycle, const GrSurfaceDesc& desc)
    : INHERITED(gpu, lifeCycle, desc)
    , fMipMapsStatus(kNotAllocated_MipMapsStatus) {
    if (!this->isExternal() && !GrPixelConfigIsCompressed(desc.fConfig) &&
        !desc.fTextureStorageAllocator.fAllocateTextureStorage) {
        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(desc, &key);
        this->setScratchKey(key);
    }
}
// Estimates this texture's GPU memory footprint; a present mip chain doubles
// the base-level size as a conservative bound.
size_t GrTexture::gpuMemorySize() const {
    size_t size;
    if (GrPixelConfigIsCompressed(fDesc.fConfig)) {
        size = GrCompressedFormatDataSize(fDesc.fConfig, fDesc.fWidth, fDesc.fHeight);
    } else {
        size = (size_t) fDesc.fWidth * fDesc.fHeight * GrBytesPerPixel(fDesc.fConfig);
    }
    if (this->texturePriv().hasMipMaps()) {
        // We don't have to worry about the mipmaps being a different size than
        // we'd expect because we never change fDesc.fWidth/fHeight.
        size *= 2;
    }
    return size;
}
// Looks up (and optionally creates) a scratch texture compatible with |inDesc|.
// |flags| is a bitfield of ScratchTextureFlags:
//   kExact_…       use the requested dimensions verbatim (no pow2 binning)
//   kNoPendingIO_… only return cached textures with no pending IO
//   kNoCreate_…    never allocate; return NULL on a cache miss
// Compressed configs are not supported (asserted).
GrTexture* GrTextureProvider::internalRefScratchTexture(const GrSurfaceDesc& inDesc,
                                                        uint32_t flags) {
    SkASSERT(!this->isAbandoned());
    SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));

    // Copy-on-write so we only clone the desc if we actually rebin dimensions.
    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    // The cache is only consulted when scratch reuse is enabled, or always for
    // render targets.
    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        if (!(kExact_ScratchTextureFlag & flags)) {
            // bin by pow2 with a reasonable min
            // NOTE(review): SkTMin(16, minTextureSize()) yields the SMALLER of the
            // two — if the hardware minimum exceeds 16 this could bin below it;
            // verify whether SkTMax was intended.
            const int minSize = SkTMin(16, fGpu->caps()->minTextureSize());
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth = SkTMax(minSize, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(minSize, GrNextPow2(desc->fHeight));
        }

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*desc, &key);
        uint32_t scratchFlags = 0;
        if (kNoPendingIO_ScratchTextureFlag & flags) {
            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
        } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        }
        GrGpuResource* resource = fCache->findAndRefScratchResource(key, scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            GrRenderTarget* rt = surface->asRenderTarget();
            // Discard any stale contents of a recycled render target.
            if (rt && fGpu->caps()->discardRenderTargetSupport()) {
                rt->discard();
            }
            return surface->asTexture();
        }
    }

    // Cache miss: allocate a new (budgeted) texture unless the caller forbade it.
    if (!(kNoCreate_ScratchTextureFlag & flags)) {
        return fGpu->createTexture(*desc, true, NULL, 0);
    }

    return NULL;
}
// Refs a scratch texture matching |desc|. |match| selects exact vs approximate
// dimension matching; |calledDuringFlush| forbids returning textures with
// pending IO. Returns NULL if abandoned or if |desc| is compressed.
GrTexture* GrTextureProvider::refScratchTexture(const GrSurfaceDesc& desc,
                                                ScratchTexMatch match,
                                                bool calledDuringFlush) {
    if (this->isAbandoned()) {
        return NULL;
    }

    // Currently we don't recycle compressed textures as scratch.
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        return NULL;
    }

    // Idiom fix: early return above removes the needless else-nesting around
    // the flag computation.
    uint32_t flags = 0;
    if (kExact_ScratchTexMatch == match) {
        flags |= kExact_ScratchTextureFlag;
    }
    if (calledDuringFlush) {
        flags |= kNoPendingIO_ScratchTextureFlag;
    }
    return this->internalRefScratchTexture(desc, flags);
}
// Computes this texture's GPU memory footprint; a full mip chain adds at most
// one third of the base level's size.
size_t GrTexture::onGpuMemorySize() const {
    const bool isCompressed = GrPixelConfigIsCompressed(fDesc.fConfig);
    size_t size = isCompressed
            ? GrCompressedFormatDataSize(fDesc.fConfig, fDesc.fWidth, fDesc.fHeight)
            : (size_t) fDesc.fWidth * fDesc.fHeight * GrBytesPerPixel(fDesc.fConfig);

    if (this->texturePriv().hasMipMaps()) {
        // We don't have to worry about the mipmaps being a different size than
        // we'd expect because we never change fDesc.fWidth/fHeight.
        size += size / 3;
    }

    SkASSERT(!SkToBool(fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
    SkASSERT(size <= WorseCaseSize(fDesc));
    return size;
}
// Builds an atlas whose (lazily created) backing texture is tiled into
// fNumPlotsX x fNumPlotsY plots; the grid must divide the backing dimensions
// evenly. Takes a ref on |gpu|.
GrAtlas::GrAtlas(GrGpu* gpu, GrPixelConfig config, GrSurfaceFlags flags,
                 const SkISize& backingTextureSize, int numPlotsX, int numPlotsY,
                 bool batchUploads) {
    fGpu = SkRef(gpu);
    fPixelConfig = config;
    fFlags = flags;
    fBackingTextureSize = backingTextureSize;
    fNumPlotsX = numPlotsX;
    fNumPlotsY = numPlotsY;
    fBatchUploads = batchUploads;
    fTexture = NULL;

    const int texW = fBackingTextureSize.width();
    const int texH = fBackingTextureSize.height();
    const int plotW = texW / fNumPlotsX;
    const int plotH = texH / fNumPlotsY;
    // The plots must tile the backing texture with no remainder.
    SkASSERT(plotW * fNumPlotsX == texW);
    SkASSERT(plotH * fNumPlotsY == texH);

    // We currently do not support compressed atlases...
    SkASSERT(!GrPixelConfigIsCompressed(config));

    // Allocate every plot up front and push each onto the LRU list as it is
    // initialized; iterating the grid backwards leaves plot id 0 at the head.
    size_t bpp = GrBytesPerPixel(fPixelConfig);
    fPlotArray = new GrPlot[(fNumPlotsX * fNumPlotsY)];

    GrPlot* plot = fPlotArray;
    for (int row = numPlotsY - 1; row >= 0; --row) {
        for (int col = numPlotsX - 1; col >= 0; --col) {
            plot->init(this, row * numPlotsX + col, col, row, plotW, plotH, bpp, batchUploads);
            fPlotList.addToHead(plot);
            ++plot;
        }
    }
}
// Creates a texture described by |desc|, optionally initialized with |srcData|
// (|rowBytes| stride). Returns NULL if the config is not texturable/renderable,
// if a compressed NPOT texture is unsupported, or if stencil attachment fails.
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
                                const void* srcData, size_t rowBytes) {
    if (!this->caps()->isConfigTexturable(desc.fConfig)) {
        return NULL;
    }

    if ((desc.fFlags & kRenderTarget_GrTextureFlagBit) &&
        !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    GrTexture *tex = NULL;
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We shouldn't be rendering into this
        SkASSERT((desc.fFlags & kRenderTarget_GrTextureFlagBit) == 0);

        // Without NPOT tiling support, compressed textures must be pow2-sized.
        if (!this->caps()->npotTextureTileSupport() &&
            (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
            return NULL;
        }

        this->handleDirtyContext();
        tex = this->onCreateCompressedTexture(desc, srcData);
    } else {
        this->handleDirtyContext();
        tex = this->onCreateTexture(desc, srcData, rowBytes);
        // Render targets get a stencil buffer attached now unless explicitly opted out.
        if (tex &&
            (kRenderTarget_GrTextureFlagBit & desc.fFlags) &&
            !(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
            SkASSERT(tex->asRenderTarget());
            // TODO: defer this and attach dynamically
            if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
                // Failed to attach stencil: the texture is unusable as requested.
                tex->unref();
                return NULL;
            }
        }
    }
    return tex;
}
// Creates a (possibly render-target) texture described by |desc|, optionally
// initialized with |srcData| (|rowBytes| stride). Returns NULL on failure.
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc, bool budgeted,
                                const void* srcData, size_t rowBytes) {
    if (!this->caps()->isConfigTexturable(desc.fConfig)) {
        return NULL;
    }

    bool isRT = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
    if (isRT && !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    GrTexture *tex = NULL;
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We shouldn't be rendering into this
        SkASSERT((desc.fFlags & kRenderTarget_GrSurfaceFlag) == 0);

        // Without NPOT tiling support, compressed textures must be pow2-sized.
        if (!this->caps()->npotTextureTileSupport() &&
            (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
            return NULL;
        }

        this->handleDirtyContext();
        tex = this->onCreateCompressedTexture(desc, budgeted, srcData);
    } else {
        this->handleDirtyContext();
        tex = this->onCreateTexture(desc, budgeted, srcData, rowBytes);
    }

    if (tex) {
        // BUG FIX: the scratch-key removal used to run BEFORE the null check,
        // dereferencing |tex| whenever onCreateTexture/onCreateCompressedTexture
        // returned NULL. It now only runs on a successfully created texture.
        if (!this->caps()->reuseScratchTextures() && !isRT) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (srcData) {
            fStats.incTextureUploads();
        }
    }
    return tex;
}
// Reads a rectangle of pixels from |surface| into |buffer| (|rowBytes| stride),
// clipping the requested rect to the surface bounds first. Returns false for
// compressed configs or a fully clipped-out rect.
bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
                       GrPixelConfig config, void* buffer, size_t rowBytes) {
    this->handleDirtyContext();

    // We cannot read pixels into a compressed buffer
    if (GrPixelConfigIsCompressed(config)) {
        return false;
    }

    const size_t bpp = GrBytesPerPixel(config);
    // Clamp the rect to the surface and adjust buffer/rowBytes accordingly.
    bool paramsValid = GrSurfacePriv::AdjustReadPixelParams(surface->width(), surface->height(),
                                                            bpp, &left, &top, &width, &height,
                                                            &buffer, &rowBytes);
    if (!paramsValid) {
        return false;
    }

    return this->onReadPixels(surface, left, top, width, height, config, buffer, rowBytes);
}
// Verifies that deferred proxies agree with direct resource creation: for every
// combination of origin / size / config / fit / budget / sample count, creating
// a proxy succeeds exactly when creating the equivalent texture succeeds, and an
// instantiated proxy validates as the expected render target and/or texture.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(DeferredProxyTest, reporter, ctxInfo) {
    GrProxyProvider* proxyProvider = ctxInfo.grContext()->priv().proxyProvider();
    GrResourceProvider* resourceProvider = ctxInfo.grContext()->priv().resourceProvider();
    const GrCaps& caps = *ctxInfo.grContext()->priv().caps();

    int attempt = 0; // useful for debugging

    for (auto origin : { kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin }) {
        for (auto widthHeight : { 100, 128, 1048576 }) {
            for (auto config : { kAlpha_8_GrPixelConfig, kRGB_565_GrPixelConfig,
                                 kRGBA_8888_GrPixelConfig, kRGBA_1010102_GrPixelConfig,
                                 kRGB_ETC1_GrPixelConfig }) {
                for (auto fit : { SkBackingFit::kExact, SkBackingFit::kApprox }) {
                    for (auto budgeted : { SkBudgeted::kYes, SkBudgeted::kNo }) {
                        for (auto numSamples : {1, 4, 16, 128}) {
                            // We don't have recycling support for compressed textures
                            if (GrPixelConfigIsCompressed(config) &&
                                SkBackingFit::kApprox == fit) {
                                continue;
                            }

                            GrSurfaceDesc desc;
                            desc.fFlags = kRenderTarget_GrSurfaceFlag;
                            desc.fWidth = widthHeight;
                            desc.fHeight = widthHeight;
                            desc.fConfig = config;
                            desc.fSampleCnt = numSamples;

                            GrSRGBEncoded srgbEncoded;
                            GrColorType colorType =
                                    GrPixelConfigToColorTypeAndEncoding(config, &srgbEncoded);
                            const GrBackendFormat format =
                                    caps.getBackendFormatFromGrColorType(colorType, srgbEncoded);

                            // First pass: render-target flavor.
                            {
                                sk_sp<GrTexture> tex;
                                if (SkBackingFit::kApprox == fit) {
                                    tex = resourceProvider->createApproxTexture(
                                            desc, GrResourceProvider::Flags::kNoPendingIO);
                                } else {
                                    tex = resourceProvider->createTexture(
                                            desc, budgeted,
                                            GrResourceProvider::Flags::kNoPendingIO);
                                }

                                sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
                                        format, desc, origin, fit, budgeted);
                                REPORTER_ASSERT(reporter, SkToBool(tex) == SkToBool(proxy));
                                if (proxy) {
                                    REPORTER_ASSERT(reporter, proxy->asRenderTargetProxy());
                                    // This forces the proxy to compute and cache its
                                    // pre-instantiation size guess. Later, when it is actually
                                    // instantiated, it checks that the instantiated size is <=
                                    // to the pre-computation. If the proxy never computed its
                                    // pre-instantiation size then the check is skipped.
                                    proxy->gpuMemorySize();

                                    check_surface(reporter, proxy.get(), origin, widthHeight,
                                                  widthHeight, config, budgeted);
                                    int supportedSamples =
                                            caps.getRenderTargetSampleCount(numSamples, config);
                                    check_rendertarget(reporter, caps, resourceProvider,
                                                       proxy->asRenderTargetProxy(),
                                                       supportedSamples, fit,
                                                       caps.maxWindowRectangles());
                                }
                            }

                            // Second pass: plain-texture flavor (same desc minus RT flag).
                            desc.fFlags = kNone_GrSurfaceFlags;

                            {
                                sk_sp<GrTexture> tex;
                                if (SkBackingFit::kApprox == fit) {
                                    tex = resourceProvider->createApproxTexture(
                                            desc, GrResourceProvider::Flags::kNoPendingIO);
                                } else {
                                    tex = resourceProvider->createTexture(
                                            desc, budgeted,
                                            GrResourceProvider::Flags::kNoPendingIO);
                                }

                                sk_sp<GrTextureProxy> proxy(proxyProvider->createProxy(
                                        format, desc, origin, fit, budgeted));
                                REPORTER_ASSERT(reporter, SkToBool(tex) == SkToBool(proxy));
                                if (proxy) {
                                    // This forces the proxy to compute and cache its
                                    // pre-instantiation size guess. Later, when it is actually
                                    // instantiated, it checks that the instantiated size is <=
                                    // to the pre-computation. If the proxy never computed its
                                    // pre-instantiation size then the check is skipped.
                                    proxy->gpuMemorySize();

                                    check_surface(reporter, proxy.get(), origin, widthHeight,
                                                  widthHeight, config, budgeted);
                                    check_texture(reporter, resourceProvider,
                                                  proxy->asTextureProxy(), fit);
                                }
                            }

                            attempt++;
                        }
                    }
                }
            }
        }
    }
}
// Creates a (possibly render-target) texture from |origDesc|, optionally
// initialized with |srcData| (|rowBytes| stride). The desc is copied so the
// sample count and origin can be normalized before creation. Returns nullptr
// on any unsupported configuration or allocation failure.
GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, bool budgeted,
                                const void* srcData, size_t rowBytes) {
    GrSurfaceDesc desc = origDesc;

    if (!this->caps()->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    bool isRT = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
    if (isRT && !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return nullptr;
    }

    // We currently do not support multisampled textures
    if (!isRT && desc.fSampleCnt > 0) {
        return nullptr;
    }

    GrTexture *tex = nullptr;

    // Enforce the appropriate maximum dimension for the request.
    if (isRT) {
        int maxRTSize = this->caps()->maxRenderTargetSize();
        if (desc.fWidth > maxRTSize || desc.fHeight > maxRTSize) {
            return nullptr;
        }
    } else {
        int maxSize = this->caps()->maxTextureSize();
        if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
            return nullptr;
        }
    }

    GrGpuResource::LifeCycle lifeCycle = budgeted ? GrGpuResource::kCached_LifeCycle
                                                  : GrGpuResource::kUncached_LifeCycle;

    desc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    // Attempt to catch un- or wrongly initialized sample counts;
    SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);

    desc.fOrigin = resolve_origin(desc.fOrigin, isRT);

    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We shouldn't be rendering into this
        SkASSERT(!isRT);
        SkASSERT(0 == desc.fSampleCnt);

        // Without NPOT tiling support, compressed textures must be pow2-sized.
        if (!this->caps()->npotTextureTileSupport() &&
            (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
            return nullptr;
        }

        this->handleDirtyContext();
        tex = this->onCreateCompressedTexture(desc, lifeCycle, srcData);
    } else {
        this->handleDirtyContext();
        tex = this->onCreateTexture(desc, lifeCycle, srcData, rowBytes);
    }

    if (tex) {
        // BUG FIX: the scratch-key removal used to run BEFORE the null check,
        // dereferencing |tex| whenever onCreateTexture/onCreateCompressedTexture
        // returned nullptr. It now only runs on a successfully created texture.
        if (!this->caps()->reuseScratchTextures() && !isRT) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (srcData) {
            fStats.incTextureUploads();
        }
    }
    return tex;
}