// Records UMA histograms describing the scale factor of a draw, bucketed by
// the filter quality in use, plus an overall filter-quality histogram.
// No-op unless SK_HISTOGRAMS_ENABLED is set at build time.
void SkBaseDevice::LogDrawScaleFactor(const SkMatrix& matrix, SkFilterQuality filterQuality) {
#if SK_HISTOGRAMS_ENABLED
    // Histogram buckets for the scale factor. Order matters: these values are
    // persisted to logs, so existing entries must never be renumbered.
    enum ScaleFactor {
        kUpscale_ScaleFactor,
        kNoScale_ScaleFactor,
        kDownscale_ScaleFactor,
        kLargeDownscale_ScaleFactor,

        kLast_ScaleFactor = kLargeDownscale_ScaleFactor
    };

    float rawScaleFactor = matrix.getMinScale();

    // SkMatrix::getMinScale() returns a negative value (-1) when the scale
    // factor cannot be computed (perspective matrix, or overflow). Don't let
    // that failure masquerade as a large downscale: skip the scale-factor
    // histograms, but still record the overall filter-quality histogram.
    if (rawScaleFactor < 0) {
        SK_HISTOGRAM_ENUMERATION("FilterQuality", filterQuality, kLast_SkFilterQuality + 1);
        return;
    }

    ScaleFactor scaleFactor;
    if (rawScaleFactor < 0.5f) {
        scaleFactor = kLargeDownscale_ScaleFactor;
    } else if (rawScaleFactor < 1.0f) {
        scaleFactor = kDownscale_ScaleFactor;
    } else if (rawScaleFactor > 1.0f) {
        scaleFactor = kUpscale_ScaleFactor;
    } else {
        scaleFactor = kNoScale_ScaleFactor;
    }

    // One histogram per filter quality, so regressions in a single quality
    // tier remain visible.
    switch (filterQuality) {
        case kNone_SkFilterQuality:
            SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.NoneFilterQuality", scaleFactor,
                                     kLast_ScaleFactor + 1);
            break;
        case kLow_SkFilterQuality:
            SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.LowFilterQuality", scaleFactor,
                                     kLast_ScaleFactor + 1);
            break;
        case kMedium_SkFilterQuality:
            SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.MediumFilterQuality", scaleFactor,
                                     kLast_ScaleFactor + 1);
            break;
        case kHigh_SkFilterQuality:
            SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.HighFilterQuality", scaleFactor,
                                     kLast_ScaleFactor + 1);
            break;
    }

    // Also log filter quality independent scale factor.
    SK_HISTOGRAM_ENUMERATION("DrawScaleFactor.AnyFilterQuality", scaleFactor,
                             kLast_ScaleFactor + 1);

    // Also log an overall histogram of filter quality.
    SK_HISTOGRAM_ENUMERATION("FilterQuality", filterQuality, kLast_SkFilterQuality + 1);
#endif
}
/*
 * There are five ways to produce a texture, tried in order of preference:
 *
 *   1. Find a pre-existing one in the cache
 *   2. Have the generator create one natively
 *   3. Upload compressed data from the generator, if the GPU supports the format
 *   4. Upload YUV planes from the generator and let the GPU convert them
 *   5. Upload RGB(A) pixels from the generator and let the GPU convert them
 */
GrTexture* SkImageCacherator::lockTexture(GrContext* ctx, const GrUniqueKey& key,
                                          const SkImage* client, SkImage::CachingHint chint) {
    // Which of the paths above produced the texture; recorded to a histogram.
    // Values are persisted to logs — never renumber existing entries.
    enum LockTexturePath {
        kFailure_LockTexturePath,
        kPreExisting_LockTexturePath,
        kNative_LockTexturePath,
        kCompressed_LockTexturePath,
        kYUV_LockTexturePath,
        kRGBA_LockTexturePath,
    };

    enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };

    // Path 1: cache hit on the unique key.
    if (key.isValid()) {
        GrTexture* cached = ctx->textureProvider()->findAndRefTextureByUniqueKey(key);
        if (cached) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
                                     kLockTexturePathCount);
            return cached;
        }
    }

    // Path 2: native generation, restricted to our origin/size subset.
    {
        ScopedGenerator generator(this);
        SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(),
                                           fInfo.height());
        GrTexture* native = generator->generateTexture(ctx, &subset);
        if (native) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
                                     kLockTexturePathCount);
            return set_key_and_return(native, key);
        }
    }

    const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(fInfo);

    // Path 3: encoded data in a compressed format the GPU may accept directly.
    SkAutoTUnref<SkData> encoded(this->refEncoded(ctx));
    if (encoded) {
        GrTexture* compressed = load_compressed_into_texture(ctx, encoded, desc);
        if (compressed) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kCompressed_LockTexturePath,
                                     kLockTexturePathCount);
            return set_key_and_return(compressed, key);
        }
    }

    // Path 4: YUV planes, converted on the GPU.
    {
        ScopedGenerator generator(this);
        Generator_GrYUVProvider provider(generator);
        GrTexture* yuv = provider.refAsTexture(ctx, desc, true);
        if (yuv) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
                                     kLockTexturePathCount);
            return set_key_and_return(yuv, key);
        }
    }

    // Path 5: decode to a raster bitmap, then upload the RGB(A) pixels.
    SkBitmap bitmap;
    if (this->tryLockAsBitmap(&bitmap, client, chint)) {
        GrTexture* uploaded = GrUploadBitmapToTexture(ctx, bitmap);
        if (uploaded) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
                                     kLockTexturePathCount);
            return set_key_and_return(uploaded, key);
        }
    }

    // Every path failed; record that too so the histogram sums to all attempts.
    SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath,
                             kLockTexturePathCount);
    return nullptr;
}
/*
 * We have 4 ways to try to return a texture (in sorted order)
 *
 * 1. Check the cache for a pre-existing one
 * 2. Ask the generator to natively create one
 * 3. Ask the generator to return YUV planes, which the GPU can convert
 * 4. Ask the generator to return RGB(A) data, which the GPU can convert
 *
 * If willBeMipped is set, a proxy produced by paths 1, 2, or 4 without mips
 * is not returned immediately; instead we fall through and try to build a
 * mipped copy at the end (see the final `if (proxy)` section below).
 */
sk_sp<GrTextureProxy> SkImage_Lazy::lockTextureProxy(GrContext* ctx,
                                                     const GrUniqueKey& origKey,
                                                     SkImage::CachingHint chint,
                                                     bool willBeMipped,
                                                     SkColorSpace* dstColorSpace,
                                                     GrTextureMaker::AllowedTexGenType genType) {
    // Values representing the various texture lock paths we can take. Used for logging the path
    // taken to a histogram. These values are logged persistently, so entries must keep their
    // positions even when (like kCompressed) they are no longer produced.
    enum LockTexturePath {
        kFailure_LockTexturePath,
        kPreExisting_LockTexturePath,
        kNative_LockTexturePath,
        kCompressed_LockTexturePath, // Deprecated
        kYUV_LockTexturePath,
        kRGBA_LockTexturePath,
    };

    enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };

    // Determine which cached format we're going to use (which may involve decoding to a different
    // info than the generator provides).
    CachedFormat format = this->chooseCacheFormat(dstColorSpace, ctx->caps());

    // Fold the cache format into our texture key
    GrUniqueKey key;
    this->makeCacheKeyFromOrigKey(origKey, format, &key);

    GrProxyProvider* proxyProvider = ctx->contextPriv().proxyProvider();
    sk_sp<GrTextureProxy> proxy;

    // 1. Check the cache for a pre-existing one
    if (key.isValid()) {
        proxy = proxyProvider->findOrCreateProxyByUniqueKey(key, kTopLeft_GrSurfaceOrigin);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
                                     kLockTexturePathCount);
            if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
                return proxy;
            }
            // Cached proxy exists but lacks mips; keep it in `proxy` and fall
            // through to the mip-copy fallback at the bottom.
        }
    }

    // The CachedFormat is both an index for which cache "slot" we'll use to store this particular
    // decoded variant of the encoded data, and also a recipe for how to transform the original
    // info to get the one that we're going to decode to.
    const SkImageInfo cacheInfo = this->buildCacheInfo(format);
    SkImageInfo genPixelsInfo = cacheInfo;
    SkTransferFunctionBehavior behavior = getGeneratorBehaviorAndInfo(&genPixelsInfo);

    // 2. Ask the generator to natively create one
    if (!proxy) {
        ScopedGenerator generator(fSharedGenerator);
        // Honor the caller's request to only attempt cheap generation: bail out
        // entirely rather than doing expensive work below.
        if (GrTextureMaker::AllowedTexGenType::kCheap == genType &&
            SkImageGenerator::TexGenType::kCheap != generator->onCanGenerateTexture()) {
            return nullptr;
        }
        if ((proxy = generator->generateTexture(ctx, genPixelsInfo, fOrigin, behavior,
                                                willBeMipped))) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
                return proxy;
            }
            // Native proxy was created without mips; fall through to the
            // mip-copy fallback at the bottom.
        }
    }

    // 3. Ask the generator to return YUV planes, which the GPU can convert. If we will be mipping
    //    the texture we fall through here and have the CPU generate the mip maps for us.
    if (!proxy && !willBeMipped && !ctx->contextPriv().disableGpuYUVConversion()) {
        const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(cacheInfo, *ctx->caps());
        ScopedGenerator generator(fSharedGenerator);
        Generator_GrYUVProvider provider(generator);

        // The pixels in the texture will be in the generator's color space. If onMakeColorSpace
        // has been called then this will not match this image's color space. To correct this, apply
        // a color space conversion from the generator's color space to this image's color space.
        const SkColorSpace* generatorColorSpace =
                fSharedGenerator->fGenerator->getInfo().colorSpace();
        const SkColorSpace* thisColorSpace = fInfo.colorSpace();

        // TODO: Update to create the mipped surface in the YUV generator and draw the base layer
        // directly into the mipped surface.
        proxy = provider.refAsTextureProxy(ctx, desc, generatorColorSpace, thisColorSpace);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            // This path only runs when !willBeMipped, so no mip fallback needed.
            return proxy;
        }
    }

    // 4. Ask the generator to return RGB(A) data, which the GPU can convert
    SkBitmap bitmap;
    if (!proxy && this->lockAsBitmap(&bitmap, chint, format, genPixelsInfo, behavior)) {
        if (willBeMipped) {
            // Try to build the full mip chain on the CPU first.
            proxy = proxyProvider->createMipMapProxyFromBitmap(bitmap, dstColorSpace);
        }
        if (!proxy) {
            // Either mips weren't requested or the mipped upload failed;
            // upload just the base level.
            proxy = GrUploadBitmapToTextureProxy(proxyProvider, bitmap, dstColorSpace);
        }
        if (proxy && (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped())) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            return proxy;
        }
    }

    if (proxy) {
        // We need a mipped proxy, but we either found a proxy earlier that wasn't mipped, generated
        // a native non mipped proxy, or generated a non-mipped yuv proxy. Thus we generate a new
        // mipped surface and copy the original proxy into the base layer. We will then let the gpu
        // generate the rest of the mips.
        SkASSERT(willBeMipped);
        SkASSERT(GrMipMapped::kNo == proxy->mipMapped());
        if (auto mippedProxy = GrCopyBaseMipMapToTextureProxy(ctx, proxy.get())) {
            set_key_on_proxy(proxyProvider, mippedProxy.get(), proxy.get(), key);
            return mippedProxy;
        }
        // We failed to make a mipped proxy with the base copied into it. This could have
        // been from failure to make the proxy or failure to do the copy. Thus we will fall
        // back to just using the non mipped proxy; See skbug.com/7094.
        return proxy;
    }

    SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath,
                             kLockTexturePathCount);
    return nullptr;
}