// Exercises SkSpecialImage::MakeDeferredFromGpu: builds a full-size deferred
// GPU image plus two subset variants (one made directly from the proxy, one
// via makeSubset()) and validates each with test_image().
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SpecialImage_Gpu, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    SkBitmap bm = create_bm();

    const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bm.info(), *context->caps());

    sk_sp<GrTextureProxy> proxy(GrSurfaceProxy::MakeDeferred(context->resourceProvider(),
                                                             desc, SkBudgeted::kNo,
                                                             bm.getPixels(), bm.rowBytes()));
    if (!proxy) {
        return;
    }

    // Keep a copy of 'proxy' alive in the full image; it is moved below.
    sk_sp<SkSpecialImage> fullSImg(SkSpecialImage::MakeDeferredFromGpu(
                                                            context,
                                                            SkIRect::MakeWH(kFullSize, kFullSize),
                                                            kNeedNewImageUniqueID_SpecialImage,
                                                            proxy, nullptr));

    // Hold the subset by value: the previous const-reference binding to the
    // MakeXYWH temporary relied on lifetime extension and was needlessly fragile.
    const SkIRect subset = SkIRect::MakeXYWH(kPad, kPad, kSmallerSize, kSmallerSize);

    {
        // Subset image created directly from the proxy (consumes it).
        sk_sp<SkSpecialImage> subSImg1(SkSpecialImage::MakeDeferredFromGpu(
                                                               context, subset,
                                                               kNeedNewImageUniqueID_SpecialImage,
                                                               std::move(proxy), nullptr));
        test_image(subSImg1, reporter, context, true, kPad, kFullSize);
    }

    {
        // Subset image created from the full image via makeSubset().
        sk_sp<SkSpecialImage> subSImg2(fullSImg->makeSubset(subset));
        test_image(subSImg2, reporter, context, true, kPad, kFullSize);
    }
}
// Test out the SkSpecialImage::makeTextureImage entry point DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SpecialImage_MakeTexture, reporter, ctxInfo) { GrContext* context = ctxInfo.grContext(); SkBitmap bm = create_bm(); const SkIRect& subset = SkIRect::MakeXYWH(kPad, kPad, kSmallerSize, kSmallerSize); { // raster sk_sp<SkSpecialImage> rasterImage(SkSpecialImage::MakeFromRaster( SkIRect::MakeWH(kFullSize, kFullSize), bm)); { sk_sp<SkSpecialImage> fromRaster(rasterImage->makeTextureImage(context)); test_texture_backed(reporter, rasterImage, fromRaster); } { sk_sp<SkSpecialImage> subRasterImage(rasterImage->makeSubset(subset)); sk_sp<SkSpecialImage> fromSubRaster(subRasterImage->makeTextureImage(context)); test_texture_backed(reporter, subRasterImage, fromSubRaster); } } { // gpu const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bm.info(), *context->caps()); sk_sp<GrTextureProxy> proxy(GrSurfaceProxy::MakeDeferred(context->resourceProvider(), desc, SkBudgeted::kNo, bm.getPixels(), bm.rowBytes())); if (!proxy) { return; } sk_sp<SkSpecialImage> gpuImage(SkSpecialImage::MakeDeferredFromGpu( context, SkIRect::MakeWH(kFullSize, kFullSize), kNeedNewImageUniqueID_SpecialImage, std::move(proxy), nullptr)); { sk_sp<SkSpecialImage> fromGPU(gpuImage->makeTextureImage(context)); test_texture_backed(reporter, gpuImage, fromGPU); } { sk_sp<SkSpecialImage> subGPUImage(gpuImage->makeSubset(subset)); sk_sp<SkSpecialImage> fromSubGPU(subGPUImage->makeTextureImage(context)); test_texture_backed(reporter, subGPUImage, fromSubGPU); } } }
// Creates a render-target special surface matching 'info', using the context
// that owns fTexture. Returns nullptr if the texture's context is gone.
sk_sp<SkSpecialSurface> onMakeSurface(const SkImageInfo& info) const override {
    GrContext* context = fTexture->getContext();
    if (!context) {
        return nullptr;
    }

    GrSurfaceDesc rtDesc = GrImageInfoToSurfaceDesc(info, *context->caps());
    rtDesc.fFlags = kRenderTarget_GrSurfaceFlag;

    return SkSpecialSurface::MakeRenderTarget(context, rtDesc);
}
// Creates a GPU texture for 'origBitmap' at its natural (unstretched) size,
// optionally tagging it with 'optionalKey' for the resource cache.
// Tries specialized paths in order — index-8 upload, ETC1 compressed data,
// YUV planes — before falling back to a straight pixel upload.
// Returns nullptr if the bitmap is too small or no path succeeds.
static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
                                                    const SkBitmap& origBitmap,
                                                    const GrUniqueKey& optionalKey) {
    // Reject bitmaps below the hardware's minimum texture dimension.
    if (origBitmap.width() < ctx->caps()->minTextureSize() ||
        origBitmap.height() < ctx->caps()->minTextureSize()) {
        return nullptr;
    }
    SkBitmap tmpBitmap;   // backing store if we must promote index-8 to N32 below

    const SkBitmap* bitmap = &origBitmap;

    GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap->info());
    const GrCaps* caps = ctx->caps();

    if (kIndex_8_SkColorType == bitmap->colorType()) {
        if (caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
            // GPU supports index-8 directly: build the palette+index payload and upload it.
            size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
                                                          bitmap->width(), bitmap->height());
            SkAutoMalloc storage(imageSize);
            build_index8_data(storage.get(), origBitmap);

            // our compressed data will be trimmed, so pass width() for its
            // "rowBytes", since they are the same now.
            return GrCreateTextureForPixels(ctx, optionalKey, desc, origBitmap.pixelRef(),
                                            storage.get(), bitmap->width());
        } else {
            origBitmap.copyTo(&tmpBitmap, kN32_SkColorType);
            // now bitmap points to our temp, which has been promoted to 32bits
            bitmap = &tmpBitmap;
            desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info());
        }
    } else if (!bitmap->readyToDraw()) {
        // If the bitmap had compressed data and was then uncompressed, it'll still return
        // compressed data on 'refEncodedData' and upload it. Probably not good, since if
        // the bitmap has available pixels, then they might not be what the decompressed
        // data is.
        GrTexture *texture = load_etc1_texture(ctx, optionalKey, *bitmap, desc);
        if (texture) {
            return texture;
        }
    }

    // YUV path is attempted regardless of which branch (if any) ran above.
    GrTexture *texture = load_yuv_texture(ctx, optionalKey, *bitmap, desc);
    if (texture) {
        return texture;
    }

    // Fallback: lock the pixels and upload them directly.
    SkAutoLockPixels alp(*bitmap);
    if (!bitmap->readyToDraw()) {
        return nullptr;
    }

    return GrCreateTextureForPixels(ctx, optionalKey, desc, origBitmap.pixelRef(),
                                    bitmap->getPixels(), bitmap->rowBytes());
}
/*
 *  We have 5 ways to try to return a texture (in sorted order)
 *
 *  1. Check the cache for a pre-existing one
 *  2. Ask the generator to natively create one
 *  3. Ask the generator to return a compressed form that the GPU might support
 *  4. Ask the generator to return YUV planes, which the GPU can convert
 *  5. Ask the generator to return RGB(A) data, which the GPU can convert
 */
GrTexture* SkImageCacherator::lockUnstretchedTexture(GrContext* ctx, SkImageUsageType usage,
                                                     const SkImage* client) {
    // textures (at least the texture-key) only support 16bit dimensions, so abort early
    // if we're too big.
    if (fInfo.width() > 0xFFFF || fInfo.height() > 0xFFFF) {
        return nullptr;
    }

    // Build the cache key from the image's unique ID, bounds and usage.
    GrUniqueKey key;
    GrMakeKeyFromImageID(&key, fUniqueID, SkIRect::MakeWH(fInfo.width(), fInfo.height()),
                         *ctx->caps(), usage);

    // 1. Check the cache for a pre-existing one
    if (GrTexture* tex = ctx->textureProvider()->findAndRefTextureByUniqueKey(key)) {
        return tex;
    }

    // 2. Ask the generator to natively create one
    {
        ScopedGenerator generator(this);
        SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(),
                                           fInfo.height());
        if (GrTexture* tex = generator->generateTexture(ctx, usage, &subset)) {
            // set_key_and_return installs 'key' on the texture before returning it.
            return set_key_and_return(tex, key);
        }
    }

    const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(fInfo);

    // 3. Ask the generator to return a compressed form that the GPU might support
    SkAutoTUnref<SkData> data(this->refEncoded());
    if (data) {
        GrTexture* tex = load_compressed_into_texture(ctx, data, desc);
        if (tex) {
            return set_key_and_return(tex, key);
        }
    }

    // 4. Ask the generator to return YUV planes, which the GPU can convert
    {
        ScopedGenerator generator(this);
        Generator_GrYUVProvider provider(generator);
        GrTexture* tex = provider.refAsTexture(ctx, desc, true);
        if (tex) {
            return set_key_and_return(tex, key);
        }
    }

    // 5. Ask the generator to return RGB(A) data, which the GPU can convert
    // NOTE: this path delegates caching to GrRefCachedBitmapTexture and does
    // not install 'key' itself.
    SkBitmap bitmap;
    if (this->tryLockAsBitmap(&bitmap, client)) {
        return GrRefCachedBitmapTexture(ctx, bitmap, usage);
    }
    return nullptr;
}
// Creates a surface compatible with this image: a GPU render target when the
// wrapped image is texture-backed (and GPU support is compiled in), otherwise
// a raster surface.
sk_sp<SkSpecialSurface> onMakeSurface(const SkImageInfo& info) const override {
#if SK_SUPPORT_GPU
    if (GrTexture* tex = as_IB(fImage.get())->peekTexture()) {
        GrSurfaceDesc rtDesc = GrImageInfoToSurfaceDesc(info, *tex->getContext()->caps());
        rtDesc.fFlags = kRenderTarget_GrSurfaceFlag;
        return SkSpecialSurface::MakeRenderTarget(tex->getContext(), rtDesc);
    }
#endif
    return SkSpecialSurface::MakeRaster(info, nullptr);
}
// Returns a new surface compatible with this image: a GPU render target when
// the wrapped image is texture-backed (and GPU support is compiled in),
// otherwise a raster surface.
SkSpecialSurface* onNewSurface(const SkImageInfo& info) const override {
#if SK_SUPPORT_GPU
    if (GrTexture* tex = as_IB(fImage.get())->peekTexture()) {
        GrSurfaceDesc rtDesc = GrImageInfoToSurfaceDesc(info);
        rtDesc.fFlags = kRenderTarget_GrSurfaceFlag;
        return SkSpecialSurface::NewRenderTarget(this->proxy(), tex->getContext(), rtDesc);
    }
#endif
    return SkSpecialSurface::NewRaster(this->proxy(), info, nullptr);
}
// Uploads the pixels of a (non-texture-backed) bitmap into a new GrTexture.
// Tries specialized paths in order — index-8 upload, ETC1 compressed data,
// YUV planes — before falling back to a straight pixel upload.
// Returns nullptr if no path succeeds.
GrTexture* GrUploadBitmapToTexture(GrContext* ctx, const SkBitmap& bmp) {
    SkASSERT(!bmp.getTexture());

    SkBitmap tmpBitmap;   // backing store if we must promote index-8 to N32 below

    const SkBitmap* bitmap = &bmp;

    GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap->info());
    const GrCaps* caps = ctx->caps();

    if (kIndex_8_SkColorType == bitmap->colorType()) {
        if (caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
            // GPU supports index-8 directly: build the palette+index payload and upload it.
            size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
                                                          bitmap->width(), bitmap->height());
            SkAutoMalloc storage(imageSize);
            build_index8_data(storage.get(), bmp);

            // our compressed data will be trimmed, so pass width() for its
            // "rowBytes", since they are the same now.
            return ctx->textureProvider()->createTexture(desc, true, storage.get(),
                                                         bitmap->width());
        } else {
            bmp.copyTo(&tmpBitmap, kN32_SkColorType);
            // now bitmap points to our temp, which has been promoted to 32bits
            bitmap = &tmpBitmap;
            desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info());
        }
    } else if (!bitmap->readyToDraw()) {
        // If the bitmap had compressed data and was then uncompressed, it'll still return
        // compressed data on 'refEncodedData' and upload it. Probably not good, since if
        // the bitmap has available pixels, then they might not be what the decompressed
        // data is.

        // Really?? We aren't doing this with YUV.

        GrTexture *texture = load_etc1_texture(ctx, *bitmap, desc);
        if (texture) {
            return texture;
        }
    }

    // YUV path is attempted regardless of which branch (if any) ran above.
    GrTexture *texture = create_texture_from_yuv(ctx, *bitmap, desc);
    if (texture) {
        return texture;
    }

    // Fallback: lock the pixels and upload them directly.
    SkAutoLockPixels alp(*bitmap);
    if (!bitmap->readyToDraw()) {
        return nullptr;
    }

    return ctx->textureProvider()->createTexture(desc, true, bitmap->getPixels(),
                                                 bitmap->rowBytes());
}
/*
 *  We have 5 ways to try to return a texture (in sorted order)
 *
 *  1. Check the cache for a pre-existing one
 *  2. Ask the generator to natively create one
 *  3. Ask the generator to return a compressed form that the GPU might support
 *  4. Ask the generator to return YUV planes, which the GPU can convert
 *  5. Ask the generator to return RGB(A) data, which the GPU can convert
 */
GrTexture* SkImageCacherator::lockTexture(GrContext* ctx, const GrUniqueKey& key,
                                          const SkImage* client, SkImage::CachingHint chint) {
    // 1. Check the cache for a pre-existing one
    if (key.isValid()) {
        if (GrTexture* tex = ctx->textureProvider()->findAndRefTextureByUniqueKey(key)) {
            return tex;
        }
    }

    // 2. Ask the generator to natively create one
    {
        ScopedGenerator generator(this);
        SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(),
                                           fInfo.height());
        if (GrTexture* tex = generator->generateTexture(ctx, &subset)) {
            // set_key_and_return installs 'key' on the texture before returning it.
            return set_key_and_return(tex, key);
        }
    }

    const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(fInfo);

    // 3. Ask the generator to return a compressed form that the GPU might support
    SkAutoTUnref<SkData> data(this->refEncoded());
    if (data) {
        GrTexture* tex = load_compressed_into_texture(ctx, data, desc);
        if (tex) {
            return set_key_and_return(tex, key);
        }
    }

    // 4. Ask the generator to return YUV planes, which the GPU can convert
    {
        ScopedGenerator generator(this);
        Generator_GrYUVProvider provider(generator);
        GrTexture* tex = provider.refAsTexture(ctx, desc, true);
        if (tex) {
            return set_key_and_return(tex, key);
        }
    }

    // 5. Ask the generator to return RGB(A) data, which the GPU can convert
    SkBitmap bitmap;
    if (this->tryLockAsBitmap(&bitmap, client, chint)) {
        GrTexture* tex = GrUploadBitmapToTexture(ctx, bitmap);
        if (tex) {
            return set_key_and_return(tex, key);
        }
    }
    // Every path failed.
    return nullptr;
}
/*
 *  We have 5 ways to try to return a texture (in sorted order)
 *
 *  1. Check the cache for a pre-existing one
 *  2. Ask the generator to natively create one
 *  3. Ask the generator to return a compressed form that the GPU might support
 *  4. Ask the generator to return YUV planes, which the GPU can convert
 *  5. Ask the generator to return RGB(A) data, which the GPU can convert
 */
GrTexture* SkImageCacherator::lockTexture(GrContext* ctx, const GrUniqueKey& key,
                                          const SkImage* client, SkImage::CachingHint chint) {
    // Values representing the various texture lock paths we can take. Used for logging the path
    // taken to a histogram.
    enum LockTexturePath {
        kFailure_LockTexturePath,
        kPreExisting_LockTexturePath,
        kNative_LockTexturePath,
        kCompressed_LockTexturePath,
        kYUV_LockTexturePath,
        kRGBA_LockTexturePath,
    };

    enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };

    // 1. Check the cache for a pre-existing one
    if (key.isValid()) {
        if (GrTexture* tex = ctx->textureProvider()->findAndRefTextureByUniqueKey(key)) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
                                     kLockTexturePathCount);
            return tex;
        }
    }

    // 2. Ask the generator to natively create one
    {
        ScopedGenerator generator(this);
        SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), fInfo.width(),
                                           fInfo.height());
        if (GrTexture* tex = generator->generateTexture(ctx, &subset)) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
                                     kLockTexturePathCount);
            // set_key_and_return installs 'key' on the texture before returning it.
            return set_key_and_return(tex, key);
        }
    }

    const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(fInfo);

    // 3. Ask the generator to return a compressed form that the GPU might support
    SkAutoTUnref<SkData> data(this->refEncoded(ctx));
    if (data) {
        GrTexture* tex = load_compressed_into_texture(ctx, data, desc);
        if (tex) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kCompressed_LockTexturePath,
                                     kLockTexturePathCount);
            return set_key_and_return(tex, key);
        }
    }

    // 4. Ask the generator to return YUV planes, which the GPU can convert
    {
        ScopedGenerator generator(this);
        Generator_GrYUVProvider provider(generator);
        GrTexture* tex = provider.refAsTexture(ctx, desc, true);
        if (tex) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
                                     kLockTexturePathCount);
            return set_key_and_return(tex, key);
        }
    }

    // 5. Ask the generator to return RGB(A) data, which the GPU can convert
    SkBitmap bitmap;
    if (this->tryLockAsBitmap(&bitmap, client, chint)) {
        GrTexture* tex = GrUploadBitmapToTexture(ctx, bitmap);
        if (tex) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
                                     kLockTexturePathCount);
            return set_key_and_return(tex, key);
        }
    }

    // Every path failed; record that too.
    SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath,
                             kLockTexturePathCount);
    return nullptr;
}
/*
 *  We have 4 ways to try to return a texture (in sorted order)
 *
 *  1. Check the cache for a pre-existing one
 *  2. Ask the generator to natively create one
 *  3. Ask the generator to return YUV planes, which the GPU can convert
 *  4. Ask the generator to return RGB(A) data, which the GPU can convert
 */
sk_sp<GrTextureProxy> SkImage_Lazy::lockTextureProxy(GrContext* ctx,
                                                     const GrUniqueKey& origKey,
                                                     SkImage::CachingHint chint,
                                                     bool willBeMipped,
                                                     SkColorSpace* dstColorSpace,
                                                     GrTextureMaker::AllowedTexGenType genType) {
    // Values representing the various texture lock paths we can take. Used for logging the path
    // taken to a histogram.
    enum LockTexturePath {
        kFailure_LockTexturePath,
        kPreExisting_LockTexturePath,
        kNative_LockTexturePath,
        kCompressed_LockTexturePath, // Deprecated
        kYUV_LockTexturePath,
        kRGBA_LockTexturePath,
    };

    enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };

    // Determine which cached format we're going to use (which may involve decoding to a different
    // info than the generator provides).
    CachedFormat format = this->chooseCacheFormat(dstColorSpace, ctx->caps());

    // Fold the cache format into our texture key
    GrUniqueKey key;
    this->makeCacheKeyFromOrigKey(origKey, format, &key);

    GrProxyProvider* proxyProvider = ctx->contextPriv().proxyProvider();
    sk_sp<GrTextureProxy> proxy;

    // 1. Check the cache for a pre-existing one
    // Note: a non-mipped hit when mips are required is kept in 'proxy' so the
    // copy-to-mipped fallback at the bottom can use it.
    if (key.isValid()) {
        proxy = proxyProvider->findOrCreateProxyByUniqueKey(key, kTopLeft_GrSurfaceOrigin);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
                                     kLockTexturePathCount);
            if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
                return proxy;
            }
        }
    }

    // The CachedFormat is both an index for which cache "slot" we'll use to store this particular
    // decoded variant of the encoded data, and also a recipe for how to transform the original
    // info to get the one that we're going to decode to.
    const SkImageInfo cacheInfo = this->buildCacheInfo(format);
    SkImageInfo genPixelsInfo = cacheInfo;
    SkTransferFunctionBehavior behavior = getGeneratorBehaviorAndInfo(&genPixelsInfo);

    // 2. Ask the generator to natively create one
    if (!proxy) {
        ScopedGenerator generator(fSharedGenerator);
        // Bail out early if the caller only allows cheap generation and the
        // generator's native path is not cheap.
        if (GrTextureMaker::AllowedTexGenType::kCheap == genType &&
                SkImageGenerator::TexGenType::kCheap != generator->onCanGenerateTexture()) {
            return nullptr;
        }
        if ((proxy = generator->generateTexture(ctx, genPixelsInfo, fOrigin, behavior,
                                                willBeMipped))) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            if (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped()) {
                return proxy;
            }
        }
    }

    // 3. Ask the generator to return YUV planes, which the GPU can convert. If we will be mipping
    //    the texture we fall through here and have the CPU generate the mip maps for us.
    if (!proxy && !willBeMipped && !ctx->contextPriv().disableGpuYUVConversion()) {
        const GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(cacheInfo, *ctx->caps());
        ScopedGenerator generator(fSharedGenerator);
        Generator_GrYUVProvider provider(generator);

        // The pixels in the texture will be in the generator's color space. If onMakeColorSpace
        // has been called then this will not match this image's color space. To correct this, apply
        // a color space conversion from the generator's color space to this image's color space.
        const SkColorSpace* generatorColorSpace =
                fSharedGenerator->fGenerator->getInfo().colorSpace();
        const SkColorSpace* thisColorSpace = fInfo.colorSpace();

        // TODO: Update to create the mipped surface in the YUV generator and draw the base layer
        // directly into the mipped surface.
        proxy = provider.refAsTextureProxy(ctx, desc, generatorColorSpace, thisColorSpace);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            return proxy;
        }
    }

    // 4. Ask the generator to return RGB(A) data, which the GPU can convert
    SkBitmap bitmap;
    if (!proxy && this->lockAsBitmap(&bitmap, chint, format, genPixelsInfo, behavior)) {
        if (willBeMipped) {
            proxy = proxyProvider->createMipMapProxyFromBitmap(bitmap, dstColorSpace);
        }
        if (!proxy) {
            proxy = GrUploadBitmapToTextureProxy(proxyProvider, bitmap, dstColorSpace);
        }
        if (proxy && (!willBeMipped || GrMipMapped::kYes == proxy->mipMapped())) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
                                     kLockTexturePathCount);
            set_key_on_proxy(proxyProvider, proxy.get(), nullptr, key);
            return proxy;
        }
    }

    if (proxy) {
        // We need a mipped proxy, but we either found a proxy earlier that wasn't mipped, generated
        // a native non mipped proxy, or generated a non-mipped yuv proxy. Thus we generate a new
        // mipped surface and copy the original proxy into the base layer. We will then let the gpu
        // generate the rest of the mips.
        SkASSERT(willBeMipped);
        SkASSERT(GrMipMapped::kNo == proxy->mipMapped());
        if (auto mippedProxy = GrCopyBaseMipMapToTextureProxy(ctx, proxy.get())) {
            set_key_on_proxy(proxyProvider, mippedProxy.get(), proxy.get(), key);
            return mippedProxy;
        }
        // We failed to make a mipped proxy with the base copied into it. This could have
        // been from failure to make the proxy or failure to do the copy. Thus we will fall
        // back to just using the non mipped proxy; See skbug.com/7094.
        return proxy;
    }

    SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath,
                             kLockTexturePathCount);
    return nullptr;
}
// Returns a new render-target special surface matching 'info', created on the
// context that owns fTexture.
SkSpecialSurface* onNewSurface(const SkImageInfo& info) const override {
    GrSurfaceDesc rtDesc = GrImageInfoToSurfaceDesc(info);
    rtDesc.fFlags = kRenderTarget_GrSurfaceFlag;

    return SkSpecialSurface::NewRenderTarget(this->proxy(), fTexture->getContext(), rtDesc);
}