// Describe a plain (non-renderable, non-MSAA) texture whose dimensions and
// pixel config match the supplied bitmap.
static void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrSurfaceDesc* desc) {
    desc->fConfig = SkImageInfo2GrPixelConfig(bitmap.info());
    desc->fWidth = bitmap.width();
    desc->fHeight = bitmap.height();
    desc->fFlags = kNone_GrSurfaceFlags;
    desc->fSampleCnt = 0;  // no multisampling for a simple upload target
}
// Describe a plain (non-renderable, non-MSAA) texture whose dimensions and
// pixel config are derived from the supplied SkImageInfo.
static void make_texture_desc(const SkImageInfo& info, GrSurfaceDesc* desc) {
    desc->fConfig = SkImageInfo2GrPixelConfig(info);
    desc->fWidth = info.width();
    desc->fHeight = info.height();
    desc->fFlags = kNone_GrSurfaceFlags;
    desc->fSampleCnt = 0;  // no multisampling for a simple upload target
}
bool SkImage_Gpu::onReadPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, int srcX, int srcY, CachingHint) const { GrPixelConfig config = SkImageInfo2GrPixelConfig(info, *fTexture->getContext()->caps()); uint32_t flags = 0; if (kUnpremul_SkAlphaType == info.alphaType() && kPremul_SkAlphaType == fAlphaType) { // let the GPU perform this transformation for us flags = GrContext::kUnpremul_PixelOpsFlag; } if (!fTexture->readPixels(srcX, srcY, info.width(), info.height(), config, pixels, rowBytes, flags)) { return false; } // do we have to manually fix-up the alpha channel? // src dst // unpremul premul fix manually // premul unpremul done by kUnpremul_PixelOpsFlag // all other combos need to change. // // Should this be handled by Ganesh? todo:? // if (kPremul_SkAlphaType == info.alphaType() && kUnpremul_SkAlphaType == fAlphaType) { apply_premul(info, pixels, rowBytes); } return true; }
// GM test: uploads three planar bitmaps as texture proxies and draws them
// through GrYUVtoRGBEffect once per SkYUVColorSpace, stacked vertically.
void onDraw(SkCanvas* canvas) override {
    GrRenderTargetContext* renderTargetContext =
            canvas->internal_private_accessTopLayerRenderTargetContext();
    if (!renderTargetContext) {
        // GPU-only GM: render the standard "GPU only" message on raster backends.
        skiagm::GM::DrawGpuOnlyMessage(canvas);
        return;
    }

    GrContext* context = canvas->getGrContext();
    if (!context) {
        return;
    }

    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();

    // Three proxies: index 0 comes from fBmp[0]; proxies 1 and 2 share fBmp[1].
    sk_sp<GrTextureProxy> proxy[3];
    for (int i = 0; i < 3; ++i) {
        int index = (0 == i) ? 0 : 1;

        GrSurfaceDesc desc;
        desc.fWidth = fBmp[index].width();
        desc.fHeight = fBmp[index].height();
        desc.fConfig = SkImageInfo2GrPixelConfig(fBmp[index].info(), *context->caps());
        SkASSERT(kUnknown_GrPixelConfig != desc.fConfig);

        proxy[i] = proxyProvider->createTextureProxy(
                desc, SkBudgeted::kYes, fBmp[index].getPixels(), fBmp[index].rowBytes());
        if (!proxy[i]) {
            return;
        }
    }

    // Layout constants for each drawn rectangle.
    constexpr SkScalar kDrawPad = 10.f;
    constexpr SkScalar kTestPad = 10.f;
    constexpr SkScalar kColorSpaceOffset = 36.f;  // vertical stride per color space
    SkISize sizes[3] = {{YSIZE, YSIZE}, {USIZE, USIZE}, {VSIZE, VSIZE}};

    // One row per SkYUVColorSpace value.
    for (int space = kJPEG_SkYUVColorSpace; space <= kLastEnum_SkYUVColorSpace; ++space) {
        SkRect renderRect = SkRect::MakeWH(SkIntToScalar(fBmp[0].width()),
                                           SkIntToScalar(fBmp[0].height()));
        renderRect.outset(kDrawPad, kDrawPad);

        SkScalar y = kDrawPad + kTestPad + space * kColorSpaceOffset;
        SkScalar x = kDrawPad + kTestPad;

        GrPaint grPaint;
        grPaint.setXPFactory(GrPorterDuffXPFactory::Get(SkBlendMode::kSrc));
        auto fp = GrYUVtoRGBEffect::Make(proxy[0], proxy[1], proxy[2], sizes,
                                         static_cast<SkYUVColorSpace>(space), true);
        if (fp) {
            SkMatrix viewMatrix;
            viewMatrix.setTranslate(x, y);
            grPaint.addColorFragmentProcessor(std::move(fp));
            // Bypass the public draw API so the op is added exactly as built.
            std::unique_ptr<GrDrawOp> op(GrRectOpFactory::MakeNonAAFill(
                    std::move(grPaint), viewMatrix, renderRect, GrAAType::kNone));
            renderTargetContext->priv().testingOnly_addDrawOp(std::move(op));
        }
    }
}
// Build a non-renderable GrSurfaceDesc equivalent to the given SkImageInfo.
GrSurfaceDesc GrImageInfoToSurfaceDesc(const SkImageInfo& info) {
    GrSurfaceDesc desc;
    desc.fConfig = SkImageInfo2GrPixelConfig(info);
    desc.fWidth = info.width();
    desc.fHeight = info.height();
    desc.fFlags = kNone_GrSurfaceFlags;
    desc.fSampleCnt = 0;  // plain texture, no MSAA
    return desc;
}
// Build a non-renderable, top-left-origin GrSurfaceDesc equivalent to the
// given SkImageInfo, validating the config against the supplied caps.
GrSurfaceDesc GrImageInfoToSurfaceDesc(const SkImageInfo& info, const GrCaps& caps) {
    GrSurfaceDesc desc;
    desc.fConfig = SkImageInfo2GrPixelConfig(info, caps);
    desc.fOrigin = kTopLeft_GrSurfaceOrigin;
    desc.fWidth = info.width();
    desc.fHeight = info.height();
    desc.fFlags = kNone_GrSurfaceFlags;
    desc.fSampleCnt = 1;  // single sample (this API era counts samples from 1)
    return desc;
}
// Upload 'origBitmap' to a new texture at its original size, trying the
// specialized index8 / ETC1 / YUV paths before falling back to a plain
// pixel upload. Returns nullptr on failure or if the bitmap is too small.
static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
                                                    const SkBitmap& origBitmap,
                                                    const GrUniqueKey& optionalKey) {
    if (origBitmap.width() < ctx->caps()->minTextureSize() ||
        origBitmap.height() < ctx->caps()->minTextureSize()) {
        return nullptr;
    }
    SkBitmap tmpBitmap;

    const SkBitmap* bitmap = &origBitmap;

    GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap->info());
    const GrCaps* caps = ctx->caps();

    if (kIndex_8_SkColorType == bitmap->colorType()) {
        if (caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
            // Hardware supports palettized textures: repack and upload directly.
            size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
                                                          bitmap->width(), bitmap->height());
            SkAutoMalloc storage(imageSize);
            build_index8_data(storage.get(), origBitmap);

            // our compressed data will be trimmed, so pass width() for its
            // "rowBytes", since they are the same now.
            return GrCreateTextureForPixels(ctx, optionalKey, desc, origBitmap.pixelRef(),
                                            storage.get(), bitmap->width());
        } else {
            origBitmap.copyTo(&tmpBitmap, kN32_SkColorType);
            // now bitmap points to our temp, which has been promoted to 32bits
            bitmap = &tmpBitmap;
            // Refresh the config: the promoted copy is no longer index8.
            desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info());
        }
    } else if (!bitmap->readyToDraw()) {
        // If the bitmap had compressed data and was then uncompressed, it'll still return
        // compressed data on 'refEncodedData' and upload it. Probably not good, since if
        // the bitmap has available pixels, then they might not be what the decompressed
        // data is.
        GrTexture *texture = load_etc1_texture(ctx, optionalKey, *bitmap, desc);
        if (texture) {
            return texture;
        }
    }

    // Try a planar YUV upload before resorting to a plain RGBA upload.
    GrTexture *texture = load_yuv_texture(ctx, optionalKey, *bitmap, desc);
    if (texture) {
        return texture;
    }

    SkAutoLockPixels alp(*bitmap);
    if (!bitmap->readyToDraw()) {
        return nullptr;
    }

    return GrCreateTextureForPixels(ctx, optionalKey, desc, origBitmap.pixelRef(),
                                    bitmap->getPixels(), bitmap->rowBytes());
}
// Create a render-target-backed special surface compatible with 'info',
// using the context that owns our backing texture. Returns nullptr when the
// texture has been abandoned (no context).
sk_sp<SkSpecialSurface> onMakeSurface(const SkImageInfo& info) const override {
    GrContext* context = fTexture->getContext();
    if (!context) {
        return nullptr;
    }
    const GrPixelConfig config = SkImageInfo2GrPixelConfig(info, *context->caps());
    return SkSpecialSurface::MakeRenderTarget(context, info.width(), info.height(),
                                              config, sk_ref_sp(info.colorSpace()));
}
// Upload a raster bitmap to a new GPU texture, preferring the specialized
// index8 / ETC1 / YUV upload paths before the generic pixel upload.
// Returns nullptr if the bitmap's pixels are unavailable.
GrTexture* GrUploadBitmapToTexture(GrContext* ctx, const SkBitmap& bmp) {
    SkASSERT(!bmp.getTexture());  // caller must pass a raster-backed bitmap

    SkBitmap tmpBitmap;
    const SkBitmap* bitmap = &bmp;

    GrSurfaceDesc desc = GrImageInfoToSurfaceDesc(bitmap->info());
    const GrCaps* caps = ctx->caps();

    if (kIndex_8_SkColorType == bitmap->colorType()) {
        if (caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
            // Hardware supports palettized textures: repack and upload directly.
            size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
                                                          bitmap->width(), bitmap->height());
            SkAutoMalloc storage(imageSize);
            build_index8_data(storage.get(), bmp);

            // our compressed data will be trimmed, so pass width() for its
            // "rowBytes", since they are the same now.
            return ctx->textureProvider()->createTexture(desc, true, storage.get(),
                                                         bitmap->width());
        } else {
            bmp.copyTo(&tmpBitmap, kN32_SkColorType);
            // now bitmap points to our temp, which has been promoted to 32bits
            bitmap = &tmpBitmap;
            // Refresh the config: the promoted copy is no longer index8.
            desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info());
        }
    } else if (!bitmap->readyToDraw()) {
        // If the bitmap had compressed data and was then uncompressed, it'll still return
        // compressed data on 'refEncodedData' and upload it. Probably not good, since if
        // the bitmap has available pixels, then they might not be what the decompressed
        // data is.
        // Really?? We aren't doing this with YUV.
        GrTexture *texture = load_etc1_texture(ctx, *bitmap, desc);
        if (texture) {
            return texture;
        }
    }

    // Try a planar YUV upload before resorting to a plain pixel upload.
    GrTexture *texture = create_texture_from_yuv(ctx, *bitmap, desc);
    if (texture) {
        return texture;
    }

    SkAutoLockPixels alp(*bitmap);
    if (!bitmap->readyToDraw()) {
        return nullptr;
    }

    return ctx->textureProvider()->createTexture(desc, true, bitmap->getPixels(),
                                                 bitmap->rowBytes());
}
// Lock the cacherator's pixels into 'bitmap'. First tries the raster cache;
// failing that (GPU builds only), generates a texture and reads it back to
// raster, caching the result under our unique ID when 'chint' allows.
bool SkImageCacherator::lockAsBitmap(SkBitmap* bitmap, const SkImage* client,
                                     SkImage::CachingHint chint) {
    if (this->tryLockAsBitmap(bitmap, client, chint)) {
        return check_output_bitmap(*bitmap, fUniqueID);
    }

#if SK_SUPPORT_GPU
    // Try to get a texture and read it back to raster (and then cache that with our ID)
    SkAutoTUnref<GrTexture> tex;

    {
        // Scope limits the generator lock to just the texture generation.
        ScopedGenerator generator(this);
        SkIRect subset = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(),
                                           fInfo.width(), fInfo.height());
        tex.reset(generator->generateTexture(nullptr, &subset));
    }
    if (!tex) {
        bitmap->reset();
        return false;
    }

    if (!bitmap->tryAllocPixels(fInfo)) {
        bitmap->reset();
        return false;
    }

    const uint32_t pixelOpsFlags = 0;
    if (!tex->readPixels(0, 0, bitmap->width(), bitmap->height(),
                         SkImageInfo2GrPixelConfig(fInfo),
                         bitmap->getPixels(), bitmap->rowBytes(), pixelOpsFlags)) {
        bitmap->reset();
        return false;
    }

    // Tag the pixels so future lookups by fUniqueID find this immutable copy.
    bitmap->pixelRef()->setImmutableWithID(fUniqueID);
    if (SkImage::kAllow_CachingHint == chint) {
        SkBitmapCache::Add(fUniqueID, *bitmap);
        if (client) {
            as_IB(client)->notifyAddedToCache();
        }
    }
    return check_output_bitmap(*bitmap, fUniqueID);
#else
    return false;
#endif
}
// Copy 'texture' (or the given subset of it) into a brand-new render-target
// texture with the requested color type/profile, and wrap the copy in an
// SkGrPixelRef. Returns nullptr on bad input or allocation failure.
//
// Fix: replaced NULL with nullptr throughout for consistency with the rest
// of the file and modern C++ idiom; behavior is unchanged.
static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorType dstCT,
                                                  SkColorProfileType dstPT,
                                                  const SkIRect* subset) {
    if (nullptr == texture || kUnknown_SkColorType == dstCT) {
        return nullptr;
    }
    GrContext* context = texture->getContext();
    if (nullptr == context) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    SkIRect srcRect;

    if (!subset) {
        // No subset: copy the whole texture.
        desc.fWidth = texture->width();
        desc.fHeight = texture->height();
        srcRect = SkIRect::MakeWH(texture->width(), texture->height());
    } else {
        SkASSERT(SkIRect::MakeWH(texture->width(), texture->height()).contains(*subset));
        // Create a new texture that is the size of subset.
        desc.fWidth = subset->width();
        desc.fHeight = subset->height();
        srcRect = *subset;
    }
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fConfig = SkImageInfo2GrPixelConfig(dstCT, kPremul_SkAlphaType, dstPT);

    GrTexture* dst = context->createTexture(desc, false, nullptr, 0);
    if (nullptr == dst) {
        return nullptr;
    }

    // Blink is relying on the above copy being sent to GL immediately in the case when the source
    // is a WebGL canvas backing store. We could have a TODO to remove this flush flag, but we have
    // a larger TODO to remove SkGrPixelRef entirely.
    context->copySurface(dst->asRenderTarget(), texture, srcRect, SkIPoint::Make(0, 0),
                         GrContext::kFlushWrites_PixelOp);

    SkImageInfo info = SkImageInfo::Make(desc.fWidth, desc.fHeight, dstCT,
                                         kPremul_SkAlphaType, dstPT);
    SkGrPixelRef* pixelRef = SkNEW_ARGS(SkGrPixelRef, (info, dst));
    SkSafeUnref(dst);  // pixelRef now owns its own reference to dst
    return pixelRef;
}
// Lock the generator's pixels into 'bitmap'. First tries the raster path;
// failing that (GPU builds only), generates a texture and reads it back to
// raster, caching the result under the generator's unique ID.
bool SkImageCacherator::lockAsBitmap(SkBitmap* bitmap) {
    const uint32_t uniqueID = fGenerator->uniqueID();

    if (this->tryLockAsBitmap(bitmap)) {
        return check_output_bitmap(*bitmap, uniqueID);
    }

#if SK_SUPPORT_GPU
    // Try to get a texture and read it back to raster (and then cache that with our ID)
    SkAutoTUnref<GrTexture> tex(fGenerator->generateTexture(nullptr, kUntiled_SkImageUsageType));
    if (!tex) {
        bitmap->reset();
        return false;
    }

    const SkImageInfo& info = this->info();
    if (!bitmap->tryAllocPixels(info)) {
        bitmap->reset();
        return false;
    }

    const uint32_t pixelOpsFlags = 0;
    if (!tex->readPixels(0, 0, bitmap->width(), bitmap->height(),
                         SkImageInfo2GrPixelConfig(info),
                         bitmap->getPixels(), bitmap->rowBytes(), pixelOpsFlags)) {
        bitmap->reset();
        return false;
    }

    // Tag the pixels so future lookups by uniqueID find this immutable copy.
    bitmap->pixelRef()->setImmutableWithID(uniqueID);
    SkBitmapCache::Add(uniqueID, *bitmap);
    return check_output_bitmap(*bitmap, uniqueID);
#else
    return false;
#endif
}
// Draw one frame: render the scene, flush GPU work, and (for the raster
// device type) push the raster surface's pixels to the GPU render target.
//
// Fix: guard against newImageSnapshot() returning null and against
// peekPixels() failing — previously a failed peek would hand a null pixel
// pointer straight to writePixels().
void HelloWorldWindow::draw(SkCanvas* canvas) {
    drawContents(canvas);
    // in case we have queued drawing calls
    fContext->flush();
    // Invalidate the window to force a redraw. Poor man's animation mechanism.
    this->inval(NULL);

    if (kRaster_DeviceType == fType) {
        // need to send the raster bits to the (gpu) window
        SkImage* snap = fSurface->newImageSnapshot();
        if (snap) {
            size_t rowBytes;
            SkImageInfo info;
            const void* pixels = snap->peekPixels(&info, &rowBytes);
            if (pixels) {
                fRenderTarget->writePixels(0, 0, snap->width(), snap->height(),
                                           SkImageInfo2GrPixelConfig(info.colorType(),
                                                                     info.alphaType(),
                                                                     info.profileType()),
                                           pixels, rowBytes,
                                           GrContext::kFlushWrites_PixelOp);
            }
            SkSafeUnref(snap);
        }
    }
    INHERITED::present();
}
// Lock a row of the atlas texture for 'data'. If the bitmap's generation ID
// is already resident, its row is re-locked; otherwise the least recently
// used row is evicted and the bitmap's pixels are written into it.
// Returns the row number, or -1 if no row could be obtained.
int GrTextureStripAtlas::lockRow(const SkBitmap& data) {
    VALIDATE;
    if (0 == fLockedRows) {
        // First lock: (re)acquire the backing texture.
        this->lockTexture();
        if (!fTexture) {
            return -1;
        }
    }

    int key = data.getGenerationID();  // bitmap's generation ID keys the row
    int rowNumber = -1;
    int index = this->searchByKey(key);

    if (index >= 0) {
        // We already have the data in a row, so we can just return that row
        AtlasRow* row = fKeyTable[index];
        if (0 == row->fLocks) {
            this->removeFromLRU(row);
        }
        ++row->fLocks;
        ++fLockedRows;

        // Since all the rows are always stored in a contiguous array, we can save the memory
        // required for storing row numbers and just compute it with some pointer arithmetic
        rowNumber = static_cast<int>(row - fRows);
    } else {
        // ~index is the index where we will insert the new key to keep things sorted
        index = ~index;

        // We don't have this data cached, so pick the least recently used row to copy into
        AtlasRow* row = this->getLRU();

        ++fLockedRows;

        if (nullptr == row) {
            // force a flush, which should unlock all the rows; then try again
            fDesc.fContext->flush();
            row = this->getLRU();
            if (nullptr == row) {
                --fLockedRows;
                return -1;
            }
        }

        this->removeFromLRU(row);

        uint32_t oldKey = row->fKey;

        // If we are writing into a row that already held bitmap data, we need to remove the
        // reference to that genID which is stored in our sorted table of key values.
        if (oldKey != kEmptyAtlasRowKey) {
            // Find the entry in the list; if it's before the index where we plan on adding the new
            // entry, we decrement since it will shift elements ahead of it back by one.
            int oldIndex = this->searchByKey(oldKey);
            if (oldIndex < index) {
                --index;
            }
            fKeyTable.remove(oldIndex);
        }

        row->fKey = key;
        row->fLocks = 1;
        fKeyTable.insert(index, 1, &row);
        rowNumber = static_cast<int>(row - fRows);

        SkAutoLockPixels lock(data);

        // Pass in the kDontFlush flag, since we know we're writing to a part of this texture
        // that is not currently in use
        fTexture->writePixels(0, rowNumber * fDesc.fRowHeight, fDesc.fWidth, fDesc.fRowHeight,
                              SkImageInfo2GrPixelConfig(data.info(), *this->getContext()->caps()),
                              data.getPixels(), data.rowBytes(),
                              GrContext::kDontFlush_PixelOpsFlag);
    }

    SkASSERT(rowNumber >= 0);
    VALIDATE;
    return rowNumber;
}
// Upload 'origBitmap' to a new texture at its original size, trying the
// specialized index8 / ETC1 / YUV paths before falling back to a plain
// pixel upload. Returns NULL on failure or if the bitmap is too small.
static GrTexture* create_unstretched_bitmap_texture(GrContext* ctx,
                                                    const SkBitmap& origBitmap,
                                                    const GrUniqueKey& optionalKey) {
    if (origBitmap.width() < ctx->caps()->minTextureSize() ||
        origBitmap.height() < ctx->caps()->minTextureSize()) {
        return NULL;
    }
    SkBitmap tmpBitmap;

    const SkBitmap* bitmap = &origBitmap;

    GrSurfaceDesc desc;
    generate_bitmap_texture_desc(*bitmap, &desc);
    const GrCaps* caps = ctx->caps();

    if (kIndex_8_SkColorType == bitmap->colorType()) {
        if (caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
            // Hardware supports palettized textures: repack and upload directly.
            size_t imageSize = GrCompressedFormatDataSize(kIndex_8_GrPixelConfig,
                                                          bitmap->width(), bitmap->height());
            SkAutoMalloc storage(imageSize);
            build_index8_data(storage.get(), origBitmap);

            // our compressed data will be trimmed, so pass width() for its
            // "rowBytes", since they are the same now.
            return create_texture_for_bmp(ctx, optionalKey, desc, origBitmap.pixelRef(),
                                          storage.get(), bitmap->width());
        } else {
            origBitmap.copyTo(&tmpBitmap, kN32_SkColorType);
            // now bitmap points to our temp, which has been promoted to 32bits
            bitmap = &tmpBitmap;
            // Refresh the config: the promoted copy is no longer index8.
            desc.fConfig = SkImageInfo2GrPixelConfig(bitmap->info());
        }
    }
    // Is this an ETC1 encoded texture?
#ifndef SK_IGNORE_ETC1_SUPPORT
    // Make sure that the underlying device supports ETC1 textures before we go ahead
    // and check the data.
    else if (caps->isConfigTexturable(kETC1_GrPixelConfig)
             // If the bitmap had compressed data and was then uncompressed, it'll still return
             // compressed data on 'refEncodedData' and upload it. Probably not good, since if
             // the bitmap has available pixels, then they might not be what the decompressed
             // data is.
             && !(bitmap->readyToDraw())) {
        GrTexture *texture = load_etc1_texture(ctx, optionalKey, *bitmap, desc);
        if (texture) {
            return texture;
        }
    }
#endif   // SK_IGNORE_ETC1_SUPPORT

    // Try a planar YUV upload before resorting to a plain pixel upload.
    GrTexture *texture = load_yuv_texture(ctx, optionalKey, *bitmap, desc);
    if (texture) {
        return texture;
    }

    SkAutoLockPixels alp(*bitmap);
    if (!bitmap->readyToDraw()) {
        return NULL;
    }

    return create_texture_for_bmp(ctx, optionalKey, desc, origBitmap.pixelRef(),
                                  bitmap->getPixels(), bitmap->rowBytes());
}
// Convenience overload: unpack the SkImageInfo and delegate to the
// (colorType, colorSpace, caps) form.
GrPixelConfig SkImageInfo2GrPixelConfig(const SkImageInfo& info, const GrCaps& caps) {
    const SkColorType colorType = info.colorType();
    SkColorSpace* colorSpace = info.colorSpace();
    return SkImageInfo2GrPixelConfig(colorType, colorSpace, caps);
}