// Appends a human-readable description of this bitmap shader to 'str':
// dimensions/bytes-per-pixel, the pixel ref's URI (when present), the local
// matrix (when set), and the X/Y tile-mode names. Always returns true.
bool SkBitmapProcShader::toDumpString(SkString* str) const {
    str->printf("BitmapShader: [%d %d %d", fRawBitmap.width(), fRawBitmap.height(),
                fRawBitmap.bytesPerPixel());

    // Identify the backing pixel ref by its URI when one is attached.
    SkPixelRef* pixelRef = fRawBitmap.pixelRef();
    if (pixelRef) {
        const char* uri = pixelRef->getURI();
        if (uri) {
            str->appendf(" \"%s\"", uri);
        }
    }

    // Append the local matrix only when one has been set.
    SkMatrix localMatrix;
    if (this->getLocalMatrix(&localMatrix)) {
        SkString matrixText;
        localMatrix.toDumpString(&matrixText);
        str->appendf(" %s", matrixText.c_str());
    }

    str->appendf(" [%s %s]]", gTileModeName[fState.fTileModeX],
                 gTileModeName[fState.fTileModeY]);
    return true;
}
// Creates an SkImage from 'bm'. Returns nullptr when the bitmap has no pixel
// ref. On GPU builds a texture-backed bitmap yields an SkImage_Gpu; a mutable
// texture is deep-copied first so later bitmap edits can't alter the image.
// Raster bitmaps fall through to SkMakeImageFromRasterBitmap, which decides
// whether to share or copy based on immutability.
sk_sp<SkImage> SkImage::MakeFromBitmap(const SkBitmap& bm) {
    SkPixelRef* pixelRef = bm.pixelRef();
    if (!pixelRef) {
        return nullptr;
    }

#if SK_SUPPORT_GPU
    if (GrTexture* tex = pixelRef->getTexture()) {
        SkAutoTUnref<GrTexture> unrefCopy;
        if (!bm.isImmutable()) {
            // Snapshot mutable content: images must never observe later edits.
            tex = GrDeepCopyTexture(tex, SkBudgeted::kNo);
            if (!tex) {
                return nullptr;
            }
            unrefCopy.reset(tex);
        }
        const SkImageInfo info = bm.info();
        return sk_make_sp<SkImage_Gpu>(info.width(), info.height(), bm.getGenerationID(),
                                       info.alphaType(), tex, sk_ref_sp(info.colorSpace()),
                                       SkBudgeted::kNo);
    }
#endif

    // This will check for immutable (share or copy)
    return SkMakeImageFromRasterBitmap(bm);
}
// Returns JPEG (DCT) encoded data for 'bitmap', or NULL when JPEG encoding is
// disabled (gJpegQuality == -1). Prefers the pixel ref's already-encoded data,
// recording its offset in *pixelRefOffset; otherwise encodes the pixels with
// SkImageEncoder at gJpegQuality and sets *pixelRefOffset to 0.
// The caller owns the returned SkData.
static SkData* encode_to_dct_data(size_t* pixelRefOffset, const SkBitmap& bitmap) {
    if (gJpegQuality == -1) {
        return NULL;
    }

    SkBitmap bm = bitmap;
#if defined(SK_BUILD_FOR_MAC)
    // Workaround bug #1043 where bitmaps with referenced pixels cause
    // CGImageDestinationFinalize to crash
    SkBitmap copy;
    // FIX: the address-of operator had been corrupted to the mojibake
    // character '©' (a mangled "&copy"); restore the intended &copy argument.
    bitmap.deepCopyTo(&copy, bitmap.config());
    bm = copy;
#endif

    SkPixelRef* pr = bm.pixelRef();
    if (pr != NULL) {
        // Reuse encoded bytes already held by the pixel ref when available.
        SkData* data = pr->refEncodedData();
        if (data != NULL) {
            *pixelRefOffset = bm.pixelRefOffset();
            return data;
        }
    }

    *pixelRefOffset = 0;
    return SkImageEncoder::EncodeData(bm, SkImageEncoder::kJPEG_Type, gJpegQuality);
}
bool SkBitmapProcState::lockBaseBitmap() { SkPixelRef* pr = fOrigBitmap.pixelRef(); if (pr->isLocked() || !pr->implementsDecodeInto()) { // fast-case, no need to look in our cache fScaledBitmap = fOrigBitmap; fScaledBitmap.lockPixels(); if (NULL == fScaledBitmap.getPixels()) { return false; } } else { if (!SkBitmapCache::Find(fOrigBitmap, 1, 1, &fScaledBitmap)) { if (!get_locked_pixels(fOrigBitmap, 0, &fScaledBitmap)) { return false; } // TODO: if fScaled comes back at a different width/height than fOrig, // we need to update the matrix we are using to sample from this guy. SkBitmapCache::Add(fOrigBitmap, 1, 1, fScaledBitmap); } } fBitmap = &fScaledBitmap; return true; }
// Creates an SkImage from 'bm' (legacy raw-pointer API). Returns nullptr when
// the bitmap has no pixel ref. On GPU builds a texture-backed bitmap yields an
// SkImage_Gpu; mutable textures are deep-copied (unbudgeted) first so the
// image never observes later bitmap edits. Raster bitmaps fall through to
// SkNewImageFromRasterBitmap.
SkImage* SkImage::NewFromBitmap(const SkBitmap& bm) {
    SkPixelRef* pixelRef = bm.pixelRef();
    if (!pixelRef) {
        return nullptr;
    }

#if SK_SUPPORT_GPU
    if (GrTexture* tex = pixelRef->getTexture()) {
        SkAutoTUnref<GrTexture> unrefCopy;
        if (!bm.isImmutable()) {
            // false => copy is not budgeted against the resource cache.
            tex = GrDeepCopyTexture(tex, false);
            if (!tex) {
                return nullptr;
            }
            unrefCopy.reset(tex);
        }
        const SkImageInfo info = bm.info();
        return new SkImage_Gpu(info.width(), info.height(), bm.getGenerationID(),
                               info.alphaType(), tex, 0, SkSurface::kNo_Budgeted);
    }
#endif

    // This will check for immutable (share or copy)
    return SkNewImageFromRasterBitmap(bm, nullptr);
}
void reportMemoryUsage(const SkBitmap* const& image, WTF::MemoryObjectInfo* memoryObjectInfo) { WTF::MemoryClassInfo info(memoryObjectInfo, image); memoryObjectInfo->setClassName("SkBitmap"); SkPixelRef* pixelRef = image->pixelRef(); info.addMember(pixelRef, "pixelRef"); if (pixelRef) info.addRawBuffer(pixelRef->pixels(), image->getSize(), "Pixels", "pixelRef"); }
// Locks fOrigBitmap's pixels into fScaledBitmap and points fBitmap at it.
// Fast path: a pixel ref that is already locked (or cannot decode into a
// separate bitmap) is locked directly. Otherwise the 1:1 entry is looked up
// in SkScaledImageCache; a found-but-purged entry (discardable memory) is
// released and rebuilt via get_locked_pixels(), then re-added under the same
// 1:1 key. The AutoScaledCacheUnlocker releases the cache lock on every
// early-return; unlocker.release() at the end keeps the lock on success.
// Returns false when pixels could not be obtained.
bool SkBitmapProcState::lockBaseBitmap() { AutoScaledCacheUnlocker unlocker(&fScaledCacheID); SkPixelRef* pr = fOrigBitmap.pixelRef(); SkASSERT(NULL == fScaledCacheID); if (pr->isLocked() || !pr->implementsDecodeInto()) { // fast-case, no need to look in our cache fScaledBitmap = fOrigBitmap; fScaledBitmap.lockPixels(); if (NULL == fScaledBitmap.getPixels()) { return false; } } else { fScaledCacheID = SkScaledImageCache::FindAndLock(fOrigBitmap, SK_Scalar1, SK_Scalar1, &fScaledBitmap); if (fScaledCacheID) { fScaledBitmap.lockPixels(); if (!fScaledBitmap.getPixels()) { fScaledBitmap.unlockPixels(); // found a purged entry (discardablememory?), release it SkScaledImageCache::Unlock(fScaledCacheID); fScaledCacheID = NULL; // fall through to rebuild } } if (NULL == fScaledCacheID) { if (!get_locked_pixels(fOrigBitmap, 0, &fScaledBitmap)) { return false; } // TODO: if fScaled comes back at a different width/height than fOrig, // we need to update the matrix we are using to sample from this guy. fScaledCacheID = SkScaledImageCache::AddAndLock(fOrigBitmap, SK_Scalar1, SK_Scalar1, fScaledBitmap); if (!fScaledCacheID) { fScaledBitmap.reset(); return false; } } } fBitmap = &fScaledBitmap; unlocker.release(); return true; }
// Creates a texture by decoding bm's pixel ref as YUV planes.
// Subsets are not supported, the whole pixelRef is loaded when using YUV
// decoding, so the pixel ref must cover the full bitmap dimensions.
// Returns nullptr otherwise; caching is requested for non-volatile bitmaps.
static GrTexture* create_texture_from_yuv(GrContext* ctx, const SkBitmap& bm, const GrSurfaceDesc& desc) {
    SkPixelRef* ref = bm.pixelRef();
    if (nullptr == ref) {
        return nullptr;
    }
    if (ref->info().width() != bm.info().width() ||
        ref->info().height() != bm.info().height()) {
        return nullptr;
    }

    PixelRef_GrYUVProvider provider(ref);
    return provider.refAsTexture(ctx, desc, !bm.isVolatile());
}
static bool get_locked_pixels(const SkBitmap& src, int pow2, SkBitmap* dst) { SkPixelRef* pr = src.pixelRef(); if (pr && pr->decodeInto(pow2, dst)) { return true; } /* * If decodeInto() fails, it is possibe that we have an old subclass that * does not, or cannot, implement that. In that case we fall back to the * older protocol of having the pixelRef handle the caching for us. */ *dst = src; dst->lockPixels(); return SkToBool(dst->getPixels()); }
// Serializes 'bitmap' into this write buffer. Width and height are written
// first so a failed readBitmap can still draw a correctly sized dummy, then a
// 'false' bool (the SkBitmapHeap feature is deprecated). After that, in
// priority order: (1) write the pixel ref's existing encoded data when no
// serializer is set or the serializer accepts it; (2) let fPixelSerializer
// encode the locked pixels (origin is then (0,0) since a fresh pixel ref is
// effectively created); (3) fall back to raw pixels, signalled by a zero size.
void SkBinaryWriteBuffer::writeBitmap(const SkBitmap& bitmap) { // Record the width and height. This way if readBitmap fails a dummy bitmap can be drawn at the // right size. this->writeInt(bitmap.width()); this->writeInt(bitmap.height()); // Record information about the bitmap in one of two ways, in order of priority: // 1. If there is a function for encoding bitmaps, use it to write an encoded version of the // bitmap. After writing a boolean value of false, signifying that a heap was not used, write // the size of the encoded data. A non-zero size signifies that encoded data was written. // 2. Call SkBitmap::flatten. After writing a boolean value of false, signifying that a heap was // not used, write a zero to signify that the data was not encoded. // Write a bool to indicate that we did not use an SkBitmapHeap. That feature is deprecated. this->writeBool(false); SkPixelRef* pixelRef = bitmap.pixelRef(); if (pixelRef) { // see if the pixelref already has an encoded version SkAutoDataUnref existingData(pixelRef->refEncodedData()); if (existingData.get() != nullptr) { // Assumes that if the client did not set a serializer, they are // happy to get the encoded data. if (!fPixelSerializer || fPixelSerializer->useEncodedData(existingData->data(), existingData->size())) { write_encoded_bitmap(this, existingData, bitmap.pixelRefOrigin()); return; } } // see if the caller wants to manually encode SkAutoPixmapUnlock result; if (fPixelSerializer && bitmap.requestLock(&result)) { SkAutoDataUnref data(fPixelSerializer->encode(result.pixmap())); if (data.get() != nullptr) { // if we have to "encode" the bitmap, then we assume there is no // offset to share, since we are effectively creating a new pixelref write_encoded_bitmap(this, data, SkIPoint::Make(0, 0)); return; } } } this->writeUInt(0); // signal raw pixels SkBitmap::WriteRawPixels(this, bitmap); }
static void toString(const SkBitmap& bm, SkString* str) { str->printf("bitmap:[%d %d] %s", bm.width(), bm.height(), toString(bm.config())); SkPixelRef* pr = bm.pixelRef(); if (NULL == pr) { // show null or the explicit pixel address (rare) str->appendf(" pixels:%p", bm.getPixels()); } else { const char* uri = pr->getURI(); if (uri) { str->appendf(" uri:\"%s\"", uri); } else { str->appendf(" pixelref:%p", pr); } } }
// JNI: reconfigures an existing Java Bitmap's native SkBitmap in place.
// ARGB_4444 is silently upgraded to 8888 (deprecated format). Throws
// IllegalArgumentException when the existing allocation is too small for the
// new dimensions. Opaque bitmaps keep their alpha type unless 565 forces it;
// otherwise requestPremul decides premul vs unpremul. The same SkPixelRef is
// deliberately kept (libhwui keys its cache on the pointer), so its normally
// immutable SkImageInfo is rewritten through const_cast, and
// notifyPixelsChanged() bumps the generation ID to invalidate renderers even
// though the pixels themselves were not touched.
static void Bitmap_reconfigure(JNIEnv* env, jobject clazz, jlong bitmapHandle, jint width, jint height, jint configHandle, jint allocSize, jboolean requestPremul) { SkBitmap* bitmap = reinterpret_cast<SkBitmap*>(bitmapHandle); SkColorType colorType = GraphicsJNI::legacyBitmapConfigToColorType(configHandle); // ARGB_4444 is a deprecated format, convert automatically to 8888 if (colorType == kARGB_4444_SkColorType) { colorType = kN32_SkColorType; } if (width * height * SkColorTypeBytesPerPixel(colorType) > allocSize) { // done in native as there's no way to get BytesPerPixel in Java doThrowIAE(env, "Bitmap not large enough to support new configuration"); return; } SkPixelRef* ref = bitmap->pixelRef(); ref->ref(); SkAlphaType alphaType; if (bitmap->colorType() != kRGB_565_SkColorType && bitmap->alphaType() == kOpaque_SkAlphaType) { // If the original bitmap was set to opaque, keep that setting, unless it // was 565, which is required to be opaque. alphaType = kOpaque_SkAlphaType; } else { // Otherwise respect the premultiplied request. alphaType = requestPremul ? kPremul_SkAlphaType : kUnpremul_SkAlphaType; } bitmap->setInfo(SkImageInfo::Make(width, height, colorType, alphaType)); // FIXME: Skia thinks of an SkPixelRef as having a constant SkImageInfo (except for // its alphatype), so it would make more sense from Skia's perspective to create a // new SkPixelRef. That said, libhwui uses the pointer to the SkPixelRef as a key // for its cache, so it won't realize this is the same Java Bitmap. SkImageInfo& info = const_cast<SkImageInfo&>(ref->info()); // Use the updated from the SkBitmap, which may have corrected an invalid alphatype. // (e.g. 565 non-opaque) info = bitmap->info(); bitmap->setPixelRef(ref); // notifyPixelsChanged will increment the generation ID even though the actual pixel data // hasn't been touched. This signals the renderer that the bitmap (including width, height, // colortype and alphatype) has changed. ref->notifyPixelsChanged(); ref->unref(); }
// JNI binding for eglCreatePixmapSurface(). Validates the Java arguments,
// then refs and locks the bitmap's SkPixelRef and wraps its pixel memory in
// an egl_native_pixmap_t (stride is expressed in pixels, not bytes). On
// success the EGLSurface and the still-locked, still-referenced pixel ref are
// stored into the Java surface object (the ref is released later, in
// jni_eglDestroySurface); on failure the pixels are unlocked and the ref
// dropped here. Throws IllegalArgumentException on null/invalid inputs.
static void jni_eglCreatePixmapSurface(JNIEnv *_env, jobject _this, jobject out_sur, jobject display, jobject config, jobject native_pixmap, jintArray attrib_list) { if (display == NULL || config == NULL || native_pixmap == NULL || !validAttribList(_env, attrib_list)) { jniThrowException(_env, "java/lang/IllegalArgumentException", NULL); return; } EGLDisplay dpy = getDisplay(_env, display); EGLConfig cnf = getConfig(_env, config); jint* base = 0; SkBitmap const * nativeBitmap = (SkBitmap const *)_env->GetIntField(native_pixmap, gBitmap_NativeBitmapFieldID); SkPixelRef* ref = nativeBitmap ? nativeBitmap->pixelRef() : 0; if (ref == NULL) { jniThrowException(_env, "java/lang/IllegalArgumentException", "Bitmap has no PixelRef"); return; } SkSafeRef(ref); ref->lockPixels(); egl_native_pixmap_t pixmap; pixmap.version = sizeof(pixmap); pixmap.width = nativeBitmap->width(); pixmap.height = nativeBitmap->height(); pixmap.stride = nativeBitmap->rowBytes() / nativeBitmap->bytesPerPixel(); pixmap.format = convertPixelFormat(nativeBitmap->config()); pixmap.data = (uint8_t*)ref->pixels(); base = beginNativeAttribList(_env, attrib_list); EGLSurface sur = eglCreatePixmapSurface(dpy, cnf, &pixmap, base); endNativeAttributeList(_env, attrib_list, base); if (sur != EGL_NO_SURFACE) { _env->SetIntField(out_sur, gSurface_EGLSurfaceFieldID, (int)sur); _env->SetIntField(out_sur, gSurface_NativePixelRefFieldID, (int)ref); } else { ref->unlockPixels(); SkSafeUnref(ref); } }
// Wraps an existing SkData as an immutable pixel ref. Returns nullptr when
// the info/ctable combination is invalid, rowBytes is below the minimum, or
// the data is too small to hold the described pixels. Ownership of 'data' is
// transferred to the pixel ref via sk_data_releaseproc.
sk_sp<SkPixelRef> SkMallocPixelRef::MakeWithData(const SkImageInfo& info,
                                                 size_t rowBytes,
                                                 sk_sp<SkColorTable> ctable,
                                                 sk_sp<SkData> data) {
    SkASSERT(data != nullptr);
    if (!is_valid(info, ctable.get())) {
        return nullptr;
    }
    const bool rowBytesTooSmall = rowBytes < info.minRowBytes();
    const bool dataTooSmall = data->size() < info.getSafeSize(rowBytes);
    if (rowBytesTooSmall || dataTooSmall) {
        return nullptr;
    }
    // Grab the pixel address before release() below transfers ownership.
    void* pixels = const_cast<void*>(data->data());
    SkPixelRef* pr = new SkMallocPixelRef(info, pixels, rowBytes, std::move(ctable),
                                          sk_data_releaseproc, data.release());
    pr->setImmutable();  // since we were created with (immutable) data
    return sk_sp<SkPixelRef>(pr);
}
// JNI binding for eglDestroySurface(). Throws IllegalArgumentException on
// null arguments. Before destroying the surface, releases the pixel ref that
// jni_eglCreatePixmapSurface left locked and referenced in the Java object.
static jboolean jni_eglDestroySurface(JNIEnv *_env, jobject _this, jobject display, jobject surface) {
    if (display == NULL || surface == NULL) {
        jniThrowException(_env, "java/lang/IllegalArgumentException", NULL);
        return JNI_FALSE;
    }

    EGLDisplay dpy = getDisplay(_env, display);
    EGLSurface sur = getSurface(_env, surface);
    if (sur) {
        SkPixelRef* ref =
                (SkPixelRef*)(_env->GetIntField(surface, gSurface_NativePixelRefFieldID));
        if (ref) {
            ref->unlockPixels();
            SkSafeUnref(ref);
        }
    }
    return eglDestroySurface(dpy, sur);
}
// JNI: reconfigures an existing native SkBitmap in place (legacy Config
// variant). Throws IllegalArgumentException when the existing allocation is
// too small for the new dimensions. The pixel ref is held across the
// setConfig/setPixelRef pair so the pixel storage survives, and
// notifyPixelsChanged() bumps the generation ID to tell the renderer the
// bitmap (width, height, config) changed even though the pixels did not.
static void Bitmap_reconfigure(JNIEnv* env, jobject clazz, jlong bitmapHandle,
                               jint width, jint height, jint configHandle, jint allocSize) {
    SkBitmap* bm = reinterpret_cast<SkBitmap*>(bitmapHandle);
    SkBitmap::Config newConfig = static_cast<SkBitmap::Config>(configHandle);

    // done in native as there's no way to get BytesPerPixel in Java
    if (width * height * SkBitmap::ComputeBytesPerPixel(newConfig) > allocSize) {
        doThrowIAE(env, "Bitmap not large enough to support new configuration");
        return;
    }

    SkPixelRef* pixelRef = bm->pixelRef();
    SkSafeRef(pixelRef);
    bm->setConfig(newConfig, width, height);
    bm->setPixelRef(pixelRef);
    pixelRef->notifyPixelsChanged();
    SkSafeUnref(pixelRef);
}
// Returns a new SkBitmapRef for the frame at 'index'. With ANDROID_ANIMATED_GIF
// compiled in and a GIF decoder present, decodes that frame (returning 0 for
// missing/empty frames) and tags its pixel ref with the source URL before
// wrapping it. Without GIF support only index 0 is valid (asserted); the
// cached decoder image is returned with an extra ref, which the caller owns.
SkBitmapRef* ImageSource::createFrameAtIndex(size_t index) { #ifdef ANDROID_ANIMATED_GIF if (m_decoder.m_gifDecoder) { ImageFrame* buffer = m_decoder.m_gifDecoder->frameBufferAtIndex(index); if (!buffer || buffer->status() == ImageFrame::FrameEmpty) return 0; SkBitmap& bitmap = buffer->bitmap(); SkPixelRef* pixelRef = bitmap.pixelRef(); if (pixelRef) pixelRef->setURI(m_decoder.m_url); return new SkBitmapRef(bitmap); } #else SkASSERT(index == 0); #endif SkASSERT(m_decoder.m_image != NULL); m_decoder.m_image->ref(); return m_decoder.m_image; }
// Builds atlas entries from 'map', a flat array of (SkPixelRef*, x, y)
// triples packed into int64 slots (64-bit so pointers fit on 64-bit builds;
// x and y are guaranteed representable in 32 bits). For each non-null pixel
// ref a DelegateTexture sharing the atlas texture's GL id is created, with a
// UvMapper translating the bitmap's rect into atlas UV space, and the entry
// is stored in mEntries keyed by the pixel ref pointer. Null pixel refs are
// skipped defensively ("should never happen").
void AssetAtlas::createEntries(Caches& caches, int64_t* map, int count) { const float width = float(mTexture->width()); const float height = float(mTexture->height()); for (int i = 0; i < count; ) { SkPixelRef* pixelRef = reinterpret_cast<SkPixelRef*>(map[i++]); // NOTE: We're converting from 64 bit signed values to 32 bit // signed values. This is guaranteed to be safe because the "x" // and "y" coordinate values are guaranteed to be representable // with 32 bits. The array is 64 bits wide so that it can carry // pointers on 64 bit architectures. const int x = static_cast<int>(map[i++]); const int y = static_cast<int>(map[i++]); // Bitmaps should never be null, we're just extra paranoid if (!pixelRef) continue; const UvMapper mapper( x / width, (x + pixelRef->info().width()) / width, y / height, (y + pixelRef->info().height()) / height); Texture* texture = new DelegateTexture(caches, mTexture); texture->blend = !SkAlphaTypeIsOpaque(pixelRef->info().alphaType()); texture->wrap(mTexture->id(), pixelRef->info().width(), pixelRef->info().height(), mTexture->format()); std::unique_ptr<Entry> entry(new Entry(pixelRef, texture, mapper, *this)); texture->uvMapper = &entry->uvMapper; mEntries.emplace(entry->pixelRef, std::move(entry)); } }
void SkPixelRef::cloneGenID(const SkPixelRef& that) { // This is subtle. We must call that.getGenerationID() to make sure its genID isn't 0. uint32_t genID = that.getGenerationID(); // Neither ID is unique any more. // (These & ~1u are actually redundant. that.getGenerationID() just did it for us.) this->fTaggedGenID.store(genID & ~1u); that. fTaggedGenID.store(genID & ~1u); // This method isn't threadsafe, so these asserts should be fine. SkASSERT(!this->genIDIsUnique()); SkASSERT(!that. genIDIsUnique()); }
// Creates a GrTexture by decoding bm's pixel ref as YUV planes via
// PixelRef_GrYUVProvider. Subsets are not supported: the pixel ref must cover
// the whole bitmap, otherwise nullptr is returned. When optionalKey is valid
// the resulting texture is cached under that key and a BitmapInvalidator
// listener is attached so a change in the pixel ref's generation ID purges it.
static GrTexture* load_yuv_texture(GrContext* ctx, const GrUniqueKey& optionalKey, const SkBitmap& bm, const GrSurfaceDesc& desc) { // Subsets are not supported, the whole pixelRef is loaded when using YUV decoding SkPixelRef* pixelRef = bm.pixelRef(); if ((nullptr == pixelRef) || (pixelRef->info().width() != bm.info().width()) || (pixelRef->info().height() != bm.info().height())) { return nullptr; } const bool useCache = optionalKey.isValid(); PixelRef_GrYUVProvider provider(pixelRef); GrTexture* texture = provider.refAsTexture(ctx, desc, useCache); if (!texture) { return nullptr; } if (useCache) { BitmapInvalidator* listener = new BitmapInvalidator(optionalKey); pixelRef->addGenIDChangeListener(listener); ctx->textureProvider()->assignUniqueKeyToTexture(optionalKey, texture); } return texture; }
// Forwards to the wrapped pixel ref's getYUV8Planes() to fill the plane
// pointers, row bytes, and color space. Returns its success flag.
bool onGetYUVPlanes(SkISize sizes[3], void* planes[3], size_t rowBytes[3], SkYUVColorSpace* space) override { return fPR->getYUV8Planes(sizes, planes, rowBytes, space); }
// Feeds newly received encoded image data to the decoder. An already-active
// GIF decoder just gets the new bytes. Otherwise the first decode is a
// bounds-only pass to learn the original size; data starting with "GIF8"
// that qualifies for animation is handed to a dedicated GIFImageDecoder
// instead. A sample size is then computed and, when > 1, the bounds are
// re-decoded at that scale before the image record is created. Once all data
// has arrived the bitmap's pixels are materialized (RLE-converted when
// convertToRLE succeeds, otherwise allocated via BitmapAllocatorAndroid),
// marked immutable (makes picture recording fast), and tagged with the URL.
void ImageSource::setData(SharedBuffer* data, bool allDataReceived) { #ifdef ANDROID_ANIMATED_GIF // This is only necessary if we allow ourselves to partially decode GIF if (m_decoder.m_gifDecoder && !m_decoder.m_gifDecoder->failed()) { m_decoder.m_gifDecoder->setData(data, allDataReceived); return; } #endif if (NULL == m_decoder.m_image #ifdef ANDROID_ANIMATED_GIF && !m_decoder.m_gifDecoder #endif ) { SkBitmap tmp; SkMemoryStream stream(data->data(), data->size(), false); SkImageDecoder* codec = SkImageDecoder::Factory(&stream); SkAutoTDelete<SkImageDecoder> ad(codec); if (!codec || !codec->decode(&stream, &tmp, SkBitmap::kNo_Config, SkImageDecoder::kDecodeBounds_Mode)) { return; } int origW = tmp.width(); int origH = tmp.height(); #ifdef ANDROID_ANIMATED_GIF // First, check to see if this is an animated GIF const Vector<char>& buffer = data->buffer(); const char* contents = buffer.data(); if (buffer.size() > 3 && strncmp(contents, "GIF8", 4) == 0 && should_use_animated_gif(origW, origH)) { // This means we are looking at a GIF, so create special // GIF Decoder // Need to wait for all data received if we are assigning an // allocator (which we are not at the moment). 
if (!m_decoder.m_gifDecoder /*&& allDataReceived*/) m_decoder.m_gifDecoder = new GIFImageDecoder(); if (!m_decoder.m_gifDecoder->failed()) m_decoder.m_gifDecoder->setData(data, allDataReceived); return; } #endif int sampleSize = computeSampleSize(tmp); if (sampleSize > 1) { codec->setSampleSize(sampleSize); stream.rewind(); if (!codec->decode(&stream, &tmp, SkBitmap::kNo_Config, SkImageDecoder::kDecodeBounds_Mode)) { return; } } m_decoder.m_image = new PrivateAndroidImageSourceRec(tmp, origW, origH, sampleSize); // SkDebugf("----- started: [%d %d] %s\n", origW, origH, m_decoder.m_url.c_str()); } PrivateAndroidImageSourceRec* decoder = m_decoder.m_image; if (allDataReceived && !decoder->fAllDataReceived) { decoder->fAllDataReceived = true; SkBitmap* bm = &decoder->bitmap(); SkPixelRef* ref = convertToRLE(bm, data->data(), data->size()); if (ref) { bm->setPixelRef(ref)->unref(); } else { BitmapAllocatorAndroid alloc(data, decoder->fSampleSize); if (!alloc.allocPixelRef(bm, NULL)) { return; } ref = bm->pixelRef(); } // we promise to never change the pixels (makes picture recording fast) ref->setImmutable(); // give it the URL if we have one ref->setURI(m_decoder.m_url); } }
// Decodes bm's pixel ref as three YUV planes and converts them to RGB on the
// GPU. Subsets are not supported: the pixel ref must cover the whole bitmap.
// Plane memory is taken from SkYUVPlanesCache when optionalKey is valid
// (and newly decoded planes are added back to it), otherwise from a temporary
// SkAutoMalloc. Plane sizes are queried first (they may be rounded up to the
// JPEG block size, i.e. larger than the image), memory is laid out as three
// consecutive planes, and the actual decode then updates sizes in place.
// Each plane is uploaded into an alpha-8 texture — exact-sized when its
// dimensions differ from the Y plane, approx-sized otherwise — and drawn
// through GrYUVtoRGBEffect into a render-target texture created by
// create_texture_for_bmp, which is returned. Returns NULL on any
// allocation, decoding, upload, or draw-context failure.
static GrTexture* load_yuv_texture(GrContext* ctx, const GrUniqueKey& optionalKey, const SkBitmap& bm, const GrSurfaceDesc& desc) { // Subsets are not supported, the whole pixelRef is loaded when using YUV decoding SkPixelRef* pixelRef = bm.pixelRef(); if ((NULL == pixelRef) || (pixelRef->info().width() != bm.info().width()) || (pixelRef->info().height() != bm.info().height())) { return NULL; } const bool useCache = optionalKey.isValid(); SkYUVPlanesCache::Info yuvInfo; SkAutoTUnref<SkCachedData> cachedData; SkAutoMalloc storage; if (useCache) { cachedData.reset(SkYUVPlanesCache::FindAndRef(pixelRef->getGenerationID(), &yuvInfo)); } void* planes[3]; if (cachedData.get()) { planes[0] = (void*)cachedData->data(); planes[1] = (uint8_t*)planes[0] + yuvInfo.fSizeInMemory[0]; planes[2] = (uint8_t*)planes[1] + yuvInfo.fSizeInMemory[1]; } else { // Fetch yuv plane sizes for memory allocation. Here, width and height can be // rounded up to JPEG block size and be larger than the image's width and height. 
if (!pixelRef->getYUV8Planes(yuvInfo.fSize, NULL, NULL, NULL)) { return NULL; } // Allocate the memory for YUV size_t totalSize(0); for (int i = 0; i < 3; ++i) { yuvInfo.fRowBytes[i] = yuvInfo.fSize[i].fWidth; yuvInfo.fSizeInMemory[i] = yuvInfo.fRowBytes[i] * yuvInfo.fSize[i].fHeight; totalSize += yuvInfo.fSizeInMemory[i]; } if (useCache) { cachedData.reset(SkResourceCache::NewCachedData(totalSize)); planes[0] = cachedData->writable_data(); } else { storage.reset(totalSize); planes[0] = storage.get(); } planes[1] = (uint8_t*)planes[0] + yuvInfo.fSizeInMemory[0]; planes[2] = (uint8_t*)planes[1] + yuvInfo.fSizeInMemory[1]; // Get the YUV planes and update plane sizes to actual image size if (!pixelRef->getYUV8Planes(yuvInfo.fSize, planes, yuvInfo.fRowBytes, &yuvInfo.fColorSpace)) { return NULL; } if (useCache) { // Decoding is done, cache the resulting YUV planes SkYUVPlanesCache::Add(pixelRef->getGenerationID(), cachedData, &yuvInfo); } } GrSurfaceDesc yuvDesc; yuvDesc.fConfig = kAlpha_8_GrPixelConfig; SkAutoTUnref<GrTexture> yuvTextures[3]; for (int i = 0; i < 3; ++i) { yuvDesc.fWidth = yuvInfo.fSize[i].fWidth; yuvDesc.fHeight = yuvInfo.fSize[i].fHeight; bool needsExactTexture = (yuvDesc.fWidth != yuvInfo.fSize[0].fWidth) || (yuvDesc.fHeight != yuvInfo.fSize[0].fHeight); if (needsExactTexture) { yuvTextures[i].reset(ctx->textureProvider()->createTexture(yuvDesc, true)); } else { yuvTextures[i].reset(ctx->textureProvider()->createApproxTexture(yuvDesc)); } if (!yuvTextures[i] || !yuvTextures[i]->writePixels(0, 0, yuvDesc.fWidth, yuvDesc.fHeight, yuvDesc.fConfig, planes[i], yuvInfo.fRowBytes[i])) { return NULL; } } GrSurfaceDesc rtDesc = desc; rtDesc.fFlags = rtDesc.fFlags | kRenderTarget_GrSurfaceFlag; GrTexture* result = create_texture_for_bmp(ctx, optionalKey, rtDesc, pixelRef, NULL, 0); if (!result) { return NULL; } GrRenderTarget* renderTarget = result->asRenderTarget(); SkASSERT(renderTarget); GrPaint paint; SkAutoTUnref<GrFragmentProcessor> 
yuvToRgbProcessor(GrYUVtoRGBEffect::Create(paint.getProcessorDataManager(), yuvTextures[0], yuvTextures[1], yuvTextures[2], yuvInfo.fSize, yuvInfo.fColorSpace)); paint.addColorProcessor(yuvToRgbProcessor); SkRect r = SkRect::MakeWH(SkIntToScalar(yuvInfo.fSize[0].fWidth), SkIntToScalar(yuvInfo.fSize[0].fHeight)); GrDrawContext* drawContext = ctx->drawContext(); if (!drawContext) { return NULL; } drawContext->drawRect(renderTarget, GrClip::WideOpen(), paint, SkMatrix::I(), r); return result; }
// Core JNI decode path shared by the BitmapFactory entry points.
// Reads the Java BitmapFactory.Options (sample size, preferred config,
// dither, mutability, quality preference, bitmap reuse), then runs the
// SkImageDecoder — bounds-only when just measuring or when producing a
// purgeable bitmap. When applyScale is set the decoded bitmap is redrawn
// into a scaled bitmap with a filtered canvas (the 0.5f rounding mirrors
// historical Dalvik behavior), keeping the unscaled copy only for moderate
// upscales (see the numUnscaledPixels bounds). Nine-patch chunk and layout
// bounds are extracted and scaled when present, padding is filled in, a
// purgeable pixel ref is installed when requested, and immutable results
// have setImmutable() called so they can be shared/recorded. Returns the
// reused Java bitmap, a newly created one, or NULL plus a descriptive
// nullObjectReturn on any failure. The stream must be heap-allocated since
// a purgeable image ref may outlive this stack frame (see comment below).
// since we "may" create a purgeable imageref, we require the stream be ref'able // i.e. dynamically allocated, since its lifetime may exceed the current stack // frame. static jobject doDecode(JNIEnv* env, SkStream* stream, jobject padding, jobject options, bool allowPurgeable, bool forcePurgeable = false, bool applyScale = false, float scale = 1.0f) { int sampleSize = 1; SkImageDecoder::Mode mode = SkImageDecoder::kDecodePixels_Mode; SkBitmap::Config prefConfig = SkBitmap::kARGB_8888_Config; bool doDither = true; bool isMutable = false; bool willScale = applyScale && scale != 1.0f; bool isPurgeable = !willScale && (forcePurgeable || (allowPurgeable && optionsPurgeable(env, options))); bool preferQualityOverSpeed = false; jobject javaBitmap = NULL; if (options != NULL) { sampleSize = env->GetIntField(options, gOptions_sampleSizeFieldID); if (optionsJustBounds(env, options)) { mode = SkImageDecoder::kDecodeBounds_Mode; } // initialize these, in case we fail later on env->SetIntField(options, gOptions_widthFieldID, -1); env->SetIntField(options, gOptions_heightFieldID, -1); env->SetObjectField(options, gOptions_mimeFieldID, 0); jobject jconfig = env->GetObjectField(options, gOptions_configFieldID); prefConfig = GraphicsJNI::getNativeBitmapConfig(env, jconfig); isMutable = env->GetBooleanField(options, gOptions_mutableFieldID); doDither = env->GetBooleanField(options, gOptions_ditherFieldID); preferQualityOverSpeed = env->GetBooleanField(options, gOptions_preferQualityOverSpeedFieldID); javaBitmap = env->GetObjectField(options, gOptions_bitmapFieldID); } if (willScale && javaBitmap != NULL) { return nullObjectReturn("Cannot pre-scale a reused bitmap"); } SkImageDecoder* decoder = SkImageDecoder::Factory(stream); if (decoder == NULL) { return nullObjectReturn("SkImageDecoder::Factory returned null"); } decoder->setSampleSize(sampleSize); decoder->setDitherImage(doDither); decoder->setPreferQualityOverSpeed(preferQualityOverSpeed); NinePatchPeeker peeker(decoder); 
JavaPixelAllocator javaAllocator(env); SkBitmap* bitmap; if (javaBitmap == NULL) { bitmap = new SkBitmap; } else { if (sampleSize != 1) { return nullObjectReturn("SkImageDecoder: Cannot reuse bitmap with sampleSize != 1"); } bitmap = (SkBitmap*) env->GetIntField(javaBitmap, gBitmap_nativeBitmapFieldID); // config of supplied bitmap overrules config set in options prefConfig = bitmap->getConfig(); } SkAutoTDelete<SkImageDecoder> add(decoder); SkAutoTDelete<SkBitmap> adb(bitmap, javaBitmap == NULL); decoder->setPeeker(&peeker); if (!isPurgeable) { decoder->setAllocator(&javaAllocator); } AutoDecoderCancel adc(options, decoder); // To fix the race condition in case "requestCancelDecode" // happens earlier than AutoDecoderCancel object is added // to the gAutoDecoderCancelMutex linked list. if (options != NULL && env->GetBooleanField(options, gOptions_mCancelID)) { return nullObjectReturn("gOptions_mCancelID"); } SkImageDecoder::Mode decodeMode = mode; if (isPurgeable) { decodeMode = SkImageDecoder::kDecodeBounds_Mode; } SkBitmap* decoded; if (willScale) { decoded = new SkBitmap; } else { decoded = bitmap; } SkAutoTDelete<SkBitmap> adb2(willScale ? 
decoded : NULL); if (!decoder->decode(stream, decoded, prefConfig, decodeMode, javaBitmap != NULL)) { return nullObjectReturn("decoder->decode returned false"); } int scaledWidth = decoded->width(); int scaledHeight = decoded->height(); if (willScale && mode != SkImageDecoder::kDecodeBounds_Mode) { scaledWidth = int(scaledWidth * scale + 0.5f); scaledHeight = int(scaledHeight * scale + 0.5f); } // update options (if any) if (options != NULL) { env->SetIntField(options, gOptions_widthFieldID, scaledWidth); env->SetIntField(options, gOptions_heightFieldID, scaledHeight); env->SetObjectField(options, gOptions_mimeFieldID, getMimeTypeString(env, decoder->getFormat())); } // if we're in justBounds mode, return now (skip the java bitmap) if (mode == SkImageDecoder::kDecodeBounds_Mode) { return NULL; } jbyteArray ninePatchChunk = NULL; if (peeker.fPatch != NULL) { if (willScale) { scaleNinePatchChunk(peeker.fPatch, scale); } size_t ninePatchArraySize = peeker.fPatch->serializedSize(); ninePatchChunk = env->NewByteArray(ninePatchArraySize); if (ninePatchChunk == NULL) { return nullObjectReturn("ninePatchChunk == null"); } jbyte* array = (jbyte*) env->GetPrimitiveArrayCritical(ninePatchChunk, NULL); if (array == NULL) { return nullObjectReturn("primitive array == null"); } peeker.fPatch->serialize(array); env->ReleasePrimitiveArrayCritical(ninePatchChunk, array, 0); } jintArray layoutBounds = NULL; if (peeker.fLayoutBounds != NULL) { layoutBounds = env->NewIntArray(4); if (layoutBounds == NULL) { return nullObjectReturn("layoutBounds == null"); } jint scaledBounds[4]; if (willScale) { for (int i=0; i<4; i++) { scaledBounds[i] = (jint)((((jint*)peeker.fLayoutBounds)[i]*scale) + .5f); } } else { memcpy(scaledBounds, (jint*)peeker.fLayoutBounds, sizeof(scaledBounds)); } env->SetIntArrayRegion(layoutBounds, 0, 4, scaledBounds); if (javaBitmap != NULL) { env->SetObjectField(javaBitmap, gBitmap_layoutBoundsFieldID, layoutBounds); } } if (willScale) { // This is weird so let me 
explain: we could use the scale parameter // directly, but for historical reasons this is how the corresponding // Dalvik code has always behaved. We simply recreate the behavior here. // The result is slightly different from simply using scale because of // the 0.5f rounding bias applied when computing the target image size const float sx = scaledWidth / float(decoded->width()); const float sy = scaledHeight / float(decoded->height()); SkBitmap::Config config = decoded->config(); switch (config) { case SkBitmap::kNo_Config: case SkBitmap::kIndex8_Config: case SkBitmap::kRLE_Index8_Config: config = SkBitmap::kARGB_8888_Config; break; default: break; } bitmap->setConfig(config, scaledWidth, scaledHeight); bitmap->setIsOpaque(decoded->isOpaque()); if (!bitmap->allocPixels(&javaAllocator, NULL)) { return nullObjectReturn("allocation failed for scaled bitmap"); } bitmap->eraseColor(0); SkPaint paint; paint.setFilterBitmap(true); SkCanvas canvas(*bitmap); canvas.scale(sx, sy); canvas.drawBitmap(*decoded, 0.0f, 0.0f, &paint); // Save off the unscaled version of bitmap to be used in later // transformations if it would reduce memory pressure. Only do // so if it is being upscaled more than 50%, is bigger than // 256x256, and not too big to be keeping a copy of (<1MB). 
const int numUnscaledPixels = decoded->width() * decoded->height(); if (sx > 1.5 && numUnscaledPixels > 65536 && numUnscaledPixels < 262144) { bitmap->setUnscaledBitmap(decoded); adb2.detach(); //responsibility for freeing decoded's memory is //transferred to bitmap's destructor } } if (padding) { if (peeker.fPatch != NULL) { GraphicsJNI::set_jrect(env, padding, peeker.fPatch->paddingLeft, peeker.fPatch->paddingTop, peeker.fPatch->paddingRight, peeker.fPatch->paddingBottom); } else { GraphicsJNI::set_jrect(env, padding, -1, -1, -1, -1); } } SkPixelRef* pr; if (isPurgeable) { pr = installPixelRef(bitmap, stream, sampleSize, doDither); } else { // if we get here, we're in kDecodePixels_Mode and will therefore // already have a pixelref installed. pr = bitmap->pixelRef(); } if (pr == NULL) { return nullObjectReturn("Got null SkPixelRef"); } if (!isMutable) { // promise we will never change our pixels (great for sharing and pictures) pr->setImmutable(); } // detach bitmap from its autodeleter, since we want to own it now adb.detach(); if (javaBitmap != NULL) { // If a java bitmap was passed in for reuse, pass it back return javaBitmap; } // now create the java bitmap return GraphicsJNI::createBitmap(env, bitmap, javaAllocator.getStorageObj(), isMutable, ninePatchChunk, layoutBounds, -1); }
// Returns the wrapped pixel ref's generation ID (used as the cache identity).
uint32_t onGetID() override { return fPR->getGenerationID(); }
// Queries only the YUV plane sizes (no plane data, row bytes, or color
// space) from the wrapped pixel ref. Returns its success flag.
bool onGetYUVSizes(SkISize sizes[3]) override { return fPR->getYUV8Planes(sizes, nullptr, nullptr, nullptr); }
// Receives the (possibly still incomplete) encoded image bytes.
// On the first call where the bounds can be decoded, this builds the
// lazily-created decoder record; once allDataReceived is true the backing
// pixels are allocated, the pixel ref is made immutable, and it is tagged
// with the source URL.
void ImageSource::setData(SharedBuffer* data, bool allDataReceived)
{
#ifdef ANDROID_ANIMATED_GIF
    // This is only necessary if we allow ourselves to partially decode GIF
    bool disabledAnimatedGif = false;
    if (m_decoder.m_gifDecoder && !m_decoder.m_gifDecoder->failed()) {
        m_decoder.m_gifDecoder->setData(data, allDataReceived);
        // Still streaming, or genuinely animated (more than one frame):
        // keep the animated decoder and stop here.
        if (!allDataReceived || m_decoder.m_gifDecoder->frameCount() != 1)
            return;
        // Exactly one frame: drop the animated decoder and fall through to
        // the still-image path below.
        disabledAnimatedGif = true;
        delete m_decoder.m_gifDecoder;
        m_decoder.m_gifDecoder = 0;
    }
#endif
    if (NULL == m_decoder.m_image
#ifdef ANDROID_ANIMATED_GIF
            && !m_decoder.m_gifDecoder
#endif
            ) {
        SkBitmap tmp;

        // Bounds-only decode (no pixel allocation) to learn the image size.
        SkMemoryStream stream(data->data(), data->size(), false);
        SkImageDecoder* codec = SkImageDecoder::Factory(&stream);
        if (!codec)
            return;
        SkAutoTDelete<SkImageDecoder> ad(codec);

        // FOR KITKAT MR2 INTEGRATION
        //codec->setPrefConfigTable(gPrefConfigTable);
        // Map source formats to preferred bitmap configs (struct-based
        // replacement for the old array form of setPrefConfigTable).
        SkImageDecoder::PrefConfigTable configTable;
        configTable.fPrefFor_8Index_NoAlpha_src = gPrefConfigTable[0];
        configTable.fPrefFor_8Index_YesAlpha_src = gPrefConfigTable[1];
        configTable.fPrefFor_8Gray_src = SkBitmap::kNo_Config;
        configTable.fPrefFor_8bpc_NoAlpha_src = gPrefConfigTable[4];
        configTable.fPrefFor_8bpc_YesAlpha_src = gPrefConfigTable[5];
        codec->setPrefConfigTable(configTable);
        // FOR KITKAT MR2 INTEGRATION

        if (!codec->decode(&stream, &tmp, SkImageDecoder::kDecodeBounds_Mode))
            return;

        int origW = tmp.width();
        int origH = tmp.height();

#ifdef ANDROID_ANIMATED_GIF
        // First, check to see if this is an animated GIF
        const char* contents = data->data();
        if (data->size() > 3 && strncmp(contents, "GIF8", 4) == 0 &&
                should_use_animated_gif(origW, origH) && !disabledAnimatedGif) {
            // This means we are looking at a GIF, so create special
            // GIF Decoder
            // Need to wait for all data received if we are assigning an
            // allocator (which we are not at the moment).
            if (!m_decoder.m_gifDecoder /*&& allDataReceived*/)
                m_decoder.m_gifDecoder = new GIFImageDecoder(m_alphaOption, m_gammaAndColorProfileOption);
            int frameCount = 0;
            if (!m_decoder.m_gifDecoder->failed()) {
                m_decoder.m_gifDecoder->setData(data, allDataReceived);
                if (!allDataReceived)
                    return;
                frameCount = m_decoder.m_gifDecoder->frameCount();
            }
            // Multi-frame (or failed) GIFs stay on the animated path.
            if (frameCount != 1)
                return;
            delete m_decoder.m_gifDecoder;
            m_decoder.m_gifDecoder = 0;
        }
#endif

        // Downsample large images; redo the bounds decode so tmp reflects
        // the sampled dimensions.
        int sampleSize = computeSampleSize(tmp);
        if (sampleSize > 1) {
            codec->setSampleSize(sampleSize);
            stream.rewind();
            if (!codec->decode(&stream, &tmp, SkImageDecoder::kDecodeBounds_Mode)) {
                return;
            }
        }

        m_decoder.m_image = new PrivateAndroidImageSourceRec(tmp, origW, origH, sampleSize);
        // SkDebugf("----- started: [%d %d] %s\n", origW, origH, m_decoder.m_url.c_str());
    }

    PrivateAndroidImageSourceRec* decoder = m_decoder.m_image;
    if (allDataReceived && decoder && !decoder->fAllDataReceived) {
        decoder->fAllDataReceived = true;

        SkBitmap* bm = &decoder->bitmap();

        // 4.2 Merge BEGIN <<
        //Following code removed in 4.2
        // SkPixelRef* ref = convertToRLE(bm, data->data(), data->size()); //4.2 Merge : removed in 4.2
        // if (ref) {
        //     bm->setPixelRef(ref)->unref();
        // } else {
        // 4.2 Merge END >>
        BitmapAllocatorAndroid alloc(data, decoder->fSampleSize);
        if (!alloc.allocPixelRef(bm, NULL)) {
            return;
        }
        SkPixelRef* ref = bm->pixelRef();//4.2 Merge
        // }//4.3 Merge : removed in 4.2

        // we promise to never change the pixels (makes picture recording fast)
        ref->setImmutable();

        // give it the URL if we have one
        ref->setURI(m_decoder.m_url);
    }
}
// since we "may" create a purgeable imageref, we require the stream be ref'able // i.e. dynamically allocated, since its lifetime may exceed the current stack // frame. static jobject doDecode(JNIEnv* env, SkStream* stream, jobject padding, jobject options, bool allowPurgeable, bool forcePurgeable = false) { int sampleSize = 1; SkImageDecoder::Mode mode = SkImageDecoder::kDecodePixels_Mode; SkBitmap::Config prefConfig = SkBitmap::kNo_Config; bool doDither = true; bool isPurgeable = forcePurgeable || (allowPurgeable && optionsPurgeable(env, options)); bool reportSizeToVM = optionsReportSizeToVM(env, options); bool preferQualityOverSpeed = false; if (NULL != options) { sampleSize = env->GetIntField(options, gOptions_sampleSizeFieldID); if (env->GetBooleanField(options, gOptions_justBoundsFieldID)) { mode = SkImageDecoder::kDecodeBounds_Mode; } // initialize these, in case we fail later on env->SetIntField(options, gOptions_widthFieldID, -1); env->SetIntField(options, gOptions_heightFieldID, -1); env->SetObjectField(options, gOptions_mimeFieldID, 0); jobject jconfig = env->GetObjectField(options, gOptions_configFieldID); prefConfig = GraphicsJNI::getNativeBitmapConfig(env, jconfig); doDither = env->GetBooleanField(options, gOptions_ditherFieldID); preferQualityOverSpeed = env->GetBooleanField(options, gOptions_preferQualityOverSpeedFieldID); } SkImageDecoder* decoder = SkImageDecoder::Factory(stream); if (NULL == decoder) { return nullObjectReturn("SkImageDecoder::Factory returned null"); } decoder->setSampleSize(sampleSize); decoder->setDitherImage(doDither); decoder->setPreferQualityOverSpeed(preferQualityOverSpeed); NinePatchPeeker peeker(decoder); JavaPixelAllocator javaAllocator(env, reportSizeToVM); SkBitmap* bitmap = new SkBitmap; Res_png_9patch dummy9Patch; SkAutoTDelete<SkImageDecoder> add(decoder); SkAutoTDelete<SkBitmap> adb(bitmap); decoder->setPeeker(&peeker); if (!isPurgeable) { decoder->setAllocator(&javaAllocator); } AutoDecoderCancel adc(options, 
decoder); // To fix the race condition in case "requestCancelDecode" // happens earlier than AutoDecoderCancel object is added // to the gAutoDecoderCancelMutex linked list. if (NULL != options && env->GetBooleanField(options, gOptions_mCancelID)) { return nullObjectReturn("gOptions_mCancelID");; } SkImageDecoder::Mode decodeMode = mode; if (isPurgeable) { decodeMode = SkImageDecoder::kDecodeBounds_Mode; } if (!decoder->decode(stream, bitmap, prefConfig, decodeMode)) { return nullObjectReturn("decoder->decode returned false"); } // update options (if any) if (NULL != options) { env->SetIntField(options, gOptions_widthFieldID, bitmap->width()); env->SetIntField(options, gOptions_heightFieldID, bitmap->height()); // TODO: set the mimeType field with the data from the codec. // but how to reuse a set of strings, rather than allocating new one // each time? env->SetObjectField(options, gOptions_mimeFieldID, getMimeTypeString(env, decoder->getFormat())); } // if we're in justBounds mode, return now (skip the java bitmap) if (SkImageDecoder::kDecodeBounds_Mode == mode) { return NULL; } jbyteArray ninePatchChunk = NULL; if (peeker.fPatchIsValid) { size_t ninePatchArraySize = peeker.fPatch->serializedSize(); ninePatchChunk = env->NewByteArray(ninePatchArraySize); if (NULL == ninePatchChunk) { return nullObjectReturn("ninePatchChunk == null"); } jbyte* array = (jbyte*)env->GetPrimitiveArrayCritical(ninePatchChunk, NULL); if (NULL == array) { return nullObjectReturn("primitive array == null"); } peeker.fPatch->serialize(array); env->ReleasePrimitiveArrayCritical(ninePatchChunk, array, 0); } // detach bitmap from its autotdeleter, since we want to own it now adb.detach(); if (padding) { if (peeker.fPatchIsValid) { GraphicsJNI::set_jrect(env, padding, peeker.fPatch->paddingLeft, peeker.fPatch->paddingTop, peeker.fPatch->paddingRight, peeker.fPatch->paddingBottom); } else { GraphicsJNI::set_jrect(env, padding, -1, -1, -1, -1); } } SkPixelRef* pr; if (isPurgeable) { pr = 
installPixelRef(bitmap, stream, sampleSize, doDither); } else { // if we get here, we're in kDecodePixels_Mode and will therefore // already have a pixelref installed. pr = bitmap->pixelRef(); } // promise we will never change our pixels (great for sharing and pictures) pr->setImmutable(); // now create the java bitmap return GraphicsJNI::createBitmap(env, bitmap, false, ninePatchChunk); }
// Decodes `stream` into a BitmapGlue, honoring the fields in `options`.
// When options.bitmap is supplied it is reused as the decode target
// (sampleSize must be 1) and returned; otherwise a fresh BitmapGlue is
// allocated and ownership passes to the caller. Returns NULL on failure
// or in bounds-only mode.
BitmapGlue* BitmapFactoryGlue::doDecode(SkStream* stream, Options& options,
        bool allowPurgeable, bool forcePurgeable) {
    int sampleSize = 1;
    SkImageDecoder::Mode mode = SkImageDecoder::kDecodePixels_Mode;
    SkBitmap::Config prefConfig = SkBitmap::kARGB_8888_Config;
    bool doDither = true;
    bool isMutable = false;
    bool isPurgeable = forcePurgeable ||
                        (allowPurgeable && options.isPurgeable);
    BitmapGlue* javaBitmap = NULL;

    sampleSize = options.sampleSize;
    if (options.justDecodeBounds)
        mode = SkImageDecoder::kDecodeBounds_Mode;

    // initialize these, in case we fail later on
    options.width = -1;
    options.height = -1;

    prefConfig = options.config;
    isMutable = options.isMutable;
    doDither = options.doDither;
    javaBitmap = options.bitmap;

    SkImageDecoder* decoder = SkImageDecoder::Factory(stream);
    if (NULL == decoder)
        return NULL;

    decoder->setSampleSize(sampleSize);
    decoder->setDitherImage(doDither);

    // BUGFIX: take ownership of the decoder *before* any early return below.
    // Previously the sampleSize != 1 reuse-path return leaked the decoder,
    // since the SkAutoTDelete was only declared afterwards.
    SkAutoTDelete<SkImageDecoder> add(decoder);

    BitmapGlue* bitmap;
    if (javaBitmap == NULL) {
        bitmap = new BitmapGlue;
    } else {
        // Reusing a caller-supplied bitmap is only supported at full size.
        if (sampleSize != 1) {
            return NULL;
        }
        bitmap = javaBitmap;
        // config of supplied bitmap overrules config set in options
        prefConfig = bitmap->getConfig();
    }

    // Delete `bitmap` on early exit only when we allocated it ourselves.
    SkAutoTDelete<SkBitmap> adb(bitmap, (javaBitmap == NULL));

    // For purgeable bitmaps only the bounds are decoded now; the pixels are
    // supplied lazily by the pixel ref installed below.
    SkImageDecoder::Mode decodeMode = mode;
    if (isPurgeable) {
        decodeMode = SkImageDecoder::kDecodeBounds_Mode;
    }
    if (!decoder->decode(stream, bitmap, prefConfig, decodeMode))
        return NULL;

    // update options (if any)
    options.width = bitmap->width();
    options.height = bitmap->height();

    // if we're in justBounds mode, return now (skip the java bitmap)
    if (SkImageDecoder::kDecodeBounds_Mode == mode) {
        return NULL;
    }

    SkPixelRef* pr;
    if (isPurgeable) {
        pr = installPixelRef(bitmap, stream, sampleSize, doDither);
    } else {
        // if we get here, we're in kDecodePixels_Mode and will therefore
        // already have a pixelref installed.
        pr = bitmap->pixelRef();
    }
    // BUGFIX: guard against a NULL pixel ref before dereferencing it
    // (installPixelRef can fail). adb still owns `bitmap` here, so a
    // locally-allocated bitmap is reclaimed on this path.
    if (NULL == pr) {
        return NULL;
    }

    if (!isMutable) {
        // promise we will never change our pixels (great for sharing and pictures)
        pr->setImmutable();
    }

    // detach bitmap from its autodeleter, since we want to own it now
    adb.detach();

    if (javaBitmap != NULL) {
        // If a java bitmap was passed in for reuse, pass it back
        return javaBitmap;
    }

    // now create the java bitmap
    return bitmap;
}
// since we "may" create a purgeable imageref, we require the stream be ref'able
// i.e. dynamically allocated, since its lifetime may exceed the current stack
// frame.
//
// Decodes `stream` into a Java bitmap, optionally density-scaling the result
// and optionally reusing the bitmap supplied in options.inBitmap. Handles
// nine-patch chunks/layout bounds, purgeable decodes (bounds-only here,
// pixels later), and unpremultiplied output. Returns the jobject bitmap
// (new or reused), or NULL in bounds-only mode / on failure.
static jobject doDecode(JNIEnv* env, SkStreamRewindable* stream, jobject padding,
        jobject options, bool allowPurgeable, bool forcePurgeable = false) {
    int sampleSize = 1;
    SkImageDecoder::Mode mode = SkImageDecoder::kDecodePixels_Mode;
    SkBitmap::Config prefConfig = SkBitmap::kARGB_8888_Config;
    bool doDither = true;
    bool isMutable = false;
    float scale = 1.0f;
    bool isPurgeable = forcePurgeable ||
            (allowPurgeable && optionsPurgeable(env, options));
    bool preferQualityOverSpeed = false;
    bool requireUnpremultiplied = false;

    jobject javaBitmap = NULL;

    if (options != NULL) {
        sampleSize = env->GetIntField(options, gOptions_sampleSizeFieldID);
        if (optionsJustBounds(env, options)) {
            mode = SkImageDecoder::kDecodeBounds_Mode;
        }

        // initialize these, in case we fail later on
        env->SetIntField(options, gOptions_widthFieldID, -1);
        env->SetIntField(options, gOptions_heightFieldID, -1);
        env->SetObjectField(options, gOptions_mimeFieldID, 0);

        jobject jconfig = env->GetObjectField(options, gOptions_configFieldID);
        prefConfig = GraphicsJNI::getNativeBitmapConfig(env, jconfig);
        isMutable = env->GetBooleanField(options, gOptions_mutableFieldID);
        doDither = env->GetBooleanField(options, gOptions_ditherFieldID);
        preferQualityOverSpeed = env->GetBooleanField(options,
                gOptions_preferQualityOverSpeedFieldID);
        requireUnpremultiplied = !env->GetBooleanField(options, gOptions_premultipliedFieldID);
        javaBitmap = env->GetObjectField(options, gOptions_bitmapFieldID);

        // Compute the density-based scale factor, mirroring the Dalvik-side
        // density handling.
        if (env->GetBooleanField(options, gOptions_scaledFieldID)) {
            const int density = env->GetIntField(options, gOptions_densityFieldID);
            const int targetDensity = env->GetIntField(options, gOptions_targetDensityFieldID);
            const int screenDensity = env->GetIntField(options, gOptions_screenDensityFieldID);
            if (density != 0 && targetDensity != 0 && density != screenDensity) {
                scale = (float) targetDensity / density;
            }
        }
    }

    const bool willScale = scale != 1.0f;
    // Scaling and purgeable pixels are mutually exclusive.
    isPurgeable &= !willScale;

    SkImageDecoder* decoder = SkImageDecoder::Factory(stream);
    if (decoder == NULL) {
        return nullObjectReturn("SkImageDecoder::Factory returned null");
    }

    decoder->setSampleSize(sampleSize);
    decoder->setDitherImage(doDither);
    decoder->setPreferQualityOverSpeed(preferQualityOverSpeed);
    decoder->setRequireUnpremultipliedColors(requireUnpremultiplied);

    // Resolve the output bitmap: either the caller-supplied reusable bitmap
    // (if it is still mutable) or a freshly allocated one owned by adb.
    SkBitmap* outputBitmap = NULL;
    unsigned int existingBufferSize = 0;
    if (javaBitmap != NULL) {
        outputBitmap = (SkBitmap*) env->GetIntField(javaBitmap, gBitmap_nativeBitmapFieldID);
        if (outputBitmap->isImmutable()) {
            ALOGW("Unable to reuse an immutable bitmap as an image decoder target.");
            javaBitmap = NULL;
            outputBitmap = NULL;
        } else {
            existingBufferSize = GraphicsJNI::getBitmapAllocationByteCount(env, javaBitmap);
        }
    }

    SkAutoTDelete<SkBitmap> adb(outputBitmap == NULL ? new SkBitmap : NULL);
    if (outputBitmap == NULL) outputBitmap = adb.get();

    NinePatchPeeker peeker(decoder);
    decoder->setPeeker(&peeker);

    SkImageDecoder::Mode decodeMode = isPurgeable ? SkImageDecoder::kDecodeBounds_Mode : mode;

    // Allocator choice: recycle the reused bitmap's storage when one was
    // supplied, otherwise allocate Java-managed pixels.
    JavaPixelAllocator javaAllocator(env);
    RecyclingPixelAllocator recyclingAllocator(outputBitmap->pixelRef(), existingBufferSize);
    ScaleCheckingAllocator scaleCheckingAllocator(scale, existingBufferSize);
    SkBitmap::Allocator* outputAllocator = (javaBitmap != NULL) ?
            (SkBitmap::Allocator*)&recyclingAllocator : (SkBitmap::Allocator*)&javaAllocator;
    if (decodeMode != SkImageDecoder::kDecodeBounds_Mode) {
        if (!willScale) {
            // If the java allocator is being used to allocate the pixel memory, the decoder
            // need not write zeroes, since the memory is initialized to 0.
            decoder->setSkipWritingZeroes(outputAllocator == &javaAllocator);
            decoder->setAllocator(outputAllocator);
        } else if (javaBitmap != NULL) {
            // check for eventual scaled bounds at allocation time, so we don't decode the bitmap
            // only to find the scaled result too large to fit in the allocation
            decoder->setAllocator(&scaleCheckingAllocator);
        }
    }

    // Only setup the decoder to be deleted after its stack-based, refcounted
    // components (allocators, peekers, etc) are declared. This prevents RefCnt
    // asserts from firing due to the order objects are deleted from the stack.
    SkAutoTDelete<SkImageDecoder> add(decoder);

    AutoDecoderCancel adc(options, decoder);

    // To fix the race condition in case "requestCancelDecode"
    // happens earlier than AutoDecoderCancel object is added
    // to the gAutoDecoderCancelMutex linked list.
    if (options != NULL && env->GetBooleanField(options, gOptions_mCancelID)) {
        return nullObjectReturn("gOptions_mCancelID");
    }

    SkBitmap decodingBitmap;
    if (!decoder->decode(stream, &decodingBitmap, prefConfig, decodeMode)) {
        return nullObjectReturn("decoder->decode returned false");
    }

    int scaledWidth = decodingBitmap.width();
    int scaledHeight = decodingBitmap.height();

    // Round to nearest when computing the density-scaled target dimensions.
    if (willScale && mode != SkImageDecoder::kDecodeBounds_Mode) {
        scaledWidth = int(scaledWidth * scale + 0.5f);
        scaledHeight = int(scaledHeight * scale + 0.5f);
    }

    // update options (if any)
    if (options != NULL) {
        env->SetIntField(options, gOptions_widthFieldID, scaledWidth);
        env->SetIntField(options, gOptions_heightFieldID, scaledHeight);
        env->SetObjectField(options, gOptions_mimeFieldID,
                getMimeTypeString(env, decoder->getFormat()));
    }

    // if we're in justBounds mode, return now (skip the java bitmap)
    if (mode == SkImageDecoder::kDecodeBounds_Mode) {
        return NULL;
    }

    // Serialize the nine-patch chunk (scaled if needed) into a Java array.
    jbyteArray ninePatchChunk = NULL;
    if (peeker.fPatch != NULL) {
        if (willScale) {
            scaleNinePatchChunk(peeker.fPatch, scale);
        }

        size_t ninePatchArraySize = peeker.fPatch->serializedSize();
        ninePatchChunk = env->NewByteArray(ninePatchArraySize);
        if (ninePatchChunk == NULL) {
            return nullObjectReturn("ninePatchChunk == null");
        }

        jbyte* array = (jbyte*) env->GetPrimitiveArrayCritical(ninePatchChunk, NULL);
        if (array == NULL) {
            return nullObjectReturn("primitive array == null");
        }

        peeker.fPatch->serialize(array);
        env->ReleasePrimitiveArrayCritical(ninePatchChunk, array, 0);
    }

    // Propagate (and scale) optical layout bounds, if the peeker found them.
    jintArray layoutBounds = NULL;
    if (peeker.fLayoutBounds != NULL) {
        layoutBounds = env->NewIntArray(4);
        if (layoutBounds == NULL) {
            return nullObjectReturn("layoutBounds == null");
        }

        jint scaledBounds[4];
        if (willScale) {
            for (int i=0; i<4; i++) {
                scaledBounds[i] = (jint)((((jint*)peeker.fLayoutBounds)[i]*scale) + .5f);
            }
        } else {
            memcpy(scaledBounds, (jint*)peeker.fLayoutBounds, sizeof(scaledBounds));
        }
        env->SetIntArrayRegion(layoutBounds, 0, 4, scaledBounds);
        if (javaBitmap != NULL) {
            env->SetObjectField(javaBitmap, gBitmap_layoutBoundsFieldID, layoutBounds);
        }
    }

    if (willScale) {
        // This is weird so let me explain: we could use the scale parameter
        // directly, but for historical reasons this is how the corresponding
        // Dalvik code has always behaved. We simply recreate the behavior here.
        // The result is slightly different from simply using scale because of
        // the 0.5f rounding bias applied when computing the target image size
        const float sx = scaledWidth / float(decodingBitmap.width());
        const float sy = scaledHeight / float(decodingBitmap.height());

        // TODO: avoid copying when scaled size equals decodingBitmap size
        SkBitmap::Config config = configForScaledOutput(decodingBitmap.config());
        outputBitmap->setConfig(config, scaledWidth, scaledHeight);
        outputBitmap->setIsOpaque(decodingBitmap.isOpaque());
        if (!outputBitmap->allocPixels(outputAllocator, NULL)) {
            return nullObjectReturn("allocation failed for scaled bitmap");
        }

        // If outputBitmap's pixels are newly allocated by Java, there is no need
        // to erase to 0, since the pixels were initialized to 0.
        if (outputAllocator != &javaAllocator) {
            outputBitmap->eraseColor(0);
        }

        SkPaint paint;
        paint.setFilterBitmap(true);

        SkCanvas canvas(*outputBitmap);
        canvas.scale(sx, sy);
        canvas.drawBitmap(decodingBitmap, 0.0f, 0.0f, &paint);
    } else {
        // No scaling: hand the decoded pixels to the output bitmap directly.
        outputBitmap->swap(decodingBitmap);
    }

    if (padding) {
        if (peeker.fPatch != NULL) {
            GraphicsJNI::set_jrect(env, padding,
                    peeker.fPatch->paddingLeft, peeker.fPatch->paddingTop,
                    peeker.fPatch->paddingRight, peeker.fPatch->paddingBottom);
        } else {
            GraphicsJNI::set_jrect(env, padding, -1, -1, -1, -1);
        }
    }

    SkPixelRef* pr;
    if (isPurgeable) {
        pr = installPixelRef(outputBitmap, stream, sampleSize, doDither);
    } else {
        // if we get here, we're in kDecodePixels_Mode and will therefore
        // already have a pixelref installed.
        pr = outputBitmap->pixelRef();
    }
    if (pr == NULL) {
        return nullObjectReturn("Got null SkPixelRef");
    }

    if (!isMutable && javaBitmap == NULL) {
        // promise we will never change our pixels (great for sharing and pictures)
        pr->setImmutable();
    }

    // detach bitmap from its autodeleter, since we want to own it now
    adb.detach();

    if (javaBitmap != NULL) {
        bool isPremultiplied = !requireUnpremultiplied;
        GraphicsJNI::reinitBitmap(env, javaBitmap, outputBitmap, isPremultiplied);
        outputBitmap->notifyPixelsChanged();
        // If a java bitmap was passed in for reuse, pass it back
        return javaBitmap;
    }

    int bitmapCreateFlags = 0x0;
    if (isMutable) bitmapCreateFlags |= GraphicsJNI::kBitmapCreateFlag_Mutable;
    if (!requireUnpremultiplied) bitmapCreateFlags |= GraphicsJNI::kBitmapCreateFlag_Premultiplied;

    // now create the java bitmap
    return GraphicsJNI::createBitmap(env, outputBitmap, javaAllocator.getStorageObj(),
            bitmapCreateFlags, ninePatchChunk, layoutBounds, -1);
}