Example #1
void
Decoder::Write(const char* aBuffer, uint32_t aCount)
{
  PROFILER_LABEL("ImageDecoder", "Write");

  // We're strict about decoder errors
  NS_ABORT_IF_FALSE(!HasDecoderError(),
                    "Not allowed to make more decoder calls after error!");

  // If a data error occurred, just ignore future data
  if (HasDataError())
    return;

  if (IsSizeDecode() && HasSize()) {
    // More data came in since we found the size. We have nothing to do here.
    return;
  }

  // Pass the data along to the implementation
  WriteInternal(aBuffer, aCount);

  // If we're a synchronous decoder and we need a new frame to proceed, let's
  // create one and call it again.
  while (mSynchronous && NeedsNewFrame() && !HasDataError()) {
    nsresult rv = AllocateFrame();

    if (NS_SUCCEEDED(rv)) {
      // Tell the decoder to use the data it saved when it asked for a new frame.
      WriteInternal(nullptr, 0);
    }
  }
}
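
A minimal sketch of how a caller might drive this entry point, feeding sequential buffered chunks until an error is posted. The Chunk struct, the DriveDecode helper, and the assumption that HasError() is callable from outside the decoder are all illustrative, not part of the API shown above:

#include <vector>

// Illustrative driver: feeds sequential chunks to a synchronous decoder.
struct Chunk { const char* data; uint32_t length; };

static void
DriveDecode(Decoder& aDecoder, const std::vector<Chunk>& aChunks)
{
  for (const Chunk& chunk : aChunks) {
    aDecoder.Write(chunk.data, chunk.length);
    if (aDecoder.HasError()) {
      break; // A decoder or data error was posted; stop feeding input.
    }
  }
}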
Example #2
void
Decoder::Write(const char* aBuffer, uint32_t aCount, DecodeStrategy aStrategy)
{
  PROFILER_LABEL("ImageDecoder", "Write",
    js::ProfileEntry::Category::GRAPHICS);

  MOZ_ASSERT(NS_IsMainThread() || aStrategy == DecodeStrategy::ASYNC);

  // We're strict about decoder errors
  MOZ_ASSERT(!HasDecoderError(),
             "Not allowed to make more decoder calls after error!");

  // Begin recording telemetry data.
  TimeStamp start = TimeStamp::Now();
  mChunkCount++;

  // Keep track of the total number of bytes written.
  mBytesDecoded += aCount;

  // If we're flushing data, clear the flag.
  if (aBuffer == nullptr && aCount == 0) {
    MOZ_ASSERT(mNeedsToFlushData, "Flushing when we don't need to");
    mNeedsToFlushData = false;
  }

  // If a data error occurred, just ignore future data.
  if (HasDataError())
    return;

  if (IsSizeDecode() && HasSize()) {
    // More data came in since we found the size. We have nothing to do here.
    return;
  }

  // Pass the data along to the implementation
  WriteInternal(aBuffer, aCount, aStrategy);

  // If we're a synchronous decoder and we need a new frame to proceed, let's
  // create one and call it again.
  if (aStrategy == DecodeStrategy::SYNC) {
    while (NeedsNewFrame() && !HasDataError()) {
      nsresult rv = AllocateFrame();

      if (NS_SUCCEEDED(rv)) {
        // Use the data we saved when we asked for a new frame.
        WriteInternal(nullptr, 0, aStrategy);
      }

      mNeedsToFlushData = false;
    }
  }

  // Finish telemetry.
  mDecodeTime += (TimeStamp::Now() - start);
}
Example #3
// CreateFrame() is used for both simple and animated images
void nsPNGDecoder::CreateFrame(png_uint_32 x_offset, png_uint_32 y_offset,
                               int32_t width, int32_t height,
                               gfx::SurfaceFormat format)
{
  MOZ_ASSERT(HasSize());

  if (format == gfx::SurfaceFormat::B8G8R8A8) {
    PostHasTransparency();
  }

  // Our first full frame is automatically created by the image decoding
  // infrastructure. Just use it as long as it matches up.
  nsIntRect neededRect(x_offset, y_offset, width, height);
  nsRefPtr<imgFrame> currentFrame = GetCurrentFrame();
  if (!currentFrame->GetRect().IsEqualEdges(neededRect)) {
    if (mNumFrames == 0) {
      // We need padding on the first frame, which means that we don't draw into
      // part of the image at all. Report that as transparency.
      PostHasTransparency();
    }

    NeedNewFrame(mNumFrames, x_offset, y_offset, width, height, format);
  } else if (mNumFrames != 0) {
    NeedNewFrame(mNumFrames, x_offset, y_offset, width, height, format);
  }

  mFrameRect = neededRect;

  MOZ_LOG(GetPNGDecoderAccountingLog(), LogLevel::Debug,
         ("PNGDecoderAccounting: nsPNGDecoder::CreateFrame -- created "
          "image frame with %dx%d pixels in container %p",
          width, height,
          &mImage));

#ifdef PNG_APNG_SUPPORTED
  if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
    mAnimInfo = AnimFrameInfo(mPNG, mInfo);

    if (mAnimInfo.mDispose == DisposalMethod::CLEAR) {
      // We may have to display the background under this image during
      // animation playback, so we regard it as transparent.
      PostHasTransparency();
    }
  }
#endif
}
Example #4
//******************************************************************************
void
nsGIFDecoder2::BeginImageFrame(uint16_t aDepth)
{
  MOZ_ASSERT(HasSize());

  gfx::SurfaceFormat format;
  if (mGIFStruct.is_transparent) {
    format = gfx::SurfaceFormat::B8G8R8A8;
    PostHasTransparency();
  } else {
    format = gfx::SurfaceFormat::B8G8R8X8;
  }

  // Use the correct format: RGB for the first frame, paletted for subsequent
  // frames, and include transparency to allow optimization of opaque images.
  if (mGIFStruct.images_decoded) {
    // Image data is stored with original depth and palette
    NeedNewFrame(mGIFStruct.images_decoded, mGIFStruct.x_offset,
                 mGIFStruct.y_offset, mGIFStruct.width, mGIFStruct.height,
                 format, aDepth);
  } else {
    nsRefPtr<imgFrame> currentFrame = GetCurrentFrame();

    // Our first full frame is automatically created by the image decoding
    // infrastructure. Just use it as long as it matches up.
    if (!currentFrame->GetRect().IsEqualEdges(nsIntRect(mGIFStruct.x_offset,
                                                        mGIFStruct.y_offset,
                                                        mGIFStruct.width,
                                                        mGIFStruct.height))) {

      // We need padding on the first frame, which means that we don't draw into
      // part of the image at all. Report that as transparency.
      PostHasTransparency();

      // Regardless of depth of input, image is decoded into 24bit RGB
      NeedNewFrame(mGIFStruct.images_decoded, mGIFStruct.x_offset,
                   mGIFStruct.y_offset, mGIFStruct.width, mGIFStruct.height,
                   format);
    }
  }

  mCurrentFrameIndex = mGIFStruct.images_decoded;
}
Example #5
//******************************************************************************
nsresult
nsGIFDecoder2::BeginImageFrame(uint16_t aDepth)
{
  MOZ_ASSERT(HasSize());

  gfx::SurfaceFormat format;
  if (mGIFStruct.is_transparent) {
    format = gfx::SurfaceFormat::B8G8R8A8;
    PostHasTransparency();
  } else {
    format = gfx::SurfaceFormat::B8G8R8X8;
  }

  nsIntRect frameRect(mGIFStruct.x_offset, mGIFStruct.y_offset,
                      mGIFStruct.width, mGIFStruct.height);

  // Use the correct format: RGB for the first frame, paletted for subsequent
  // frames, and include transparency to allow optimization of opaque images.
  nsresult rv = NS_OK;
  if (mGIFStruct.images_decoded) {
    // Image data is stored with original depth and palette.
    rv = AllocateFrame(mGIFStruct.images_decoded, GetSize(),
                       frameRect, format, aDepth);
  } else {
    if (!nsIntRect(nsIntPoint(), GetSize()).IsEqualEdges(frameRect)) {
      // We need padding on the first frame, which means that we don't draw into
      // part of the image at all. Report that as transparency.
      PostHasTransparency();
    }

    // Regardless of depth of input, the first frame is decoded into 24bit RGB.
    rv = AllocateFrame(mGIFStruct.images_decoded, GetSize(),
                       frameRect, format);
  }

  mCurrentFrameIndex = mGIFStruct.images_decoded;

  return rv;
}
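
In the non-animated branch the decision hinges on whether the first frame's rect covers the whole image; when it does not, the padding is reported as transparency. The same predicate as a standalone sketch (the helper name is illustrative, not part of nsGIFDecoder2):

// Sketch: true when the first frame fills the whole image, so no padding
// around it has to be treated as transparent.
static bool
FirstFrameCoversImage(const nsIntRect& aFrameRect, const nsIntSize& aImageSize)
{
  return nsIntRect(nsIntPoint(), aImageSize).IsEqualEdges(aFrameRect);
}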
Example #6
//******************************************************************************
void nsGIFDecoder2::BeginImageFrame(uint16_t aDepth)
{
  gfx::SurfaceFormat format;
  if (mGIFStruct.is_transparent)
    format = gfx::SurfaceFormat::B8G8R8A8;
  else
    format = gfx::SurfaceFormat::B8G8R8X8;

  MOZ_ASSERT(HasSize());

  // Use the correct format: RGB for the first frame, paletted for subsequent
  // frames, and include transparency to allow optimization of opaque images.
  if (mGIFStruct.images_decoded) {
    // Image data is stored with original depth and palette
    NeedNewFrame(mGIFStruct.images_decoded, mGIFStruct.x_offset,
                 mGIFStruct.y_offset, mGIFStruct.width, mGIFStruct.height,
                 format, aDepth);
  }

  // Our first full frame is automatically created by the image decoding
  // infrastructure. Just use it as long as it matches up.
  else if (!GetCurrentFrame()->GetRect().IsEqualEdges(nsIntRect(mGIFStruct.x_offset,
                                                                mGIFStruct.y_offset,
                                                                mGIFStruct.width,
                                                                mGIFStruct.height))) {
    // Regardless of depth of input, image is decoded into 24bit RGB
    NeedNewFrame(mGIFStruct.images_decoded, mGIFStruct.x_offset,
                 mGIFStruct.y_offset, mGIFStruct.width, mGIFStruct.height,
                 format);
  } else {
    // Our preallocated frame matches up, with the possible exception of alpha.
    if (format == gfx::SurfaceFormat::B8G8R8X8) {
      GetCurrentFrame()->SetHasNoAlpha();
    }
  }

  mCurrentFrameIndex = mGIFStruct.images_decoded;
}
Example #7
// CreateFrame() is used for both simple and animated images
void nsPNGDecoder::CreateFrame(png_uint_32 x_offset, png_uint_32 y_offset,
                               int32_t width, int32_t height,
                               gfx::SurfaceFormat format)
{
  // Our first full frame is automatically created by the image decoding
  // infrastructure. Just use it as long as it matches up.
  MOZ_ASSERT(HasSize());
  if (mNumFrames != 0 ||
      !GetCurrentFrame()->GetRect().IsEqualEdges(nsIntRect(x_offset, y_offset, width, height))) {
    NeedNewFrame(mNumFrames, x_offset, y_offset, width, height, format);
  } else if (mNumFrames == 0) {
    // Our preallocated frame matches up, with the possible exception of alpha.
    if (format == gfx::SurfaceFormat::B8G8R8X8) {
      GetCurrentFrame()->SetHasNoAlpha();
    }
  }

  mFrameRect.x = x_offset;
  mFrameRect.y = y_offset;
  mFrameRect.width = width;
  mFrameRect.height = height;

  PR_LOG(GetPNGDecoderAccountingLog(), PR_LOG_DEBUG,
         ("PNGDecoderAccounting: nsPNGDecoder::CreateFrame -- created "
          "image frame with %dx%d pixels in container %p",
          width, height,
          &mImage));

  mFrameHasNoAlpha = true;

#ifdef PNG_APNG_SUPPORTED
  if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
    mAnimInfo = AnimFrameInfo(mPNG, mInfo);
  }
#endif
}
Example #8
void
Decoder::Write(const char* aBuffer, uint32_t aCount)
{
  PROFILER_LABEL("ImageDecoder", "Write",
    js::ProfileEntry::Category::GRAPHICS);

  MOZ_ASSERT(aBuffer);
  MOZ_ASSERT(aCount > 0);

  // We're strict about decoder errors
  MOZ_ASSERT(!HasDecoderError(),
             "Not allowed to make more decoder calls after error!");

  // Begin recording telemetry data.
  TimeStamp start = TimeStamp::Now();
  mChunkCount++;

  // Keep track of the total number of bytes written.
  mBytesDecoded += aCount;

  // If a data error occurred, just ignore future data.
  if (HasDataError()) {
    return;
  }

  if (IsMetadataDecode() && HasSize()) {
    // More data came in since we found the size. We have nothing to do here.
    return;
  }

  // Pass the data along to the implementation.
  WriteInternal(aBuffer, aCount);

  // Finish telemetry.
  mDecodeTime += (TimeStamp::Now() - start);
}
Example #9
void
nsICODecoder::WriteInternal(const char* aBuffer, uint32_t aCount, DecodeStrategy aStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  if (!aCount) {
    if (mContainedDecoder) {
      WriteToContainedDecoder(aBuffer, aCount, aStrategy);
    }
    return;
  }

  while (aCount && (mPos < ICONCOUNTOFFSET)) { // Skip to the # of icons.
    if (mPos == 2) { // if the third byte is 1: This is an icon, 2: a cursor
      if ((*aBuffer != 1) && (*aBuffer != 2)) {
        PostDataError();
        return;
      }
      mIsCursor = (*aBuffer == 2);
    }
    mPos++; aBuffer++; aCount--;
  }

  if (mPos == ICONCOUNTOFFSET && aCount >= 2) {
    mNumIcons = LittleEndian::readUint16(reinterpret_cast<const uint16_t*>(aBuffer));
    aBuffer += 2;
    mPos += 2;
    aCount -= 2;
  }

  if (mNumIcons == 0)
    return; // Nothing to do.

  uint16_t colorDepth = 0;
  nsIntSize prefSize = mImage.GetRequestedResolution();
  if (prefSize.width == 0 && prefSize.height == 0) {
    prefSize.SizeTo(PREFICONSIZE, PREFICONSIZE);
  }

  // A measure of the difference in size between the entry we've found
  // and the requested size. We will choose the smallest image that is
  // >= requested size (i.e. we assume it's better to downscale a larger
  // icon than to upscale a smaller one).
  int32_t diff = INT_MIN;

  // Loop through each entry's dir entry
  while (mCurrIcon < mNumIcons) { 
    if (mPos >= DIRENTRYOFFSET + (mCurrIcon * sizeof(mDirEntryArray)) && 
        mPos < DIRENTRYOFFSET + ((mCurrIcon + 1) * sizeof(mDirEntryArray))) {
      uint32_t toCopy = sizeof(mDirEntryArray) - 
                        (mPos - DIRENTRYOFFSET - mCurrIcon * sizeof(mDirEntryArray));
      if (toCopy > aCount) {
        toCopy = aCount;
      }
      memcpy(mDirEntryArray + sizeof(mDirEntryArray) - toCopy, aBuffer, toCopy);
      mPos += toCopy;
      aCount -= toCopy;
      aBuffer += toCopy;
    }
    if (aCount == 0)
      return; // Need more data

    IconDirEntry e;
    if (mPos == (DIRENTRYOFFSET + ICODIRENTRYSIZE) + 
                (mCurrIcon * sizeof(mDirEntryArray))) {
      mCurrIcon++;
      ProcessDirEntry(e);
      // We can't use GetRealWidth and GetRealHeight here because those operate
      // on mDirEntry; here we are going through each item in the directory.
      // Calculate the delta between this image's size and the desired size,
      // so we can see if it is better than our current-best option.
      // In the case of several equally-good images, we use the last one.
      int32_t delta = (e.mWidth == 0 ? 256 : e.mWidth) - prefSize.width +
                      (e.mHeight == 0 ? 256 : e.mHeight) - prefSize.height;
      if (e.mBitCount >= colorDepth &&
          ((diff < 0 && delta >= diff) || (delta >= 0 && delta <= diff))) {
        diff = delta;
        mImageOffset = e.mImageOffset;

        // ensure mImageOffset is >= size of the direntry headers (bug #245631)
        uint32_t minImageOffset = DIRENTRYOFFSET + 
                                  mNumIcons * sizeof(mDirEntryArray);
        if (mImageOffset < minImageOffset) {
          PostDataError();
          return;
        }

        colorDepth = e.mBitCount;
        memcpy(&mDirEntry, &e, sizeof(IconDirEntry));
      }
    }
  }

  if (mPos < mImageOffset) {
    // Skip to (or at least towards) the desired image offset
    uint32_t toSkip = mImageOffset - mPos;
    if (toSkip > aCount)
      toSkip = aCount;

    mPos    += toSkip;
    aBuffer += toSkip;
    aCount  -= toSkip;
  }

  // If we are within the first PNGSIGNATURESIZE bytes of the image data,
  // then we have either a BMP or a PNG.  We use the first PNGSIGNATURESIZE
  // bytes to determine which one we have.
  if (mCurrIcon == mNumIcons && mPos >= mImageOffset && 
      mPos < mImageOffset + PNGSIGNATURESIZE)
  {
    uint32_t toCopy = PNGSIGNATURESIZE - (mPos - mImageOffset);
    if (toCopy > aCount) {
      toCopy = aCount;
    }

    memcpy(mSignature + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;

    mIsPNG = !memcmp(mSignature, nsPNGDecoder::pngSignatureBytes, 
                     PNGSIGNATURESIZE);
    if (mIsPNG) {
      mContainedDecoder = new nsPNGDecoder(mImage);
      mContainedDecoder->SetObserver(mObserver);
      mContainedDecoder->SetSizeDecode(IsSizeDecode());
      mContainedDecoder->InitSharedDecoder(mImageData, mImageDataLength,
                                           mColormap, mColormapSize,
                                           mCurrentFrame);
      if (!WriteToContainedDecoder(mSignature, PNGSIGNATURESIZE, aStrategy)) {
        return;
      }
    }
  }

  // If we have a PNG, let the PNG decoder do all of the rest of the work
  if (mIsPNG && mContainedDecoder && mPos >= mImageOffset + PNGSIGNATURESIZE) {
    if (!WriteToContainedDecoder(aBuffer, aCount, aStrategy)) {
      return;
    }

    if (!HasSize() && mContainedDecoder->HasSize()) {
      PostSize(mContainedDecoder->GetImageMetadata().GetWidth(),
               mContainedDecoder->GetImageMetadata().GetHeight());
    }

    mPos += aCount;
    aBuffer += aCount;
    aCount = 0;

    // Raymond Chen says that only 32bpp PNGs are valid in ICOs
    // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
    if (!IsSizeDecode() &&
        !static_cast<nsPNGDecoder*>(mContainedDecoder.get())->IsValidICO()) {
      PostDataError();
    }
    return;
  }

  // We've processed all of the icon dir entries and are within the 
  // bitmap info size
  if (!mIsPNG && mCurrIcon == mNumIcons && mPos >= mImageOffset && 
      mPos >= mImageOffset + PNGSIGNATURESIZE && 
      mPos < mImageOffset + BITMAPINFOSIZE) {

    // As we were decoding, we did not know if we had a PNG signature or the
    // start of a bitmap information header.  At this point we know we had
    // a bitmap information header and not a PNG signature, so fill the bitmap
    // information header with the data it should already have.
    memcpy(mBIHraw, mSignature, PNGSIGNATURESIZE);

    // We've found the icon.
    uint32_t toCopy = sizeof(mBIHraw) - (mPos - mImageOffset);
    if (toCopy > aCount)
      toCopy = aCount;

    memcpy(mBIHraw + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;
  }

  // If we have a BMP inside the ICO and we have read the BIH header
  if (!mIsPNG && mPos == mImageOffset + BITMAPINFOSIZE) {

    // Make sure we have a sane value for the bitmap information header
    int32_t bihSize = ExtractBIHSizeFromBitmap(reinterpret_cast<int8_t*>(mBIHraw));
    if (bihSize != BITMAPINFOSIZE) {
      PostDataError();
      return;
    }
    // We are extracting the BPP from the BIH header as it should be trusted 
    // over the one we have from the icon header
    mBPP = ExtractBPPFromBitmap(reinterpret_cast<int8_t*>(mBIHraw));
    
    // Init the bitmap decoder which will do most of the work for us
    // It will do everything except the AND mask which isn't present in bitmaps
    // bmpDecoder is kept as a raw pointer for convenience; mContainedDecoder owns it and will free it
    nsBMPDecoder *bmpDecoder = new nsBMPDecoder(mImage);
    mContainedDecoder = bmpDecoder;
    bmpDecoder->SetUseAlphaData(true);
    mContainedDecoder->SetObserver(mObserver);
    mContainedDecoder->SetSizeDecode(IsSizeDecode());
    mContainedDecoder->InitSharedDecoder(mImageData, mImageDataLength,
                                         mColormap, mColormapSize,
                                         mCurrentFrame);

    // The ICO format when containing a BMP does not include the 14 byte
    // bitmap file header. To use the code of the BMP decoder we need to 
    // generate this header ourselves and feed it to the BMP decoder.
    int8_t bfhBuffer[BMPFILEHEADERSIZE];
    if (!FillBitmapFileHeaderBuffer(bfhBuffer)) {
      PostDataError();
      return;
    }
    if (!WriteToContainedDecoder((const char*)bfhBuffer, sizeof(bfhBuffer), aStrategy)) {
      return;
    }

    // Setup the cursor hot spot if one is present
    SetHotSpotIfCursor();

    // Fix the ICO height from the BIH: halve it so our BMP decoder will
    // understand it.
    if (!FixBitmapHeight(reinterpret_cast<int8_t*>(mBIHraw))) {
      PostDataError();
      return;
    }

    // Fix the ICO width from the BIH.
    if (!FixBitmapWidth(reinterpret_cast<int8_t*>(mBIHraw))) {
      PostDataError();
      return;
    }

    // Write out the BMP's bitmap info header
    if (!WriteToContainedDecoder(mBIHraw, sizeof(mBIHraw), aStrategy)) {
      return;
    }

    PostSize(mContainedDecoder->GetImageMetadata().GetWidth(),
             mContainedDecoder->GetImageMetadata().GetHeight());

    // We have the size. If we're doing a size decode, we got what
    // we came for.
    if (IsSizeDecode())
      return;

    // Sometimes the ICO BPP header field is not filled out
    // so we should trust the contained resource over our own
    // information.
    mBPP = bmpDecoder->GetBitsPerPixel();

    // Check to make sure we have valid color settings
    uint16_t numColors = GetNumColors();
    if (numColors == (uint16_t)-1) {
      PostDataError();
      return;
    }
  }

  // If we have a BMP
  if (!mIsPNG && mContainedDecoder && mPos >= mImageOffset + BITMAPINFOSIZE) {
    uint16_t numColors = GetNumColors();
    if (numColors == (uint16_t)-1) {
      PostDataError();
      return;
    }
    // Feed the actual image data (not including headers) into the BMP decoder
    uint32_t bmpDataOffset = mDirEntry.mImageOffset + BITMAPINFOSIZE;
    uint32_t bmpDataEnd = mDirEntry.mImageOffset + BITMAPINFOSIZE + 
                          static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetCompressedImageSize() +
                          4 * numColors;

    // If we are feeding in the core image data, but we have not yet
    // reached the ICO's 'AND buffer mask'
    if (mPos >= bmpDataOffset && mPos < bmpDataEnd) {

      // Figure out how much data the BMP decoder wants
      uint32_t toFeed = bmpDataEnd - mPos;
      if (toFeed > aCount) {
        toFeed = aCount;
      }

      if (!WriteToContainedDecoder(aBuffer, toFeed, aStrategy)) {
        return;
      }

      mPos += toFeed;
      aCount -= toFeed;
      aBuffer += toFeed;
    }
  
    // If the bitmap is fully processed, treat any left over data as the ICO's
    // 'AND buffer mask' which appears after the bitmap resource.
    if (!mIsPNG && mPos >= bmpDataEnd) {
      // There may be an optional AND bit mask after the data. This is
      // only used if the alpha data is not already set. The alpha data
      // is used for 32bpp bitmaps, as per the comment in ICODecoder.h;
      // the alpha mask should be checked in all other cases.
      if (static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetBitsPerPixel() != 32 || 
          !static_cast<nsBMPDecoder*>(mContainedDecoder.get())->HasAlphaData()) {
        uint32_t rowSize = ((GetRealWidth() + 31) / 32) * 4; // + 31 to round up
        if (mPos == bmpDataEnd) {
          mPos++;
          mRowBytes = 0;
          mCurLine = GetRealHeight();
          mRow = (uint8_t*)moz_realloc(mRow, rowSize);
          if (!mRow) {
            PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
            return;
          }
        }

        // Ensure memory has been allocated before decoding.
        NS_ABORT_IF_FALSE(mRow, "mRow is null");
        if (!mRow) {
          PostDataError();
          return;
        }

        while (mCurLine > 0 && aCount > 0) {
          uint32_t toCopy = std::min(rowSize - mRowBytes, aCount);
          if (toCopy) {
            memcpy(mRow + mRowBytes, aBuffer, toCopy);
            aCount -= toCopy;
            aBuffer += toCopy;
            mRowBytes += toCopy;
          }
          if (rowSize == mRowBytes) {
            mCurLine--;
            mRowBytes = 0;

            uint32_t* imageData = 
              static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetImageData();
            if (!imageData) {
              PostDataError();
              return;
            }
            uint32_t* decoded = imageData + mCurLine * GetRealWidth();
            uint32_t* decoded_end = decoded + GetRealWidth();
            uint8_t* p = mRow, *p_end = mRow + rowSize; 
            while (p < p_end) {
              uint8_t idx = *p++;
              for (uint8_t bit = 0x80; bit && decoded<decoded_end; bit >>= 1) {
                // Clear pixel completely for transparency.
                if (idx & bit) {
                  *decoded = 0;
                }
                decoded++;
              }
            }
          }
        }
      }
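
The AND-mask loop near the end of this example relies on the mask's row stride: the mask is 1 bit per pixel and each row is padded to a 32-bit boundary, which is what ((GetRealWidth() + 31) / 32) * 4 computes. A minimal restatement of that arithmetic as a sketch, with a worked value:

// Sketch: bytes per AND-mask row (1 bit per pixel, rows padded to 32 bits).
// e.g. a 33-pixel-wide icon needs two 32-bit words = 8 bytes per mask row.
static uint32_t
AndMaskRowSize(uint32_t aWidth)
{
  return ((aWidth + 31) / 32) * 4;
}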
Example #10
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  // The only valid format for WebP decoding for both alpha and non-alpha
  // images is BGRA, where Opaque images have an A of 255.
  // Assume transparency for all images.
  // XXX: This could be compositor-optimized by doing a one-time check for
  // all-255 alpha pixels, but that might interfere with progressive
  // decoding. Probably not worth it?
  PostHasTransparency();
  
  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Transfer from mData to mImageData
  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      for (int pix = 0; pix < width; pix++) {
        // RGBA -> BGRA
        uint32_t DataOffset = 4 * (line * width + pix);
        mImageData[DataOffset+0] = mData[DataOffset+2];
        mImageData[DataOffset+1] = mData[DataOffset+1];
        mImageData[DataOffset+2] = mData[DataOffset+0];
        mImageData[DataOffset+3] = mData[DataOffset+3];
      }
    } 

    // Invalidate
    nsIntRect r(0, mLastLine, width, lastLineRead);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}
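
The copy loop above turns the RGBA rows returned by WebPIDecGetRGB into the BGRA layout of mImageData by swapping the red and blue bytes. The same per-pixel transform as a standalone sketch (the helper is illustrative and assumes the incremental decoder was configured for RGBA output):

// Sketch: convert one row of packed RGBA pixels to BGRA.
static void
CopyRowRGBAToBGRA(const uint8_t* aSrc, uint8_t* aDst, int aWidth)
{
  for (int pix = 0; pix < aWidth; ++pix) {
    aDst[4 * pix + 0] = aSrc[4 * pix + 2]; // B
    aDst[4 * pix + 1] = aSrc[4 * pix + 1]; // G
    aDst[4 * pix + 2] = aSrc[4 * pix + 0]; // R
    aDst[4 * pix + 3] = aSrc[4 * pix + 3]; // A
  }
}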
Example #11
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount, DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  uint32_t imagelength;
  // First incremental Image data chunk. Special handling required.
  if (mLastLine == 0 && lastLineRead > 0) {
    imgFrame* aFrame;
    nsresult res = mImage.EnsureFrame(0, 0, 0, width, height,
                                       gfxASurface::ImageFormatARGB32,
                                       (uint8_t**)&mImageData, &imagelength, &aFrame);
    if (NS_FAILED(res) || !mImageData) {
      PostDecoderError(NS_ERROR_FAILURE);
      return;
    }
  }

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      // mImageData is a byte buffer; advance by whole 32-bit pixels per row.
      uint32_t* cptr32 = (uint32_t*)(mImageData) + (line * width);
      uint8_t* cptr8 = mData + (line * stride);
      for (int pix = 0; pix < width; pix++, cptr8 += 4) {
        // if((cptr8[3] != 0) && (cptr8[0] != 0) && (cptr8[1] != 0) && (cptr8[2] != 0))
        *cptr32++ = gfxPackedPixel(cptr8[3], cptr8[0], cptr8[1], cptr8[2]);
      }
      }
    }

    // Invalidate
    nsIntRect r(0, mLastLine, width, lastLineRead);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}
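
gfxPackedPixel packs the (A, R, G, B) bytes pulled from the RGBA source into one 32-bit ARGB value; the real helper may also premultiply the colour channels by alpha. A plain, non-premultiplying stand-in for illustration:

// Illustrative ARGB32 packing (no premultiplication); on little-endian this
// lays the bytes out in memory as B, G, R, A.
static uint32_t
PackARGB(uint8_t a, uint8_t r, uint8_t g, uint8_t b)
{
  return (uint32_t(a) << 24) | (uint32_t(r) << 16) |
         (uint32_t(g) << 8)  |  uint32_t(b);
}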