Code Example #1
File: nsPNGDecoder.cpp  Project: hobinjk/gecko-dev
// CreateFrame() is used for both simple and animated images
void nsPNGDecoder::CreateFrame(png_uint_32 x_offset, png_uint_32 y_offset,
                               int32_t width, int32_t height,
                               gfx::SurfaceFormat format)
{
  MOZ_ASSERT(HasSize());

  if (format == gfx::SurfaceFormat::B8G8R8A8) {
    PostHasTransparency();
  }

  // Our first full frame is automatically created by the image decoding
  // infrastructure. Just use it as long as it matches up.
  nsIntRect neededRect(x_offset, y_offset, width, height);
  nsRefPtr<imgFrame> currentFrame = GetCurrentFrame();
  if (!currentFrame->GetRect().IsEqualEdges(neededRect)) {
    if (mNumFrames == 0) {
      // We need padding on the first frame, which means that we don't draw into
      // part of the image at all. Report that as transparency.
      PostHasTransparency();
    }

    NeedNewFrame(mNumFrames, x_offset, y_offset, width, height, format);
  } else if (mNumFrames != 0) {
    NeedNewFrame(mNumFrames, x_offset, y_offset, width, height, format);
  }

  mFrameRect = neededRect;

  MOZ_LOG(GetPNGDecoderAccountingLog(), LogLevel::Debug,
         ("PNGDecoderAccounting: nsPNGDecoder::CreateFrame -- created "
          "image frame with %dx%d pixels in container %p",
          width, height,
          &mImage));

#ifdef PNG_APNG_SUPPORTED
  if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
    mAnimInfo = AnimFrameInfo(mPNG, mInfo);

    if (mAnimInfo.mDispose == DisposalMethod::CLEAR) {
      // We may have to display the background under this image during
      // animation playback, so we regard it as transparent.
      PostHasTransparency();
    }
  }
#endif
}
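
The example above reports transparency in three distinct situations: an alpha-carrying surface format, a first frame that does not cover the full image, and an APNG frame disposed of by clearing. The following stand-alone sketch condenses that policy into plain C++; the Rect and FrameInfo types and the NeedsTransparency helper are invented for illustration and are not part of the Mozilla code.

#include <cstdint>

// Invented types for this sketch only.
struct Rect {
  int32_t x, y, width, height;
  bool IsEqualEdges(const Rect& aOther) const {
    return x == aOther.x && y == aOther.y &&
           width == aOther.width && height == aOther.height;
  }
};

struct FrameInfo {
  bool hasAlphaFormat;     // e.g. SurfaceFormat::B8G8R8A8
  bool isFirstFrame;
  bool disposeByClearing;  // e.g. APNG DisposalMethod::CLEAR
  Rect frameRect;
  Rect imageRect;
};

// Returns true when a decoder following the policy above should report the
// image as (possibly) transparent.
bool NeedsTransparency(const FrameInfo& aFrame) {
  if (aFrame.hasAlphaFormat) {
    return true;  // the pixel format itself carries alpha
  }
  if (aFrame.isFirstFrame && !aFrame.frameRect.IsEqualEdges(aFrame.imageRect)) {
    return true;  // padding around the first frame is never drawn
  }
  if (aFrame.disposeByClearing) {
    return true;  // the background shows through during animation playback
  }
  return false;
}
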
Code Example #2
File: Decoder.cpp  Project: TheGuy82/gecko-dev
void
Decoder::CompleteDecode()
{
  // Implementation-specific finalization
  BeforeFinishInternal();
  if (!HasError()) {
    FinishInternal();
  } else {
    FinishWithErrorInternal();
  }

  // If the implementation left us mid-frame, finish that up.
  if (mInFrame && !HasError()) {
    PostFrameStop();
  }

  // If PostDecodeDone() has not been called, and this decoder wasn't aborted
  // early because of low-memory conditions or losing a race with another
  // decoder, we need to send teardown notifications (and report an error to the
  // console later).
  if (!IsMetadataDecode() && !mDecodeDone && !WasAborted()) {
    mShouldReportError = true;

    // If we only have a data error, we're usable if we have at least one
    // complete frame.
    if (!HasDecoderError() && GetCompleteFrameCount() > 0) {
      // We're usable, so do exactly what we should have when the decoder
      // completed.

      // Not writing to the entire frame may have left us transparent.
      PostHasTransparency();

      if (mInFrame) {
        PostFrameStop();
      }
      PostDecodeDone();
    } else {
      // We're not usable. Record some final progress indicating the error.
      if (!IsMetadataDecode()) {
        mProgress |= FLAG_DECODE_COMPLETE;
      }
      mProgress |= FLAG_HAS_ERROR;
    }
  }

  if (mDecodeDone && !IsMetadataDecode()) {
    MOZ_ASSERT(HasError() || mCurrentFrame, "Should have an error or a frame");

    // If this image wasn't animated and isn't a transient image, mark its frame
    // as optimizable. We don't support optimizing animated images and
    // optimizing transient images isn't worth it.
    if (!HasAnimation() &&
        !(mDecoderFlags & DecoderFlags::IMAGE_IS_TRANSIENT) &&
        mCurrentFrame) {
      mCurrentFrame->SetOptimizable();
    }
  }
}
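
CompleteDecode's recovery logic boils down to a small decision: a decode that did not finish cleanly is still usable if the only problem was a data error and at least one frame decoded completely; otherwise the progress bits record both completion and an error. Below is a minimal, hypothetical rendering of that decision as a pure function; the enum and function names are invented for this sketch.

// Invented names for this sketch only.
enum class DecodeOutcome {
  SalvagedPartial,  // only a data error; finalize as if the decode completed
  Failed            // record FLAG_DECODE_COMPLETE | FLAG_HAS_ERROR
};

// Classifies a decode that ended without PostDecodeDone() and was not
// deliberately aborted (cf. the !mDecodeDone && !WasAborted() check above).
DecodeOutcome ClassifyUnfinishedDecode(bool aHasDecoderError,
                                       unsigned aCompleteFrames) {
  if (!aHasDecoderError && aCompleteFrames > 0) {
    // Only a data error and at least one complete frame: the image is still
    // usable, so it is finalized as if the decoder had completed normally.
    return DecodeOutcome::SalvagedPartial;
  }
  // Not usable: final progress records both completion and the error.
  return DecodeOutcome::Failed;
}
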
Code Example #3
File: nsGIFDecoder2.cpp  Project: LordJZ/gecko-dev
//******************************************************************************
void
nsGIFDecoder2::BeginImageFrame(uint16_t aDepth)
{
  MOZ_ASSERT(HasSize());

  gfx::SurfaceFormat format;
  if (mGIFStruct.is_transparent) {
    format = gfx::SurfaceFormat::B8G8R8A8;
    PostHasTransparency();
  } else {
    format = gfx::SurfaceFormat::B8G8R8X8;
  }

  // Use the correct format: RGB for the first frame and a palette (PAL) for
  // following frames. Include transparency information so that opaque images
  // can be optimized.
  if (mGIFStruct.images_decoded) {
    // Image data is stored with original depth and palette
    NeedNewFrame(mGIFStruct.images_decoded, mGIFStruct.x_offset,
                 mGIFStruct.y_offset, mGIFStruct.width, mGIFStruct.height,
                 format, aDepth);
  } else {
    nsRefPtr<imgFrame> currentFrame = GetCurrentFrame();

    // Our first full frame is automatically created by the image decoding
    // infrastructure. Just use it as long as it matches up.
    if (!currentFrame->GetRect().IsEqualEdges(nsIntRect(mGIFStruct.x_offset,
                                                        mGIFStruct.y_offset,
                                                        mGIFStruct.width,
                                                        mGIFStruct.height))) {

      // We need padding on the first frame, which means that we don't draw into
      // part of the image at all. Report that as transparency.
      PostHasTransparency();

      // Regardless of depth of input, image is decoded into 24bit RGB
      NeedNewFrame(mGIFStruct.images_decoded, mGIFStruct.x_offset,
                   mGIFStruct.y_offset, mGIFStruct.width, mGIFStruct.height,
                   format);
    }
  }

  mCurrentFrameIndex = mGIFStruct.images_decoded;
}
Code Example #4
File: nsGIFDecoder2.cpp  Project: hoosteeno/gecko-dev
//******************************************************************************
nsresult
nsGIFDecoder2::BeginImageFrame(uint16_t aDepth)
{
  MOZ_ASSERT(HasSize());

  gfx::SurfaceFormat format;
  if (mGIFStruct.is_transparent) {
    format = gfx::SurfaceFormat::B8G8R8A8;
    PostHasTransparency();
  } else {
    format = gfx::SurfaceFormat::B8G8R8X8;
  }

  nsIntRect frameRect(mGIFStruct.x_offset, mGIFStruct.y_offset,
                      mGIFStruct.width, mGIFStruct.height);

  // Use the correct format: RGB for the first frame and a palette (PAL) for
  // following frames. Include transparency information so that opaque images
  // can be optimized.
  nsresult rv = NS_OK;
  if (mGIFStruct.images_decoded) {
    // Image data is stored with original depth and palette.
    rv = AllocateFrame(mGIFStruct.images_decoded, GetSize(),
                       frameRect, format, aDepth);
  } else {
    if (!nsIntRect(nsIntPoint(), GetSize()).IsEqualEdges(frameRect)) {
      // We need padding on the first frame, which means that we don't draw into
      // part of the image at all. Report that as transparency.
      PostHasTransparency();
    }

    // Regardless of depth of input, the first frame is decoded into 24bit RGB.
    rv = AllocateFrame(mGIFStruct.images_decoded, GetSize(),
                       frameRect, format);
  }

  mCurrentFrameIndex = mGIFStruct.images_decoded;

  return rv;
}
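
Both GIF examples apply the same frame-format policy: the first frame is always decoded into full 32-bit BGRA/BGRX at full size, while subsequent animation frames keep their original bit depth and palette. Here is a hypothetical stand-alone sketch of that policy; the enum and struct are invented for illustration and simplify the real API.

#include <cstdint>

// Invented types for this sketch only.
enum class PixelLayout { BGRA32, BGRX32, Paletted };

struct FramePlan {
  PixelLayout layout;
  uint8_t paletteDepth;  // meaningful only for Paletted frames
};

FramePlan PlanGifFrame(bool aIsFirstFrame, bool aIsTransparent,
                       uint8_t aSourceDepth) {
  if (aIsFirstFrame) {
    // Regardless of the input depth, the first frame is expanded to 32-bit
    // pixels; the alpha variant is chosen when the GIF declares transparency.
    return { aIsTransparent ? PixelLayout::BGRA32 : PixelLayout::BGRX32, 0 };
  }
  // Following frames keep their original depth and palette.
  return { PixelLayout::Paletted, aSourceDepth };
}
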
Code Example #5
LexerTransition<nsIconDecoder::State>
nsIconDecoder::ReadHeader(const char* aData)
{
  // Grab the width and height.
  uint8_t width  = uint8_t(aData[0]);
  uint8_t height = uint8_t(aData[1]);

  // The input is 32bpp, so we expect 4 bytes of data per pixel.
  mBytesPerRow = width * 4;

  // Post our size to the superclass.
  PostSize(width, height);

  // Icons have alpha.
  PostHasTransparency();

  // If we're doing a metadata decode, we're done.
  if (IsMetadataDecode()) {
    return Transition::TerminateSuccess();
  }

  MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
  Maybe<SurfacePipe> pipe =
    SurfacePipeFactory::CreateSurfacePipe(this, 0, Size(), OutputSize(),
                                          FullFrame(), SurfaceFormat::B8G8R8A8,
                                          SurfacePipeFlags());
  if (!pipe) {
    return Transition::TerminateFailure();
  }

  mPipe = Move(*pipe);

  MOZ_ASSERT(mImageData, "Should have a buffer now");

  return Transition::To(State::ROW_OF_PIXELS, mBytesPerRow);
}
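
The header read above implies a very simple stream layout: one width byte, one height byte, then width x height 32bpp pixels at 4 bytes each. A hypothetical helper (not part of nsIconDecoder) that computes the total stream length a caller would need to supply:

#include <cstddef>
#include <cstdint>

// Hypothetical helper, not part of nsIconDecoder.
size_t ExpectedIconStreamLength(uint8_t aWidth, uint8_t aHeight) {
  const size_t headerBytes = 2;                    // width byte + height byte
  const size_t bytesPerRow = size_t(aWidth) * 4;   // 32bpp => 4 bytes per pixel
  return headerBytes + bytesPerRow * size_t(aHeight);
}
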
Code Example #6
File: nsICODecoder.cpp  Project: paulmadore/luckyde
void
nsICODecoder::WriteInternal(const char* aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");
  MOZ_ASSERT(aBuffer);
  MOZ_ASSERT(aCount > 0);

  while (aCount && (mPos < ICONCOUNTOFFSET)) { // Skip to the # of icons.
    if (mPos == 2) { // if the third byte is 1: This is an icon, 2: a cursor
      if ((*aBuffer != 1) && (*aBuffer != 2)) {
        PostDataError();
        return;
      }
      mIsCursor = (*aBuffer == 2);
    }
    mPos++; aBuffer++; aCount--;
  }

  if (mPos == ICONCOUNTOFFSET && aCount >= 2) {
    mNumIcons =
      LittleEndian::readUint16(reinterpret_cast<const uint16_t*>(aBuffer));
    aBuffer += 2;
    mPos += 2;
    aCount -= 2;
  }

  if (mNumIcons == 0) {
    return; // Nothing to do.
  }

  uint16_t colorDepth = 0;

  // If we didn't get a #-moz-resolution, default to PREFICONSIZE.
  if (mResolution.width == 0 && mResolution.height == 0) {
    mResolution.SizeTo(PREFICONSIZE, PREFICONSIZE);
  }

  // A measure of the difference in size between the entry we've found
  // and the requested size. We will choose the smallest image that is
  // >= requested size (i.e. we assume it's better to downscale a larger
  // icon than to upscale a smaller one).
  int32_t diff = INT_MIN;

  // Loop through each entry's dir entry
  while (mCurrIcon < mNumIcons) {
    if (mPos >= DIRENTRYOFFSET + (mCurrIcon * sizeof(mDirEntryArray)) &&
        mPos < DIRENTRYOFFSET + ((mCurrIcon + 1) * sizeof(mDirEntryArray))) {
      uint32_t toCopy = sizeof(mDirEntryArray) -
                        (mPos - DIRENTRYOFFSET - mCurrIcon *
                         sizeof(mDirEntryArray));
      if (toCopy > aCount) {
        toCopy = aCount;
      }
      memcpy(mDirEntryArray + sizeof(mDirEntryArray) - toCopy, aBuffer, toCopy);
      mPos += toCopy;
      aCount -= toCopy;
      aBuffer += toCopy;
    }
    if (aCount == 0) {
      return; // Need more data
    }

    IconDirEntry e;
    if (mPos == (DIRENTRYOFFSET + ICODIRENTRYSIZE) +
                (mCurrIcon * sizeof(mDirEntryArray))) {
      mCurrIcon++;
      ProcessDirEntry(e);
      // We can't use GetRealWidth and GetRealHeight here because those operate
      // on mDirEntry, here we are going through each item in the directory.
      // Calculate the delta between this image's size and the desired size,
      // so we can see if it is better than our current-best option.
      // In the case of several equally-good images, we use the last one.
      int32_t delta = (e.mWidth == 0 ? 256 : e.mWidth) - mResolution.width +
                      (e.mHeight == 0 ? 256 : e.mHeight) - mResolution.height;
      if (e.mBitCount >= colorDepth &&
          ((diff < 0 && delta >= diff) || (delta >= 0 && delta <= diff))) {
        diff = delta;
        mImageOffset = e.mImageOffset;

        // ensure mImageOffset is >= size of the direntry headers (bug #245631)
        uint32_t minImageOffset = DIRENTRYOFFSET +
                                  mNumIcons * sizeof(mDirEntryArray);
        if (mImageOffset < minImageOffset) {
          PostDataError();
          return;
        }

        colorDepth = e.mBitCount;
        memcpy(&mDirEntry, &e, sizeof(IconDirEntry));
      }
    }
  }

  if (mPos < mImageOffset) {
    // Skip to (or at least towards) the desired image offset
    uint32_t toSkip = mImageOffset - mPos;
    if (toSkip > aCount) {
      toSkip = aCount;
    }

    mPos    += toSkip;
    aBuffer += toSkip;
    aCount  -= toSkip;
  }

  // If we are within the first PNGSIGNATURESIZE bytes of the image data,
  // then we have either a BMP or a PNG.  We use the first PNGSIGNATURESIZE
  // bytes to determine which one we have.
  if (mCurrIcon == mNumIcons && mPos >= mImageOffset &&
      mPos < mImageOffset + PNGSIGNATURESIZE) {
    uint32_t toCopy = PNGSIGNATURESIZE - (mPos - mImageOffset);
    if (toCopy > aCount) {
      toCopy = aCount;
    }

    memcpy(mSignature + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;

    mIsPNG = !memcmp(mSignature, nsPNGDecoder::pngSignatureBytes,
                     PNGSIGNATURESIZE);
    if (mIsPNG) {
      mContainedDecoder = new nsPNGDecoder(mImage);
      mContainedDecoder->SetMetadataDecode(IsMetadataDecode());
      mContainedDecoder->SetSendPartialInvalidations(mSendPartialInvalidations);
      if (mFirstFrameDecode) {
        mContainedDecoder->SetIsFirstFrameDecode();
      }
      mContainedDecoder->Init();
      if (!WriteToContainedDecoder(mSignature, PNGSIGNATURESIZE)) {
        return;
      }
    }
  }

  // If we have a PNG, let the PNG decoder do all of the rest of the work
  if (mIsPNG && mContainedDecoder && mPos >= mImageOffset + PNGSIGNATURESIZE) {
    if (!WriteToContainedDecoder(aBuffer, aCount)) {
      return;
    }

    if (!HasSize() && mContainedDecoder->HasSize()) {
      PostSize(mContainedDecoder->GetImageMetadata().GetWidth(),
               mContainedDecoder->GetImageMetadata().GetHeight());
    }

    mPos += aCount;
    aBuffer += aCount;
    aCount = 0;

    // Raymond Chen says that 32bpp only are valid PNG ICOs
    // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
    if (!IsMetadataDecode() &&
        !static_cast<nsPNGDecoder*>(mContainedDecoder.get())->IsValidICO()) {
      PostDataError();
    }
    return;
  }

  // We've processed all of the icon dir entries and are within the
  // bitmap info size
  if (!mIsPNG && mCurrIcon == mNumIcons && mPos >= mImageOffset &&
      mPos >= mImageOffset + PNGSIGNATURESIZE &&
      mPos < mImageOffset + BITMAPINFOSIZE) {

    // As we were decoding, we did not know if we had a PNG signature or the
    // start of a bitmap information header.  At this point we know we had
    // a bitmap information header and not a PNG signature, so fill the bitmap
    // information header with the data it should already have.
    memcpy(mBIHraw, mSignature, PNGSIGNATURESIZE);

    // We've found the icon.
    uint32_t toCopy = sizeof(mBIHraw) - (mPos - mImageOffset);
    if (toCopy > aCount) {
      toCopy = aCount;
    }

    memcpy(mBIHraw + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;
  }

  // If we have a BMP inside the ICO and we have read the BIH header
  if (!mIsPNG && mPos == mImageOffset + BITMAPINFOSIZE) {

    // Make sure we have a sane value for the bitmap information header
    int32_t bihSize = ExtractBIHSizeFromBitmap(reinterpret_cast<int8_t*>
                                               (mBIHraw));
    if (bihSize != BITMAPINFOSIZE) {
      PostDataError();
      return;
    }
    // We are extracting the BPP from the BIH header as it should be trusted
    // over the one we have from the icon header
    mBPP = ExtractBPPFromBitmap(reinterpret_cast<int8_t*>(mBIHraw));

    // Initialize the bitmap decoder, which will do most of the work for us.
    // It handles everything except the AND mask, which isn't present in plain
    // bitmaps. bmpDecoder is a local convenience pointer; mContainedDecoder
    // owns the object and will free it.
    nsBMPDecoder* bmpDecoder = new nsBMPDecoder(mImage);
    mContainedDecoder = bmpDecoder;
    bmpDecoder->SetUseAlphaData(true);
    mContainedDecoder->SetMetadataDecode(IsMetadataDecode());
    mContainedDecoder->SetSendPartialInvalidations(mSendPartialInvalidations);
    if (mFirstFrameDecode) {
      mContainedDecoder->SetIsFirstFrameDecode();
    }
    mContainedDecoder->Init();

    // The ICO format when containing a BMP does not include the 14 byte
    // bitmap file header. To use the code of the BMP decoder we need to
    // generate this header ourselves and feed it to the BMP decoder.
    int8_t bfhBuffer[BMPFILEHEADERSIZE];
    if (!FillBitmapFileHeaderBuffer(bfhBuffer)) {
      PostDataError();
      return;
    }
    if (!WriteToContainedDecoder((const char*)bfhBuffer, sizeof(bfhBuffer))) {
      return;
    }

    // Setup the cursor hot spot if one is present
    SetHotSpotIfCursor();

    // Fix the ICO height from the BIH: the stored height covers both the XOR
    // and AND masks, so halve it so our BMP decoder will understand it.
    if (!FixBitmapHeight(reinterpret_cast<int8_t*>(mBIHraw))) {
      PostDataError();
      return;
    }

    // Fix the ICO width from the BIH.
    if (!FixBitmapWidth(reinterpret_cast<int8_t*>(mBIHraw))) {
      PostDataError();
      return;
    }

    // Write out the BMP's bitmap info header
    if (!WriteToContainedDecoder(mBIHraw, sizeof(mBIHraw))) {
      return;
    }

    PostSize(mContainedDecoder->GetImageMetadata().GetWidth(),
             mContainedDecoder->GetImageMetadata().GetHeight());

    // We have the size. If we're doing a metadata decode, we're done.
    if (IsMetadataDecode()) {
      return;
    }

    // Sometimes the ICO BPP header field is not filled out
    // so we should trust the contained resource over our own
    // information.
    mBPP = bmpDecoder->GetBitsPerPixel();

    // Check to make sure we have valid color settings
    uint16_t numColors = GetNumColors();
    if (numColors == (uint16_t)-1) {
      PostDataError();
      return;
    }
  }

  // If we have a BMP
  if (!mIsPNG && mContainedDecoder && mPos >= mImageOffset + BITMAPINFOSIZE) {
    uint16_t numColors = GetNumColors();
    if (numColors == (uint16_t)-1) {
      PostDataError();
      return;
    }
    // Feed the actual image data (not including headers) into the BMP decoder
    uint32_t bmpDataOffset = mDirEntry.mImageOffset + BITMAPINFOSIZE;
    uint32_t bmpDataEnd = mDirEntry.mImageOffset + BITMAPINFOSIZE +
                          static_cast<nsBMPDecoder*>(mContainedDecoder.get())->
                            GetCompressedImageSize() +
                          4 * numColors;

    // If we are feeding in the core image data, but we have not yet
    // reached the ICO's 'AND buffer mask'
    if (mPos >= bmpDataOffset && mPos < bmpDataEnd) {

      // Figure out how much data the BMP decoder wants
      uint32_t toFeed = bmpDataEnd - mPos;
      if (toFeed > aCount) {
        toFeed = aCount;
      }

      if (!WriteToContainedDecoder(aBuffer, toFeed)) {
        return;
      }

      mPos += toFeed;
      aCount -= toFeed;
      aBuffer += toFeed;
    }

    // If the bitmap is fully processed, treat any left over data as the ICO's
    // 'AND buffer mask' which appears after the bitmap resource.
    if (!mIsPNG && mPos >= bmpDataEnd) {
      // There may be an optional AND bit mask after the data.  It is only
      // used if the alpha data is not already set; per the comment in
      // ICODecoder.h, the alpha data is used for 32bpp bitmaps.  In all other
      // cases the AND mask should be checked.
      if (static_cast<nsBMPDecoder*>(mContainedDecoder.get())->
            GetBitsPerPixel() != 32 ||
          !static_cast<nsBMPDecoder*>(mContainedDecoder.get())->
            HasAlphaData()) {
        uint32_t rowSize = ((GetRealWidth() + 31) / 32) * 4; // + 31 to round up
        if (mPos == bmpDataEnd) {
          mPos++;
          mRowBytes = 0;
          mCurLine = GetRealHeight();
          mRow = (uint8_t*)realloc(mRow, rowSize);
          if (!mRow) {
            PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
            return;
          }
        }

        // Ensure memory has been allocated before decoding.
        MOZ_ASSERT(mRow, "mRow is null");
        if (!mRow) {
          PostDataError();
          return;
        }

        uint8_t sawTransparency = 0;

        while (mCurLine > 0 && aCount > 0) {
          uint32_t toCopy = std::min(rowSize - mRowBytes, aCount);
          if (toCopy) {
            memcpy(mRow + mRowBytes, aBuffer, toCopy);
            aCount -= toCopy;
            aBuffer += toCopy;
            mRowBytes += toCopy;
          }
          if (rowSize == mRowBytes) {
            mCurLine--;
            mRowBytes = 0;

            uint32_t* imageData =
              static_cast<nsBMPDecoder*>(mContainedDecoder.get())->
                                           GetImageData();
            if (!imageData) {
              PostDataError();
              return;
            }
            uint32_t* decoded = imageData + mCurLine * GetRealWidth();
            uint32_t* decoded_end = decoded + GetRealWidth();
            uint8_t* p = mRow;
            uint8_t* p_end = mRow + rowSize;
            while (p < p_end) {
              uint8_t idx = *p++;
              sawTransparency |= idx;
              for (uint8_t bit = 0x80; bit && decoded<decoded_end; bit >>= 1) {
                // Clear pixel completely for transparency.
                if (idx & bit) {
                  *decoded = 0;
                }
                decoded++;
              }
            }
          }
        }

        // If any bits are set in sawTransparency, then we know at least one
        // pixel was transparent.
        if (sawTransparency) {
            PostHasTransparency();
        }
      }
    }
  }
}
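
The directory-entry loop above implements a best-fit selection: among entries with at least the best color depth seen so far, it prefers the smallest icon that is at least as large as the requested size, otherwise the closest one below it. A stand-alone, hypothetical rendering of that heuristic follows; the DirEntry struct and PickBestEntry function are invented for this sketch.

#include <cstdint>
#include <vector>

// Invented types for this sketch only.
struct DirEntry {
  int32_t width, height;   // 0 means 256 in the ICO directory
  uint16_t bitCount;
  uint32_t imageOffset;
};

const DirEntry* PickBestEntry(const std::vector<DirEntry>& aEntries,
                              int32_t aWantWidth, int32_t aWantHeight) {
  const DirEntry* best = nullptr;
  uint16_t bestDepth = 0;
  int32_t diff = INT32_MIN;  // same bookkeeping as the decoder's 'diff'
  for (const DirEntry& e : aEntries) {
    int32_t w = e.width == 0 ? 256 : e.width;
    int32_t h = e.height == 0 ? 256 : e.height;
    // Delta between this entry's size and the requested size.
    int32_t delta = (w - aWantWidth) + (h - aWantHeight);
    if (e.bitCount >= bestDepth &&
        ((diff < 0 && delta >= diff) || (delta >= 0 && delta <= diff))) {
      diff = delta;
      bestDepth = e.bitCount;
      best = &e;
    }
  }
  return best;
}
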
Code Example #7
void
nsIconDecoder::WriteInternal(const char* aBuffer, uint32_t aCount,
                             DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  uint32_t bytesToRead = 0;

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (uint8_t)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (uint8_t)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);

        PostHasTransparency();

        if (HasError()) {
          // Setting the size led to an error.
          mState = iconStateFinished;
          return;
        }

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        if (!mImageData) {
          PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
          return;
        }

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels: {

        // How many bytes are we reading?
        bytesToRead = std::min(aCount, mImageDataLength - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Performance isn't critical here, so our update rectangle is
        // always the full icon
        nsIntRect r(0, 0, mWidth, mHeight);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mImageDataLength) {
          PostFrameStop();
          PostDecodeDone();
          mState = iconStateFinished;
        }
        break;
      }

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;
    }
  }
}
Code Example #8
File: nsGIFDecoder2.cpp  Project: hoosteeno/gecko-dev
void
nsGIFDecoder2::WriteInternal(const char* aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  // These variables changed names; renaming would make a much bigger patch :(
  const uint8_t* buf = (const uint8_t*)aBuffer;
  uint32_t len = aCount;

  const uint8_t* q = buf;

  // If a previous call left partial data in the hold buffer, complete that
  // block first; likewise, if we are in the middle of filling a colormap,
  // complete the colormap first.
  uint8_t* p =
    (mGIFStruct.state ==
      gif_global_colormap) ? (uint8_t*) mGIFStruct.global_colormap :
        (mGIFStruct.state == gif_image_colormap) ? (uint8_t*) mColormap :
          (mGIFStruct.bytes_in_hold) ? mGIFStruct.hold : nullptr;

  if (len == 0 && buf == nullptr) {
    // We've just gotten the frame we asked for. Time to use the data we
    // stashed away.
    len = mGIFStruct.bytes_in_hold;
    q = buf = p;
  } else if (p) {
    // Add what we have so far to the block
    uint32_t l = std::min(len, mGIFStruct.bytes_to_consume);
    memcpy(p+mGIFStruct.bytes_in_hold, buf, l);

    if (l < mGIFStruct.bytes_to_consume) {
      // Not enough in 'buf' to complete current block, get more
      mGIFStruct.bytes_in_hold += l;
      mGIFStruct.bytes_to_consume -= l;
      return;
    }
    // Point 'q' to complete block in hold (or in colormap)
    q = p;
  }

  // Invariant:
  //    'q' points to the start of the block currently being processed
  //    (hold buffer, colormap, or 'buf').
  //    'bytes_to_consume' is the number of bytes to consume from 'buf'.
  //    'buf' points to the bytes still to be consumed from the input buffer.
  //    'len' is the number of bytes left in the input buffer from 'buf'.
  //    On each iteration of the for loop, 'buf' is advanced by
  //    'bytes_to_consume' and 'len' is adjusted accordingly, so that on the
  //    next iteration 'q' points at the next block.

  for (;len >= mGIFStruct.bytes_to_consume; q=buf, mGIFStruct.bytes_in_hold = 0)
  {
    // Consume the current block from the buffer; 'q' stays pointed at it
    buf += mGIFStruct.bytes_to_consume;
    len -= mGIFStruct.bytes_to_consume;

    switch (mGIFStruct.state) {
    case gif_lzw:
      if (!DoLzw(q)) {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(1, gif_sub_block);
      break;

    case gif_lzw_start: {
      // Make sure the transparent pixel is transparent in the colormap
      if (mGIFStruct.is_transparent) {
        // Save old value so we can restore it later
        if (mColormap == mGIFStruct.global_colormap) {
            mOldColor = mColormap[mGIFStruct.tpixel];
        }
        mColormap[mGIFStruct.tpixel] = 0;
      }

      // Initialize LZW parser/decoder
      mGIFStruct.datasize = *q;
      const int clear_code = ClearCode();
      if (mGIFStruct.datasize > MAX_LZW_BITS ||
          clear_code >= MAX_BITS) {
        mGIFStruct.state = gif_error;
        break;
      }

      mGIFStruct.avail = clear_code + 2;
      mGIFStruct.oldcode = -1;
      mGIFStruct.codesize = mGIFStruct.datasize + 1;
      mGIFStruct.codemask = (1 << mGIFStruct.codesize) - 1;
      mGIFStruct.datum = mGIFStruct.bits = 0;

      // init the tables
      for (int i = 0; i < clear_code; i++) {
        mGIFStruct.suffix[i] = i;
      }

      mGIFStruct.stackp = mGIFStruct.stack;

      GETN(1, gif_sub_block);
    }
    break;

    // All GIF files begin with "GIF87a" or "GIF89a"
    case gif_type:
      if (!strncmp((char*)q, "GIF89a", 6)) {
        mGIFStruct.version = 89;
      } else if (!strncmp((char*)q, "GIF87a", 6)) {
        mGIFStruct.version = 87;
      } else {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(7, gif_global_header);
      break;

    case gif_global_header:
      // This is the height and width of the "screen" or
      // frame into which images are rendered.  The
      // individual images can be smaller than the
      // screen size and located with an origin anywhere
      // within the screen.

      mGIFStruct.screen_width = GETINT16(q);
      mGIFStruct.screen_height = GETINT16(q + 2);
      mGIFStruct.global_colormap_depth = (q[4]&0x07) + 1;

      if (IsSizeDecode()) {
        MOZ_ASSERT(!mGIFOpen, "Gif should not be open at this point");
        PostSize(mGIFStruct.screen_width, mGIFStruct.screen_height);
        return;
      }

      // screen_bgcolor is not used
      //mGIFStruct.screen_bgcolor = q[5];
      // q[6] = Pixel Aspect Ratio
      //   Not used
      //   float aspect = (float)((q[6] + 15) / 64.0);

      if (q[4] & 0x80) {
        // Get the global colormap
        const uint32_t size = (3 << mGIFStruct.global_colormap_depth);
        if (len < size) {
          // Use 'hold' pattern to get the global colormap
          GETN(size, gif_global_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mGIFStruct.global_colormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_global_colormap);
        break;
      }

      GETN(1, gif_image_start);
      break;

    case gif_global_colormap:
      // Everything is already copied into global_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mGIFStruct.global_colormap,
                      1<<mGIFStruct.global_colormap_depth);
      GETN(1, gif_image_start);
      break;

    case gif_image_start:
      switch (*q) {
        case GIF_TRAILER:
          mGIFStruct.state = gif_done;
          break;

        case GIF_EXTENSION_INTRODUCER:
          GETN(2, gif_extension);
          break;

        case GIF_IMAGE_SEPARATOR:
          GETN(9, gif_image_header);
          break;

        default:
          // If we get anything other than GIF_IMAGE_SEPARATOR,
          // GIF_EXTENSION_INTRODUCER, or GIF_TRAILER, there is extraneous data
          // between blocks. The GIF87a spec tells us to keep reading
          // until we find an image separator, but GIF89a says such
          // a file is corrupt. We follow GIF89a and bail out.
          if (mGIFStruct.images_decoded > 0) {
            // The file is corrupt, but one or more images have
            // been decoded correctly. In this case, we proceed
            // as if the file were correctly terminated and set
            // the state to gif_done, so the GIF will display.
            mGIFStruct.state = gif_done;
          } else {
            // No images decoded, there is nothing to display.
            mGIFStruct.state = gif_error;
          }
      }
      break;

    case gif_extension:
      mGIFStruct.bytes_to_consume = q[1];
      if (mGIFStruct.bytes_to_consume) {
        switch (*q) {
        case GIF_GRAPHIC_CONTROL_LABEL:
          // The GIF spec mandates that the GIFControlExtension header block
          // length is 4 bytes, and the parser for this block reads 4 bytes,
          // so we must enforce that the buffer contains at least this many
          // bytes. If the GIF specifies a different length, we allow that, so
          // long as it's larger; the additional data will simply be ignored.
          mGIFStruct.state = gif_control_extension;
          mGIFStruct.bytes_to_consume =
            std::max(mGIFStruct.bytes_to_consume, 4u);
          break;

        // The GIF spec also specifies the lengths of the following two
        // extensions' headers (as 12 and 11 bytes, respectively). Because
        // we ignore the plain text extension entirely and sanity-check the
        // actual length of the application extension header before reading it,
        // we allow GIFs to deviate from these values in either direction. This
        // is important for real-world compatibility, as GIFs in the wild exist
        // with application extension headers that are both shorter and longer
        // than 11 bytes.
        case GIF_APPLICATION_EXTENSION_LABEL:
          mGIFStruct.state = gif_application_extension;
          break;

        case GIF_PLAIN_TEXT_LABEL:
          mGIFStruct.state = gif_skip_block;
          break;

        case GIF_COMMENT_LABEL:
          mGIFStruct.state = gif_consume_comment;
          break;

        default:
          mGIFStruct.state = gif_skip_block;
        }
      } else {
        GETN(1, gif_image_start);
      }
      break;

    case gif_consume_block:
      if (!*q) {
        GETN(1, gif_image_start);
      } else {
        GETN(*q, gif_skip_block);
      }
      break;

    case gif_skip_block:
      GETN(1, gif_consume_block);
      break;

    case gif_control_extension:
      mGIFStruct.is_transparent = *q & 0x1;
      mGIFStruct.tpixel = q[3];
      mGIFStruct.disposal_method = ((*q) >> 2) & 0x7;

      if (mGIFStruct.disposal_method == 4) {
        // Some specs say 3rd bit (value 4), other specs say value 3.
        // Let's choose 3 (the more popular).
        mGIFStruct.disposal_method = 3;
      } else if (mGIFStruct.disposal_method > 4) {
        // This GIF is using a disposal method which is undefined in the spec.
        // Treat it as DisposalMethod::NOT_SPECIFIED.
        mGIFStruct.disposal_method = 0;
      }

      {
        DisposalMethod method = DisposalMethod(mGIFStruct.disposal_method);
        if (method == DisposalMethod::CLEAR_ALL ||
            method == DisposalMethod::CLEAR) {
          // We may have to display the background under this image during
          // animation playback, so we regard it as transparent.
          PostHasTransparency();
        }
      }

      mGIFStruct.delay_time = GETINT16(q + 1) * 10;
      GETN(1, gif_consume_block);
      break;

    case gif_comment_extension:
      if (*q) {
        GETN(*q, gif_consume_comment);
      } else {
        GETN(1, gif_image_start);
      }
      break;

    case gif_consume_comment:
      GETN(1, gif_comment_extension);
      break;

    case gif_application_extension:
      // Check for netscape application extension
      if (mGIFStruct.bytes_to_consume == 11 &&
          (!strncmp((char*)q, "NETSCAPE2.0", 11) ||
           !strncmp((char*)q, "ANIMEXTS1.0", 11))) {
        GETN(1, gif_netscape_extension_block);
      } else {
        GETN(1, gif_consume_block);
      }
      break;

    // Netscape-specific GIF extension: animation looping
    case gif_netscape_extension_block:
      if (*q) {
        // We might need to consume 3 bytes in
        // gif_consume_netscape_extension, so make sure we have at least that.
        GETN(std::max(3, static_cast<int>(*q)), gif_consume_netscape_extension);
      } else {
        GETN(1, gif_image_start);
      }
      break;

    // Parse netscape-specific application extensions
    case gif_consume_netscape_extension:
      switch (q[0] & 7) {
        case 1:
          // Loop entire animation specified # of times.  Only read the
          // loop count during the first iteration.
          mGIFStruct.loop_count = GETINT16(q + 1);
          GETN(1, gif_netscape_extension_block);
          break;

        case 2:
          // Wait for specified # of bytes to enter buffer

          // Don't do this, this extension doesn't exist (isn't used at all)
          // and doesn't do anything, as our streaming/buffering takes care
          // of it all...
          // See: http://semmix.pl/color/exgraf/eeg24.htm
          GETN(1, gif_netscape_extension_block);
          break;

        default:
          // 0,3-7 are yet to be defined netscape extension codes
          mGIFStruct.state = gif_error;
      }
      break;

    case gif_image_header: {
      // Get image offsets, with respect to the screen origin
      mGIFStruct.x_offset = GETINT16(q);
      mGIFStruct.y_offset = GETINT16(q + 2);

      // Get image width and height.
      mGIFStruct.width  = GETINT16(q + 4);
      mGIFStruct.height = GETINT16(q + 6);

      if (!mGIFStruct.images_decoded) {
        // Work around broken GIF files where the logical screen
        // size has weird width or height.  We assume that GIF87a
        // files don't contain animations.
        if ((mGIFStruct.screen_height < mGIFStruct.height) ||
            (mGIFStruct.screen_width < mGIFStruct.width) ||
            (mGIFStruct.version == 87)) {
          mGIFStruct.screen_height = mGIFStruct.height;
          mGIFStruct.screen_width = mGIFStruct.width;
          mGIFStruct.x_offset = 0;
          mGIFStruct.y_offset = 0;
        }
        // Create the image container with the right size.
        BeginGIF();
        if (HasError()) {
          // Setting the size led to an error.
          mGIFStruct.state = gif_error;
          return;
        }

        // If we were doing a size decode, we're done
        if (IsSizeDecode()) {
          return;
        }
      }

      // Work around more broken GIF files that have zero image width or height
      if (!mGIFStruct.height || !mGIFStruct.width) {
        mGIFStruct.height = mGIFStruct.screen_height;
        mGIFStruct.width = mGIFStruct.screen_width;
        if (!mGIFStruct.height || !mGIFStruct.width) {
          mGIFStruct.state = gif_error;
          break;
        }
      }

      // Depth of colors is determined by colormap
      // (q[8] & 0x80) indicates local colormap
      // bits per pixel is (q[8]&0x07 + 1) when local colormap is set
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80) {
        depth = (q[8]&0x07) + 1;
      }
      uint32_t realDepth = depth;
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      }
      // Mask to limit the color values within the colormap
      mColorMask = 0xFF >> (8 - realDepth);

      if (NS_FAILED(BeginImageFrame(realDepth))) {
        mGIFStruct.state = gif_error;
        return;
      }

      // FALL THROUGH
    }

    case gif_image_header_continue: {
      // While decoders can reuse frames, we unconditionally increment
      // mGIFStruct.images_decoded when we're done with a frame, so we both can
      // and need to zero out the colormap and image data after every new frame.
      memset(mImageData, 0, mImageDataLength);
      if (mColormap) {
        memset(mColormap, 0, mColormapSize);
      }

      if (!mGIFStruct.images_decoded) {
        // Send a onetime invalidation for the first frame if it has a y-axis
        // offset. Otherwise, the area may never be refreshed and the
        // placeholder will remain on the screen. (Bug 37589)
        if (mGIFStruct.y_offset > 0) {
          nsIntRect r(0, 0, mGIFStruct.screen_width, mGIFStruct.y_offset);
          PostInvalidation(r);
        }
      }

      if (q[8] & 0x40) {
        mGIFStruct.interlaced = true;
        mGIFStruct.ipass = 1;
      } else {
        mGIFStruct.interlaced = false;
        mGIFStruct.ipass = 0;
      }

      // Only apply the Haeberli display hack on the first frame
      mGIFStruct.progressive_display = (mGIFStruct.images_decoded == 0);

      // Clear state from last image
      mGIFStruct.irow = 0;
      mGIFStruct.rows_remaining = mGIFStruct.height;
      mGIFStruct.rowp = mImageData;

      // Depth of colors is determined by colormap
      // (q[8] & 0x80) indicates local colormap
      // bits per pixel is (q[8]&0x07 + 1) when local colormap is set
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80) {
        depth = (q[8]&0x07) + 1;
      }
      uint32_t realDepth = depth;
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      }
      // has a local colormap?
      if (q[8] & 0x80) {
        mGIFStruct.local_colormap_size = 1 << depth;
        if (!mGIFStruct.images_decoded) {
          // First frame has local colormap, allocate space for it
          // as the image frame doesn't have its own palette
          mColormapSize = sizeof(uint32_t) << realDepth;
          if (!mGIFStruct.local_colormap) {
            mGIFStruct.local_colormap = (uint32_t*)moz_xmalloc(mColormapSize);
          }
          mColormap = mGIFStruct.local_colormap;
        }
        const uint32_t size = 3 << depth;
        if (mColormapSize > size) {
          // Clear the unfilled part of the colormap
          memset(((uint8_t*)mColormap) + size, 0, mColormapSize - size);
        }
        if (len < size) {
          // Use 'hold' pattern to get the image colormap
          GETN(size, gif_image_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mColormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_image_colormap);
        break;
      } else {
        // Switch back to the global palette
        if (mGIFStruct.images_decoded) {
          // Copy global colormap into the palette of current frame
          memcpy(mColormap, mGIFStruct.global_colormap, mColormapSize);
        } else {
          mColormap = mGIFStruct.global_colormap;
        }
      }
      GETN(1, gif_lzw_start);
    }
    break;

    case gif_image_colormap:
      // Everything is already copied into local_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mColormap, mGIFStruct.local_colormap_size);
      GETN(1, gif_lzw_start);
      break;

    case gif_sub_block:
      mGIFStruct.count = *q;
      if (mGIFStruct.count) {
        // Still working on the same image: Process next LZW data block
        // Make sure there are still rows left. If the GIF data
        // is corrupt, we may not get an explicit terminator.
        if (!mGIFStruct.rows_remaining) {
#ifdef DONT_TOLERATE_BROKEN_GIFS
          mGIFStruct.state = gif_error;
          break;
#else
          // This is an illegal GIF, but we remain tolerant.
          GETN(1, gif_sub_block);
#endif
          if (mGIFStruct.count == GIF_TRAILER) {
            // Found a terminator anyway, so consider the image done
            GETN(1, gif_done);
            break;
          }
        }
        GETN(mGIFStruct.count, gif_lzw);
      } else {
        // See if there are any more images in this sequence.
        EndImageFrame();
        GETN(1, gif_image_start);
      }
      break;

    case gif_done:
      MOZ_ASSERT(!IsSizeDecode(), "Size decodes shouldn't reach gif_done");
      FinishInternal();
      goto done;

    case gif_error:
      PostDataError();
      return;

    // We shouldn't ever get here.
    default:
      MOZ_ASSERT_UNREACHABLE("Unexpected mGIFStruct.state");
      PostDecoderError(NS_ERROR_UNEXPECTED);
      return;
    }
  }

  // if an error state is set but no data remains, code flow reaches here
  if (mGIFStruct.state == gif_error) {
      PostDataError();
      return;
  }

  // Copy the leftover into mGIFStruct.hold
  if (len) {
    // Add what we have so far to the block
    if (mGIFStruct.state != gif_global_colormap &&
        mGIFStruct.state != gif_image_colormap) {
      if (!SetHold(buf, len)) {
        PostDataError();
        return;
      }
    } else {
      uint8_t* p = (mGIFStruct.state == gif_global_colormap) ?
                    (uint8_t*)mGIFStruct.global_colormap :
                    (uint8_t*)mColormap;
      memcpy(p, buf, len);
      mGIFStruct.bytes_in_hold = len;
    }

    mGIFStruct.bytes_to_consume -= len;
  }

// We want to flush before returning if we're on the first frame
done:
  if (!mGIFStruct.images_decoded) {
    FlushImageData();
    mLastFlushedRow = mCurrentRow;
    mLastFlushedPass = mCurrentPass;
  }
}
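
The invariant comment in the example above describes the "hold" pattern: a streaming parser that needs a fixed number of bytes for its next state stashes any partial block and completes it from the next input chunk. Below is a hypothetical stand-alone sketch of that pattern, not Mozilla code; the BlockAccumulator class and its members are invented for illustration.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Invented class for this sketch only; mirrors bytes_in_hold/bytes_to_consume.
class BlockAccumulator {
public:
  explicit BlockAccumulator(size_t aBlockSize) : mNeeded(aBlockSize) {}

  // Consumes input from aBuf/aLen and returns a pointer to a complete block,
  // or nullptr if more data is still required. aBuf and aLen are advanced
  // past whatever was consumed.
  const uint8_t* Feed(const uint8_t*& aBuf, size_t& aLen) {
    if (mHold.empty() && aLen >= mNeeded) {
      // Fast path: the whole block is available in the caller's buffer.
      const uint8_t* block = aBuf;
      aBuf += mNeeded;
      aLen -= mNeeded;
      return block;
    }
    // Slow path: accumulate into the hold buffer across calls.
    size_t take = std::min(mNeeded - mHold.size(), aLen);
    mHold.insert(mHold.end(), aBuf, aBuf + take);
    aBuf += take;
    aLen -= take;
    if (mHold.size() < mNeeded) {
      return nullptr;  // still incomplete; wait for the next chunk
    }
    return mHold.data();
  }

  // Prepares for the next block of a (possibly different) size, much like GETN.
  void Next(size_t aNextBlockSize) {
    mHold.clear();
    mNeeded = aNextBlockSize;
  }

private:
  std::vector<uint8_t> mHold;
  size_t mNeeded;
};
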
Code Example #9
File: nsWEBPDecoder.cpp  Project: AOSC-Dev/Pale-Moon
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  // The only valid format for WebP decoding, for both alpha and non-alpha
  // images, is BGRA, where opaque images have an A of 255.
  // Assume transparency for all images.
  // XXX: This could be compositor-optimized by doing a one-time check for
  // all-255 alpha pixels, but that might interfere with progressive
  // decoding. Probably not worth it?
  PostHasTransparency();
  
  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Transfer from mData to mImageData
  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      for (int pix = 0; pix < width; pix++) {
        // RGBA -> BGRA
        uint32_t DataOffset = 4 * (line * width + pix);
        mImageData[DataOffset+0] = mData[DataOffset+2];
        mImageData[DataOffset+1] = mData[DataOffset+1];
        mImageData[DataOffset+2] = mData[DataOffset+0];
        mImageData[DataOffset+3] = mData[DataOffset+3];
      }
    } 

    // Invalidate
    nsIntRect r(0, mLastLine, width, lastLineRead);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}