Example #1
0
void
nsWEBPDecoder::InitInternal()
{
  if (!WebPInitDecBuffer(&mDecBuf)) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }
  mLastLine = 0;
  mDecBuf.colorspace = MODE_rgbA;
  mDecoder = WebPINewDecoder(&mDecBuf);
  if (!mDecoder) {
    PostDecoderError(NS_ERROR_FAILURE);
  }
}
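
For context, here is a minimal standalone sketch (not the Mozilla code above) of how the libwebp incremental API configured in InitInternal() is typically driven: the decoder created by WebPINewDecoder() is fed chunks with WebPIAppend() and queried for the rows finished so far. The helper name and the simplified error handling are illustrative assumptions.

#include <webp/decode.h>
#include <cstdint>
#include <cstdio>

// Feed one chunk of a WebP stream to an incremental decoder and report how
// many rows have been fully decoded so far. Returns false on a hard error.
static bool FeedWebPChunk(WebPIDecoder* idec, const uint8_t* data, size_t len)
{
  VP8StatusCode status = WebPIAppend(idec, data, len);
  if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED) {
    return false;  // corrupt data or out of memory
  }

  int lastRow = 0, width = 0, height = 0, stride = 0;
  // Rows [0, lastRow) are complete and safe to read.
  if (WebPIDecGetRGB(idec, &lastRow, &width, &height, &stride)) {
    std::printf("decoded %d of %d rows\n", lastRow, height);
  }
  return true;
}

// Usage sketch, mirroring the setup above:
//   WebPDecBuffer buf;
//   WebPInitDecBuffer(&buf);
//   buf.colorspace = MODE_rgbA;              // premultiplied alpha, as above
//   WebPIDecoder* idec = WebPINewDecoder(&buf);
//   ... call FeedWebPChunk() for each incoming chunk ...
//   WebPIDelete(idec);
//   WebPFreeDecBuffer(&buf);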
Example #2
0
void
nsJPEGDecoder::InitInternal()
{
  mCMSMode = gfxPlatform::GetCMSMode();
  if ((mDecodeFlags & DECODER_NO_COLORSPACE_CONVERSION) != 0)
    mCMSMode = eCMSMode_Off;

  /* We set up the normal JPEG error routines, then override error_exit. */
  mInfo.err = jpeg_std_error(&mErr.pub);
  mErr.pub.error_exit = my_error_exit;
  /* Establish the setjmp return context for my_error_exit to use. */
  if (setjmp(mErr.setjmp_buffer)) {
    /* If we get here, the JPEG code has signaled an error.
     * We need to clean up the JPEG object, close the input file, and return.
     */
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  /* Step 1: allocate and initialize JPEG decompression object */
  jpeg_create_decompress(&mInfo);
  /* Set the source manager */
  mInfo.src = &mSourceMgr;

  /* Step 2: specify data source (eg, a file) */

  /* Setup callback functions. */
  mSourceMgr.init_source = init_source;
  mSourceMgr.fill_input_buffer = fill_input_buffer;
  mSourceMgr.skip_input_data = skip_input_data;
  mSourceMgr.resync_to_restart = jpeg_resync_to_restart;
  mSourceMgr.term_source = term_source;

  /* Record app markers for ICC data */
  for (uint32_t m = 0; m < 16; m++)
    jpeg_save_markers(&mInfo, JPEG_APP0 + m, 0xFFFF);
}
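
The error_exit override installed above follows the standard libjpeg convention from its example.c: replace the default handler, which would call exit(), with one that longjmp()s back to the setjmp point. A hedged sketch of that convention with illustrative names (the decoder above passes an nsresult to longjmp() rather than 1):

#include <csetjmp>
#include <cstdio>
#include <jpeglib.h>

// Error manager that extends the standard one with a jump buffer.
struct DecoderErrorMgr {
  jpeg_error_mgr pub;      // "public" libjpeg fields; must come first
  jmp_buf setjmp_buffer;   // where error_exit() jumps back to
};

// Called by libjpeg on a fatal error instead of terminating the process.
static void my_error_exit(j_common_ptr cinfo)
{
  // Safe because 'pub' is the first member of DecoderErrorMgr.
  DecoderErrorMgr* err = reinterpret_cast<DecoderErrorMgr*>(cinfo->err);
  longjmp(err->setjmp_buffer, 1);  // resumes after setjmp() in the caller
}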
Example #3
0
void
nsICODecoder::WriteInternal(const char* aBuffer, uint32_t aCount, DecodeStrategy aStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  if (!aCount) {
    if (mContainedDecoder) {
      WriteToContainedDecoder(aBuffer, aCount, aStrategy);
    }
    return;
  }

  while (aCount && (mPos < ICONCOUNTOFFSET)) { // Skip to the # of icons.
    if (mPos == 2) { // if the third byte is 1: This is an icon, 2: a cursor
      if ((*aBuffer != 1) && (*aBuffer != 2)) {
        PostDataError();
        return;
      }
      mIsCursor = (*aBuffer == 2);
    }
    mPos++; aBuffer++; aCount--;
  }

  if (mPos == ICONCOUNTOFFSET && aCount >= 2) {
    mNumIcons = LittleEndian::readUint16(reinterpret_cast<const uint16_t*>(aBuffer));
    aBuffer += 2;
    mPos += 2;
    aCount -= 2;
  }

  if (mNumIcons == 0)
    return; // Nothing to do.

  uint16_t colorDepth = 0;
  nsIntSize prefSize = mImage.GetRequestedResolution();
  if (prefSize.width == 0 && prefSize.height == 0) {
    prefSize.SizeTo(PREFICONSIZE, PREFICONSIZE);
  }

  // A measure of the difference in size between the entry we've found
  // and the requested size. We will choose the smallest image that is
  // >= requested size (i.e. we assume it's better to downscale a larger
  // icon than to upscale a smaller one).
  int32_t diff = INT_MIN;

  // Loop through each entry's dir entry
  while (mCurrIcon < mNumIcons) { 
    if (mPos >= DIRENTRYOFFSET + (mCurrIcon * sizeof(mDirEntryArray)) && 
        mPos < DIRENTRYOFFSET + ((mCurrIcon + 1) * sizeof(mDirEntryArray))) {
      uint32_t toCopy = sizeof(mDirEntryArray) - 
                        (mPos - DIRENTRYOFFSET - mCurrIcon * sizeof(mDirEntryArray));
      if (toCopy > aCount) {
        toCopy = aCount;
      }
      memcpy(mDirEntryArray + sizeof(mDirEntryArray) - toCopy, aBuffer, toCopy);
      mPos += toCopy;
      aCount -= toCopy;
      aBuffer += toCopy;
    }
    if (aCount == 0)
      return; // Need more data

    IconDirEntry e;
    if (mPos == (DIRENTRYOFFSET + ICODIRENTRYSIZE) + 
                (mCurrIcon * sizeof(mDirEntryArray))) {
      mCurrIcon++;
      ProcessDirEntry(e);
      // We can't use GetRealWidth and GetRealHeight here because those operate
      // on mDirEntry, here we are going through each item in the directory.
      // Calculate the delta between this image's size and the desired size,
      // so we can see if it is better than our current-best option.
      // In the case of several equally-good images, we use the last one.
      int32_t delta = (e.mWidth == 0 ? 256 : e.mWidth) - prefSize.width +
                      (e.mHeight == 0 ? 256 : e.mHeight) - prefSize.height;
      if (e.mBitCount >= colorDepth &&
          ((diff < 0 && delta >= diff) || (delta >= 0 && delta <= diff))) {
        diff = delta;
        mImageOffset = e.mImageOffset;

        // ensure mImageOffset is >= size of the direntry headers (bug #245631)
        uint32_t minImageOffset = DIRENTRYOFFSET + 
                                  mNumIcons * sizeof(mDirEntryArray);
        if (mImageOffset < minImageOffset) {
          PostDataError();
          return;
        }

        colorDepth = e.mBitCount;
        memcpy(&mDirEntry, &e, sizeof(IconDirEntry));
      }
    }
  }

  if (mPos < mImageOffset) {
    // Skip to (or at least towards) the desired image offset
    uint32_t toSkip = mImageOffset - mPos;
    if (toSkip > aCount)
      toSkip = aCount;

    mPos    += toSkip;
    aBuffer += toSkip;
    aCount  -= toSkip;
  }

  // If we are within the first PNGSIGNATURESIZE bytes of the image data,
  // then we have either a BMP or a PNG.  We use the first PNGSIGNATURESIZE
  // bytes to determine which one we have.
  if (mCurrIcon == mNumIcons && mPos >= mImageOffset && 
      mPos < mImageOffset + PNGSIGNATURESIZE)
  {
    uint32_t toCopy = PNGSIGNATURESIZE - (mPos - mImageOffset);
    if (toCopy > aCount) {
      toCopy = aCount;
    }

    memcpy(mSignature + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;

    mIsPNG = !memcmp(mSignature, nsPNGDecoder::pngSignatureBytes, 
                     PNGSIGNATURESIZE);
    if (mIsPNG) {
      mContainedDecoder = new nsPNGDecoder(mImage);
      mContainedDecoder->SetObserver(mObserver);
      mContainedDecoder->SetSizeDecode(IsSizeDecode());
      mContainedDecoder->InitSharedDecoder(mImageData, mImageDataLength,
                                           mColormap, mColormapSize,
                                           mCurrentFrame);
      if (!WriteToContainedDecoder(mSignature, PNGSIGNATURESIZE, aStrategy)) {
        return;
      }
    }
  }

  // If we have a PNG, let the PNG decoder do all of the rest of the work
  if (mIsPNG && mContainedDecoder && mPos >= mImageOffset + PNGSIGNATURESIZE) {
    if (!WriteToContainedDecoder(aBuffer, aCount, aStrategy)) {
      return;
    }

    if (!HasSize() && mContainedDecoder->HasSize()) {
      PostSize(mContainedDecoder->GetImageMetadata().GetWidth(),
               mContainedDecoder->GetImageMetadata().GetHeight());
    }

    mPos += aCount;
    aBuffer += aCount;
    aCount = 0;

    // Raymond Chen says that only 32bpp PNGs are valid inside ICOs
    // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
    if (!IsSizeDecode() &&
        !static_cast<nsPNGDecoder*>(mContainedDecoder.get())->IsValidICO()) {
      PostDataError();
    }
    return;
  }

  // We've processed all of the icon dir entries and are within the 
  // bitmap info size
  if (!mIsPNG && mCurrIcon == mNumIcons && mPos >= mImageOffset && 
      mPos >= mImageOffset + PNGSIGNATURESIZE && 
      mPos < mImageOffset + BITMAPINFOSIZE) {

    // As we were decoding, we did not know if we had a PNG signature or the
    // start of a bitmap information header.  At this point we know we had
    // a bitmap information header and not a PNG signature, so fill the bitmap
    // information header with the data it should already have.
    memcpy(mBIHraw, mSignature, PNGSIGNATURESIZE);

    // We've found the icon.
    uint32_t toCopy = sizeof(mBIHraw) - (mPos - mImageOffset);
    if (toCopy > aCount)
      toCopy = aCount;

    memcpy(mBIHraw + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;
  }

  // If we have a BMP inside the ICO and we have read the BIH header
  if (!mIsPNG && mPos == mImageOffset + BITMAPINFOSIZE) {

    // Make sure we have a sane value for the bitmap information header
    int32_t bihSize = ExtractBIHSizeFromBitmap(reinterpret_cast<int8_t*>(mBIHraw));
    if (bihSize != BITMAPINFOSIZE) {
      PostDataError();
      return;
    }
    // We are extracting the BPP from the BIH header as it should be trusted 
    // over the one we have from the icon header
    mBPP = ExtractBPPFromBitmap(reinterpret_cast<int8_t*>(mBIHraw));
    
    // Initialize the bitmap decoder, which will do most of the work for us.
    // It handles everything except the AND mask, which isn't present in bitmaps.
    // bmpDecoder is a local alias for convenience; mContainedDecoder owns it.
    nsBMPDecoder *bmpDecoder = new nsBMPDecoder(mImage);
    mContainedDecoder = bmpDecoder;
    bmpDecoder->SetUseAlphaData(true);
    mContainedDecoder->SetObserver(mObserver);
    mContainedDecoder->SetSizeDecode(IsSizeDecode());
    mContainedDecoder->InitSharedDecoder(mImageData, mImageDataLength,
                                         mColormap, mColormapSize,
                                         mCurrentFrame);

    // The ICO format when containing a BMP does not include the 14 byte
    // bitmap file header. To use the code of the BMP decoder we need to 
    // generate this header ourselves and feed it to the BMP decoder.
    int8_t bfhBuffer[BMPFILEHEADERSIZE];
    if (!FillBitmapFileHeaderBuffer(bfhBuffer)) {
      PostDataError();
      return;
    }
    if (!WriteToContainedDecoder((const char*)bfhBuffer, sizeof(bfhBuffer), aStrategy)) {
      return;
    }

    // Setup the cursor hot spot if one is present
    SetHotSpotIfCursor();

    // Fix the ICO height from the BIH.
    // Fix the height on the BIH to be /2 so our BMP decoder will understand.
    if (!FixBitmapHeight(reinterpret_cast<int8_t*>(mBIHraw))) {
      PostDataError();
      return;
    }

    // Fix the ICO width from the BIH.
    if (!FixBitmapWidth(reinterpret_cast<int8_t*>(mBIHraw))) {
      PostDataError();
      return;
    }

    // Write out the BMP's bitmap info header
    if (!WriteToContainedDecoder(mBIHraw, sizeof(mBIHraw), aStrategy)) {
      return;
    }

    PostSize(mContainedDecoder->GetImageMetadata().GetWidth(),
             mContainedDecoder->GetImageMetadata().GetHeight());

    // We have the size. If we're doing a size decode, we got what
    // we came for.
    if (IsSizeDecode())
      return;

    // Sometimes the ICO BPP header field is not filled out
    // so we should trust the contained resource over our own
    // information.
    mBPP = bmpDecoder->GetBitsPerPixel();

    // Check to make sure we have valid color settings
    uint16_t numColors = GetNumColors();
    if (numColors == (uint16_t)-1) {
      PostDataError();
      return;
    }
  }

  // If we have a BMP
  if (!mIsPNG && mContainedDecoder && mPos >= mImageOffset + BITMAPINFOSIZE) {
    uint16_t numColors = GetNumColors();
    if (numColors == (uint16_t)-1) {
      PostDataError();
      return;
    }
    // Feed the actual image data (not including headers) into the BMP decoder
    uint32_t bmpDataOffset = mDirEntry.mImageOffset + BITMAPINFOSIZE;
    uint32_t bmpDataEnd = mDirEntry.mImageOffset + BITMAPINFOSIZE + 
                          static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetCompressedImageSize() +
                          4 * numColors;

    // If we are feeding in the core image data, but we have not yet
    // reached the ICO's 'AND buffer mask'
    if (mPos >= bmpDataOffset && mPos < bmpDataEnd) {

      // Figure out how much data the BMP decoder wants
      uint32_t toFeed = bmpDataEnd - mPos;
      if (toFeed > aCount) {
        toFeed = aCount;
      }

      if (!WriteToContainedDecoder(aBuffer, toFeed, aStrategy)) {
        return;
      }

      mPos += toFeed;
      aCount -= toFeed;
      aBuffer += toFeed;
    }
  
    // If the bitmap is fully processed, treat any left over data as the ICO's
    // 'AND buffer mask' which appears after the bitmap resource.
    if (!mIsPNG && mPos >= bmpDataEnd) {
      // There may be an optional AND bit mask after the data.  This is
      // only used if the alpha data is not already set. The alpha data 
      // is used for 32bpp bitmaps as per the comment in ICODecoder.h
      // The alpha mask should be checked in all other cases.
      if (static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetBitsPerPixel() != 32 || 
          !static_cast<nsBMPDecoder*>(mContainedDecoder.get())->HasAlphaData()) {
        uint32_t rowSize = ((GetRealWidth() + 31) / 32) * 4; // + 31 to round up
        if (mPos == bmpDataEnd) {
          mPos++;
          mRowBytes = 0;
          mCurLine = GetRealHeight();
          mRow = (uint8_t*)moz_realloc(mRow, rowSize);
          if (!mRow) {
            PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
            return;
          }
        }

        // Ensure memory has been allocated before decoding.
        NS_ABORT_IF_FALSE(mRow, "mRow is null");
        if (!mRow) {
          PostDataError();
          return;
        }

        while (mCurLine > 0 && aCount > 0) {
          uint32_t toCopy = std::min(rowSize - mRowBytes, aCount);
          if (toCopy) {
            memcpy(mRow + mRowBytes, aBuffer, toCopy);
            aCount -= toCopy;
            aBuffer += toCopy;
            mRowBytes += toCopy;
          }
          if (rowSize == mRowBytes) {
            mCurLine--;
            mRowBytes = 0;

            uint32_t* imageData = 
              static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetImageData();
            if (!imageData) {
              PostDataError();
              return;
            }
            uint32_t* decoded = imageData + mCurLine * GetRealWidth();
            uint32_t* decoded_end = decoded + GetRealWidth();
            uint8_t* p = mRow, *p_end = mRow + rowSize; 
            while (p < p_end) {
              uint8_t idx = *p++;
              for (uint8_t bit = 0x80; bit && decoded<decoded_end; bit >>= 1) {
                // Clear pixel completely for transparency.
                if (idx & bit) {
                  *decoded = 0;
                }
                decoded++;
              }
            }
          }
        }
      }
    }
  }
}
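
The innermost loop above expands the ICO's 1-bit-per-pixel AND mask into transparency: every set mask bit clears the corresponding 32-bit pixel. A self-contained sketch of that expansion (the function name and row-based interface are illustrative, not the decoder's):

#include <cstddef>
#include <cstdint>

// Apply one row of a 1bpp AND mask to a row of 32-bit pixels:
// a set mask bit means "transparent", so the whole pixel is cleared.
static void ApplyAndMaskRow(const uint8_t* maskRow, size_t maskRowBytes,
                            uint32_t* pixels, size_t width)
{
  uint32_t* end = pixels + width;
  for (size_t i = 0; i < maskRowBytes && pixels < end; ++i) {
    uint8_t maskByte = maskRow[i];
    for (uint8_t bit = 0x80; bit && pixels < end; bit >>= 1) {
      if (maskByte & bit) {
        *pixels = 0;  // clear pixel completely for transparency
      }
      ++pixels;
    }
  }
}

// Mask rows are padded to 32-bit boundaries, as in the decoder above:
//   size_t maskRowBytes = ((width + 31) / 32) * 4;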
Example #4
0
void
nsJPEGDecoder::WriteInternal(const char* aBuffer, uint32_t aCount,
                             DecodeStrategy)
{
  mSegment = (const JOCTET*)aBuffer;
  mSegmentLen = aCount;

  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // Return here if there is a fatal error within libjpeg.
  nsresult error_code;
  // This cast to nsresult makes sense because setjmp() returns whatever we
  // passed to longjmp(), which was actually an nsresult.
  if ((error_code = (nsresult)setjmp(mErr.setjmp_buffer)) != NS_OK) {
    if (error_code == NS_ERROR_FAILURE) {
      PostDataError();
      // Error due to corrupt stream - consume silently and return
      // so that ImageLib doesn't throw away a partial image load
      mState = JPEG_SINK_NON_JPEG_TRAILER;
      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             ("} (setjmp returned NS_ERROR_FAILURE)"));
      return;
    } else {
      // Error due to reasons external to the stream (probably out of
      // memory) - let ImageLib attempt to clean up, even though
      // mozilla is seconds away from falling flat on its face.
      PostDecoderError(error_code);
      mState = JPEG_ERROR;
      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             ("} (setjmp returned an error)"));
      return;
    }
  }

  PR_LOG(GetJPEGLog(), PR_LOG_DEBUG,
         ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));

  switch (mState) {
    case JPEG_HEADER: {
      LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::Write -- entering JPEG_HEADER"
                " case");

      // Step 3: read file parameters with jpeg_read_header()
      if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
        PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
               ("} (JPEG_SUSPENDED)"));
        return; // I/O suspension
      }

      int sampleSize = mImage.GetRequestedSampleSize();
      if (sampleSize > 0) {
        mInfo.scale_num = 1;
        mInfo.scale_denom = sampleSize;
      }

      // Used to set up image size so arrays can be allocated
      jpeg_calc_output_dimensions(&mInfo);

      // Post our size to the superclass
      PostSize(mInfo.output_width, mInfo.output_height,
               ReadOrientationFromEXIF());
      if (HasError()) {
        // Setting the size led to an error.
        mState = JPEG_ERROR;
        return;
      }

      // If we're doing a size decode, we're done.
      if (IsSizeDecode()) {
        return;
      }

      // We're doing a full decode.
      if (mCMSMode != eCMSMode_Off &&
          (mInProfile = GetICCProfile(mInfo)) != nullptr) {
        uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
        bool mismatch = false;

#ifdef DEBUG_tor
      fprintf(stderr, "JPEG profileSpace: 0x%08X\n", profileSpace);
#endif
      switch (mInfo.jpeg_color_space) {
        case JCS_GRAYSCALE:
          if (profileSpace == icSigRgbData) {
            mInfo.out_color_space = JCS_RGB;
          } else if (profileSpace != icSigGrayData) {
            mismatch = true;
          }
          break;
        case JCS_RGB:
          if (profileSpace != icSigRgbData) {
            mismatch =  true;
          }
          break;
        case JCS_YCbCr:
          if (profileSpace == icSigRgbData) {
            mInfo.out_color_space = JCS_RGB;
          } else {
            // qcms doesn't support ycbcr
            mismatch = true;
          }
          break;
        case JCS_CMYK:
        case JCS_YCCK:
            // qcms doesn't support cmyk
            mismatch = true;
          break;
        default:
          mState = JPEG_ERROR;
          PostDataError();
          PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                 ("} (unknown colorpsace (1))"));
          return;
      }

      if (!mismatch) {
        qcms_data_type type;
        switch (mInfo.out_color_space) {
          case JCS_GRAYSCALE:
            type = QCMS_DATA_GRAY_8;
            break;
          case JCS_RGB:
            type = QCMS_DATA_RGB_8;
            break;
          default:
            mState = JPEG_ERROR;
            PostDataError();
            PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                   ("} (unknown colorpsace (2))"));
            return;
        }
#if 0
        // We don't currently support CMYK profiles. The following
        // code dealt with lcms types. Add something like this
        // back when we gain support for CMYK.

        // Adobe Photoshop writes YCCK/CMYK files with inverted data
        if (mInfo.out_color_space == JCS_CMYK) {
          type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
        }
#endif

        if (gfxPlatform::GetCMSOutputProfile()) {

          // Calculate rendering intent.
          int intent = gfxPlatform::GetRenderingIntent();
          if (intent == -1) {
            intent = qcms_profile_get_rendering_intent(mInProfile);
          }

          // Create the color management transform.
          mTransform = qcms_transform_create(mInProfile,
                                          type,
                                          gfxPlatform::GetCMSOutputProfile(),
                                          QCMS_DATA_RGB_8,
                                          (qcms_intent)intent);
        }
      } else {
#ifdef DEBUG_tor
        fprintf(stderr, "ICM profile colorspace mismatch\n");
#endif
      }
    }

    if (!mTransform) {
      switch (mInfo.jpeg_color_space) {
        case JCS_GRAYSCALE:
        case JCS_RGB:
        case JCS_YCbCr:
          // if we're not color managing we can decode directly to
          // MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB
          if (mCMSMode != eCMSMode_All) {
              mInfo.out_color_space = MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB;
              mInfo.out_color_components = 4;
          } else {
              mInfo.out_color_space = JCS_RGB;
          }
          break;
        case JCS_CMYK:
        case JCS_YCCK:
          // libjpeg can convert from YCCK to CMYK, but not to RGB
          mInfo.out_color_space = JCS_CMYK;
          break;
        default:
          mState = JPEG_ERROR;
          PostDataError();
          PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                 ("} (unknown colorpsace (3))"));
          return;
          break;
      }
    }

    // Don't allocate a giant and superfluous memory buffer
    // when not doing a progressive decode.
    mInfo.buffered_image = mDecodeStyle == PROGRESSIVE &&
                           jpeg_has_multiple_scans(&mInfo);

    if (!mImageData) {
      mState = JPEG_ERROR;
      PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             ("} (could not initialize image frame)"));
      return;
    }

    PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
           ("        JPEGDecoderAccounting: nsJPEGDecoder::"
            "Write -- created image frame with %ux%u pixels",
            mInfo.output_width, mInfo.output_height));

    mState = JPEG_START_DECOMPRESS;
  }

  case JPEG_START_DECOMPRESS: {
    LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::Write -- entering"
                            " JPEG_START_DECOMPRESS case");
    // Step 4: set parameters for decompression

    // FIXME -- Should reset dct_method and dither mode
    // for final pass of progressive JPEG

    mInfo.dct_method =  JDCT_ISLOW;
    mInfo.dither_mode = JDITHER_FS;
    mInfo.do_fancy_upsampling = TRUE;
    mInfo.enable_2pass_quant = FALSE;
    mInfo.do_block_smoothing = TRUE;

    // Step 5: Start decompressor
    if (jpeg_start_decompress(&mInfo) == FALSE) {
      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_start_decompress())"));
      return; // I/O suspension
    }


    // If this is a progressive JPEG ...
    mState = mInfo.buffered_image ?
             JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
  }

  case JPEG_DECOMPRESS_SEQUENTIAL: {
    if (mState == JPEG_DECOMPRESS_SEQUENTIAL) {
      LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::Write -- "
                              "JPEG_DECOMPRESS_SEQUENTIAL case");

      bool suspend;
      OutputScanlines(&suspend);

      if (suspend) {
        PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
               ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
        return; // I/O suspension
      }

      // If we've completed image output ...
      NS_ASSERTION(mInfo.output_scanline == mInfo.output_height,
                   "We didn't process all of the data!");
      mState = JPEG_DONE;
    }
  }

  case JPEG_DECOMPRESS_PROGRESSIVE: {
    if (mState == JPEG_DECOMPRESS_PROGRESSIVE) {
      LOG_SCOPE(GetJPEGLog(),
                "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");

      int status;
      do {
        status = jpeg_consume_input(&mInfo);
      } while ((status != JPEG_SUSPENDED) &&
               (status != JPEG_REACHED_EOI));

      for (;;) {
        if (mInfo.output_scanline == 0) {
          int scan = mInfo.input_scan_number;

          // if we haven't displayed anything yet (output_scan_number==0)
          // and we have enough data for a complete scan, force output
          // of the last full scan
          if ((mInfo.output_scan_number == 0) &&
              (scan > 1) &&
              (status != JPEG_REACHED_EOI))
            scan--;

          if (!jpeg_start_output(&mInfo, scan)) {
            PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_start_output() -"
                    " PROGRESSIVE)"));
            return; // I/O suspension
          }
        }

        if (mInfo.output_scanline == 0xffffff) {
          mInfo.output_scanline = 0;
        }

        bool suspend;
        OutputScanlines(&suspend);

        if (suspend) {
          if (mInfo.output_scanline == 0) {
            // didn't manage to read any lines - flag so we don't call
            // jpeg_start_output() multiple times for the same scan
            mInfo.output_scanline = 0xffffff;
          }
          PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                 ("} (I/O suspension after OutputScanlines() - PROGRESSIVE)"));
          return; // I/O suspension
        }

        if (mInfo.output_scanline == mInfo.output_height) {
          if (!jpeg_finish_output(&mInfo)) {
            PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_finish_output() -"
                    " PROGRESSIVE)"));
            return; // I/O suspension
          }

          if (jpeg_input_complete(&mInfo) &&
              (mInfo.input_scan_number == mInfo.output_scan_number))
            break;

          mInfo.output_scanline = 0;
        }
      }

      mState = JPEG_DONE;
    }
  }

  case JPEG_DONE: {
    LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::ProcessData -- entering"
                            " JPEG_DONE case");

    // Step 7: Finish decompression

    if (jpeg_finish_decompress(&mInfo) == FALSE) {
      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
      return; // I/O suspension
    }

    mState = JPEG_SINK_NON_JPEG_TRAILER;

    // we're done dude
    break;
  }
  case JPEG_SINK_NON_JPEG_TRAILER:
    PR_LOG(GetJPEGLog(), PR_LOG_DEBUG,
           ("[this=%p] nsJPEGDecoder::ProcessData -- entering"
            " JPEG_SINK_NON_JPEG_TRAILER case\n", this));

    break;

  case JPEG_ERROR:
    NS_ABORT_IF_FALSE(0, "Should always return immediately after error and"
                         " not re-enter decoder");
  }

  PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
         ("} (end of function)"));
  return;
}
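
The color-management branch above builds a qcms transform from the image's embedded ICC profile to the platform output profile. A hedged sketch of creating and applying such a transform to one packed-RGB row; qcms_profile_sRGB() stands in here for the output profile, and the function name is illustrative:

#include <cstddef>
#include <cstdint>
#include "qcms.h"

// Convert one row of packed 8-bit RGB pixels through a qcms transform.
// inProfile would come from the image's ICC data (cf. GetICCProfile above).
static void TransformRowToSRGB(qcms_profile* inProfile, uint8_t* row,
                               size_t pixelCount)
{
  qcms_profile* outProfile = qcms_profile_sRGB();
  qcms_transform* transform =
    qcms_transform_create(inProfile, QCMS_DATA_RGB_8,
                          outProfile, QCMS_DATA_RGB_8,
                          QCMS_INTENT_PERCEPTUAL);
  if (transform) {
    // Transform in place; the length argument is a pixel count.
    qcms_transform_data(transform, row, row, pixelCount);
    qcms_transform_release(transform);
  }
  qcms_profile_release(outProfile);
}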
Example #5
0
void
nsIconDecoder::WriteInternal(const char* aBuffer, uint32_t aCount,
                             DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  uint32_t bytesToRead = 0;

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (uint8_t)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (uint8_t)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);

        PostHasTransparency();

        if (HasError()) {
          // Setting the size led to an error.
          mState = iconStateFinished;
          return;
        }

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        if (!mImageData) {
          PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
          return;
        }

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels: {

        // How many bytes are we reading?
        bytesToRead = std::min(aCount, mImageDataLength - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Performance isn't critical here, so our update rectangle is
        // always the full icon
        nsIntRect r(0, 0, mWidth, mHeight);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mImageDataLength) {
          PostFrameStop();
          PostDecodeDone();
          mState = iconStateFinished;
        }
        break;
      }

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;
    }
  }
}
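
The decoder above consumes a trivial stream: one width byte, one height byte, then width * height * 4 bytes of pixel data. A hedged sketch of producing such a stream, useful for seeing what the state machine expects; the layout is inferred from the code above and the helper name is illustrative:

#include <cstdint>
#include <vector>

// Serialize a small icon into the stream format read by the decoder above:
// [width:1][height:1][pixels: width * height * 4 bytes].
// Width and height must each fit in a single byte.
static std::vector<uint8_t> EncodeIconStream(uint8_t width, uint8_t height,
                                             const std::vector<uint8_t>& pixels)
{
  std::vector<uint8_t> out;
  out.reserve(2 + pixels.size());
  out.push_back(width);
  out.push_back(height);
  out.insert(out.end(), pixels.begin(), pixels.end());
  return out;
}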
Example #6
0
LexerTransition<nsJPEGDecoder::State>
nsJPEGDecoder::ReadJPEGData(const char* aData, size_t aLength)
{
  mSegment = reinterpret_cast<const JOCTET*>(aData);
  mSegmentLen = aLength;

  // Return here if there is a fatal error within libjpeg.
  nsresult error_code;
  // This cast to nsresult makes sense because setjmp() returns whatever we
  // passed to longjmp(), which was actually an nsresult.
  if ((error_code = static_cast<nsresult>(setjmp(mErr.setjmp_buffer))) != NS_OK) {
    if (error_code == NS_ERROR_FAILURE) {
      // Error due to corrupt data. Make sure that we don't feed any more data
      // to libjpeg-turbo.
      mState = JPEG_SINK_NON_JPEG_TRAILER;
      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
             ("} (setjmp returned NS_ERROR_FAILURE)"));
    } else {
      // Error for another reason. (Possibly OOM.)
      PostDecoderError(error_code);
      mState = JPEG_ERROR;
      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
             ("} (setjmp returned an error)"));
    }

    return Transition::TerminateFailure();
  }

  MOZ_LOG(sJPEGLog, LogLevel::Debug,
         ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));

  switch (mState) {
    case JPEG_HEADER: {
      LOG_SCOPE((mozilla::LogModule*)sJPEGLog, "nsJPEGDecoder::Write -- entering JPEG_HEADER"
                " case");

      // Step 3: read file parameters with jpeg_read_header()
      if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
        MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
               ("} (JPEG_SUSPENDED)"));
        return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
      }

      // If we have a sample size specified for -moz-sample-size, use it.
      if (mSampleSize > 0) {
        mInfo.scale_num = 1;
        mInfo.scale_denom = mSampleSize;
      }

      // Used to set up image size so arrays can be allocated
      jpeg_calc_output_dimensions(&mInfo);

      // Post our size to the superclass
      PostSize(mInfo.output_width, mInfo.output_height,
               ReadOrientationFromEXIF());
      if (HasError()) {
        // Setting the size led to an error.
        mState = JPEG_ERROR;
        return Transition::TerminateFailure();
      }

      // If we're doing a metadata decode, we're done.
      if (IsMetadataDecode()) {
        return Transition::TerminateSuccess();
      }

      // We're doing a full decode.
      if (mCMSMode != eCMSMode_Off &&
          (mInProfile = GetICCProfile(mInfo)) != nullptr) {
        uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
        bool mismatch = false;

#ifdef DEBUG_tor
      fprintf(stderr, "JPEG profileSpace: 0x%08X\n", profileSpace);
#endif
      switch (mInfo.jpeg_color_space) {
        case JCS_GRAYSCALE:
          if (profileSpace == icSigRgbData) {
            mInfo.out_color_space = JCS_RGB;
          } else if (profileSpace != icSigGrayData) {
            mismatch = true;
          }
          break;
        case JCS_RGB:
          if (profileSpace != icSigRgbData) {
            mismatch =  true;
          }
          break;
        case JCS_YCbCr:
          if (profileSpace == icSigRgbData) {
            mInfo.out_color_space = JCS_RGB;
          } else {
            // qcms doesn't support ycbcr
            mismatch = true;
          }
          break;
        case JCS_CMYK:
        case JCS_YCCK:
            // qcms doesn't support cmyk
            mismatch = true;
          break;
        default:
          mState = JPEG_ERROR;
          PostDataError();
          MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                 ("} (unknown colorpsace (1))"));
          return Transition::TerminateFailure();
      }

      if (!mismatch) {
        qcms_data_type type;
        switch (mInfo.out_color_space) {
          case JCS_GRAYSCALE:
            type = QCMS_DATA_GRAY_8;
            break;
          case JCS_RGB:
            type = QCMS_DATA_RGB_8;
            break;
          default:
            mState = JPEG_ERROR;
            PostDataError();
            MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                   ("} (unknown colorpsace (2))"));
            return Transition::TerminateFailure();
        }
#if 0
        // We don't currently support CMYK profiles. The following
        // code dealt with lcms types. Add something like this
        // back when we gain support for CMYK.

        // Adobe Photoshop writes YCCK/CMYK files with inverted data
        if (mInfo.out_color_space == JCS_CMYK) {
          type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
        }
#endif

        if (gfxPlatform::GetCMSOutputProfile()) {

          // Calculate rendering intent.
          int intent = gfxPlatform::GetRenderingIntent();
          if (intent == -1) {
            intent = qcms_profile_get_rendering_intent(mInProfile);
          }

          // Create the color management transform.
          mTransform = qcms_transform_create(mInProfile,
                                          type,
                                          gfxPlatform::GetCMSOutputProfile(),
                                          QCMS_DATA_RGB_8,
                                          (qcms_intent)intent);
        }
      } else {
#ifdef DEBUG_tor
        fprintf(stderr, "ICM profile colorspace mismatch\n");
#endif
      }
    }

    if (!mTransform) {
      switch (mInfo.jpeg_color_space) {
        case JCS_GRAYSCALE:
        case JCS_RGB:
        case JCS_YCbCr:
          // if we're not color managing we can decode directly to
          // MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB
          if (mCMSMode != eCMSMode_All) {
              mInfo.out_color_space = MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB;
              mInfo.out_color_components = 4;
          } else {
              mInfo.out_color_space = JCS_RGB;
          }
          break;
        case JCS_CMYK:
        case JCS_YCCK:
          // libjpeg can convert from YCCK to CMYK, but not to RGB
          mInfo.out_color_space = JCS_CMYK;
          break;
        default:
          mState = JPEG_ERROR;
          PostDataError();
          MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                 ("} (unknown colorpsace (3))"));
          return Transition::TerminateFailure();
      }
    }

    // Don't allocate a giant and superfluous memory buffer
    // when not doing a progressive decode.
    mInfo.buffered_image = mDecodeStyle == PROGRESSIVE &&
                           jpeg_has_multiple_scans(&mInfo);

    MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
    nsIntSize targetSize = mDownscaler ? mDownscaler->TargetSize() : GetSize();
    nsresult rv = AllocateFrame(0, targetSize,
                                nsIntRect(nsIntPoint(), targetSize),
                                gfx::SurfaceFormat::B8G8R8A8);
    if (NS_FAILED(rv)) {
      mState = JPEG_ERROR;
      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
             ("} (could not initialize image frame)"));
      return Transition::TerminateFailure();
    }

    MOZ_ASSERT(mImageData, "Should have a buffer now");

    if (mDownscaler) {
      nsresult rv = mDownscaler->BeginFrame(GetSize(), Nothing(),
                                            mImageData,
                                            /* aHasAlpha = */ false);
      if (NS_FAILED(rv)) {
        mState = JPEG_ERROR;
        return Transition::TerminateFailure();
      }
    }

    MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
           ("        JPEGDecoderAccounting: nsJPEGDecoder::"
            "Write -- created image frame with %ux%u pixels",
            mInfo.output_width, mInfo.output_height));

    mState = JPEG_START_DECOMPRESS;
    MOZ_FALLTHROUGH; // to start decompressing.
  }

  case JPEG_START_DECOMPRESS: {
    LOG_SCOPE((mozilla::LogModule*)sJPEGLog, "nsJPEGDecoder::Write -- entering"
                            " JPEG_START_DECOMPRESS case");
    // Step 4: set parameters for decompression

    // FIXME -- Should reset dct_method and dither mode
    // for final pass of progressive JPEG

    mInfo.dct_method =  JDCT_ISLOW;
    mInfo.dither_mode = JDITHER_FS;
    mInfo.do_fancy_upsampling = TRUE;
    mInfo.enable_2pass_quant = FALSE;
    mInfo.do_block_smoothing = TRUE;

    // Step 5: Start decompressor
    if (jpeg_start_decompress(&mInfo) == FALSE) {
      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
             ("} (I/O suspension after jpeg_start_decompress())"));
      return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
    }

    // If this is a progressive JPEG ...
    mState = mInfo.buffered_image ?
             JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
    MOZ_FALLTHROUGH; // to decompress sequential JPEG.
  }

  case JPEG_DECOMPRESS_SEQUENTIAL: {
    if (mState == JPEG_DECOMPRESS_SEQUENTIAL) {
      LOG_SCOPE((mozilla::LogModule*)sJPEGLog, "nsJPEGDecoder::Write -- "
                              "JPEG_DECOMPRESS_SEQUENTIAL case");

      bool suspend;
      OutputScanlines(&suspend);

      if (suspend) {
        MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
               ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
        return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
      }

      // If we've completed image output ...
      NS_ASSERTION(mInfo.output_scanline == mInfo.output_height,
                   "We didn't process all of the data!");
      mState = JPEG_DONE;
    }
    MOZ_FALLTHROUGH; // to decompress progressive JPEG.
  }

  case JPEG_DECOMPRESS_PROGRESSIVE: {
    if (mState == JPEG_DECOMPRESS_PROGRESSIVE) {
      LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
                "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");

      int status;
      do {
        status = jpeg_consume_input(&mInfo);
      } while ((status != JPEG_SUSPENDED) &&
               (status != JPEG_REACHED_EOI));

      for (;;) {
        if (mInfo.output_scanline == 0) {
          int scan = mInfo.input_scan_number;

          // if we haven't displayed anything yet (output_scan_number==0)
          // and we have enough data for a complete scan, force output
          // of the last full scan
          if ((mInfo.output_scan_number == 0) &&
              (scan > 1) &&
              (status != JPEG_REACHED_EOI))
            scan--;

          if (!jpeg_start_output(&mInfo, scan)) {
            MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                   ("} (I/O suspension after jpeg_start_output() -"
                    " PROGRESSIVE)"));
            return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
          }
        }

        if (mInfo.output_scanline == 0xffffff) {
          mInfo.output_scanline = 0;
        }

        bool suspend;
        OutputScanlines(&suspend);

        if (suspend) {
          if (mInfo.output_scanline == 0) {
            // didn't manage to read any lines - flag so we don't call
            // jpeg_start_output() multiple times for the same scan
            mInfo.output_scanline = 0xffffff;
          }
          MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                 ("} (I/O suspension after OutputScanlines() - PROGRESSIVE)"));
          return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
        }

        if (mInfo.output_scanline == mInfo.output_height) {
          if (!jpeg_finish_output(&mInfo)) {
            MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                   ("} (I/O suspension after jpeg_finish_output() -"
                    " PROGRESSIVE)"));
            return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
          }

          if (jpeg_input_complete(&mInfo) &&
              (mInfo.input_scan_number == mInfo.output_scan_number))
            break;

          mInfo.output_scanline = 0;
          if (mDownscaler) {
            mDownscaler->ResetForNextProgressivePass();
          }
        }
      }

      mState = JPEG_DONE;
    }
    MOZ_FALLTHROUGH; // to finish decompressing.
  }

  case JPEG_DONE: {
    LOG_SCOPE((mozilla::LogModule*)sJPEGLog, "nsJPEGDecoder::ProcessData -- entering"
                            " JPEG_DONE case");

    // Step 7: Finish decompression

    if (jpeg_finish_decompress(&mInfo) == FALSE) {
      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
             ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
      return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
    }

    // Make sure we don't feed any more data to libjpeg-turbo.
    mState = JPEG_SINK_NON_JPEG_TRAILER;

    // We're done.
    return Transition::TerminateSuccess();
  }
  case JPEG_SINK_NON_JPEG_TRAILER:
    MOZ_LOG(sJPEGLog, LogLevel::Debug,
           ("[this=%p] nsJPEGDecoder::ProcessData -- entering"
            " JPEG_SINK_NON_JPEG_TRAILER case\n", this));

    MOZ_ASSERT_UNREACHABLE("Should stop getting data after entering state "
                           "JPEG_SINK_NON_JPEG_TRAILER");

    return Transition::TerminateSuccess();

  case JPEG_ERROR:
    MOZ_ASSERT_UNREACHABLE("Should stop getting data after entering state "
                           "JPEG_ERROR");

    return Transition::TerminateFailure();
  }

  MOZ_ASSERT_UNREACHABLE("Escaped the JPEG decoder state machine");
  return Transition::TerminateFailure();
}
Example #7
0
void
nsIconDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  PRUint32 bytesToRead = 0;
  nsresult rv;

  // Performance isn't critical here, so our update rectangle is 
  // always the full icon
  nsIntRect r(0, 0, mWidth, mHeight);

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (PRUint8)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (PRUint8)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);
        if (HasError()) {
          // Setting the size led to an error.
          mState = iconStateFinished;
          return;
        }

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        // Add the frame and signal
        rv = mImage.EnsureFrame(0, 0, 0, mWidth, mHeight,
                                gfxASurface::ImageFormatARGB32,
                                &mImageData, &mPixBytesTotal);
        if (NS_FAILED(rv)) {
          PostDecoderError(rv);
          return;
        }

        // Tell the superclass we're starting a frame
        PostFrameStart();

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels:

        // How many bytes are we reading?
        bytesToRead = NS_MIN(aCount, mPixBytesTotal - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mPixBytesTotal) {
          PostFrameStop();
          PostDecodeDone();
          mState = iconStateFinished;
        }
        break;

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;
    }
  }
}
Example #8
0
void
nsPNGDecoder::InitInternal()
{
  // For size decodes, we don't need to initialize the png decoder
  if (IsSizeDecode()) {
    return;
  }

  mCMSMode = gfxPlatform::GetCMSMode();
  if (GetDecodeFlags() & imgIContainer::FLAG_DECODE_NO_COLORSPACE_CONVERSION) {
    mCMSMode = eCMSMode_Off;
  }
  mDisablePremultipliedAlpha =
    GetDecodeFlags() & imgIContainer::FLAG_DECODE_NO_PREMULTIPLY_ALPHA;

#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
  static png_byte color_chunks[]=
       { 99,  72,  82,  77, '\0',   // cHRM
        105,  67,  67,  80, '\0'};  // iCCP
  static png_byte unused_chunks[]=
       { 98,  75,  71,  68, '\0',   // bKGD
        104,  73,  83,  84, '\0',   // hIST
        105,  84,  88, 116, '\0',   // iTXt
        111,  70,  70, 115, '\0',   // oFFs
        112,  67,  65,  76, '\0',   // pCAL
        115,  67,  65,  76, '\0',   // sCAL
        112,  72,  89, 115, '\0',   // pHYs
        115,  66,  73,  84, '\0',   // sBIT
        115,  80,  76,  84, '\0',   // sPLT
        116,  69,  88, 116, '\0',   // tEXt
        116,  73,  77,  69, '\0',   // tIME
        122,  84,  88, 116, '\0'};  // zTXt
#endif

  // For full decodes, do png init stuff

  // Initialize the container's source image header
  // Always decode to 24 bit pixdepth

  mPNG = png_create_read_struct(PNG_LIBPNG_VER_STRING,
                                nullptr, nsPNGDecoder::error_callback,
                                nsPNGDecoder::warning_callback);
  if (!mPNG) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  mInfo = png_create_info_struct(mPNG);
  if (!mInfo) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    png_destroy_read_struct(&mPNG, nullptr, nullptr);
    return;
  }

#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
  // Ignore unused chunks
  if (mCMSMode == eCMSMode_Off) {
    png_set_keep_unknown_chunks(mPNG, 1, color_chunks, 2);
  }

  png_set_keep_unknown_chunks(mPNG, 1, unused_chunks,
                              (int)sizeof(unused_chunks)/5);
#endif

#ifdef PNG_SET_USER_LIMITS_SUPPORTED
  if (mCMSMode != eCMSMode_Off) {
    png_set_chunk_malloc_max(mPNG, 4000000L);
  }
#endif

#ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED
  // Disallow palette-index checking, for speed; we would ignore the warning
  // anyhow.  This feature was added at libpng version 1.5.10 and is disabled
  // in the embedded libpng but enabled by default in the system libpng.  This
  // call also disables it in the system libpng, for decoding speed.
  // Bug #745202.
  png_set_check_for_invalid_index(mPNG, 0);
#endif

#if defined(PNG_SET_OPTION_SUPPORTED) && defined(PNG_sRGB_PROFILE_CHECKS) && \
            PNG_sRGB_PROFILE_CHECKS >= 0
  // Skip checking of sRGB ICC profiles
  png_set_option(mPNG, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON);
#endif

  // use this as libpng "progressive pointer" (retrieve in callbacks)
  png_set_progressive_read_fn(mPNG, static_cast<png_voidp>(this),
                              nsPNGDecoder::info_callback,
                              nsPNGDecoder::row_callback,
                              nsPNGDecoder::end_callback);

}
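
For reference, a minimal standalone sketch (not the Mozilla code above) of libpng's progressive reader, the same API InitInternal() configures: data is pushed with png_process_data() and libpng calls back when the header, each row, and the end of the image become available. The helper and callback names are illustrative.

#include <csetjmp>
#include <cstdint>
#include <png.h>

static void info_cb(png_structp, png_infop) { /* size and format known here */ }
static void row_cb(png_structp, png_bytep, png_uint_32, int) { /* one row ready */ }
static void end_cb(png_structp, png_infop) { /* image complete */ }

// Push one buffer of PNG data into a progressive reader.
// Returns false if libpng reported an error via longjmp.
static bool PushPNGData(png_structp png, png_infop info,
                        uint8_t* data, size_t len)
{
  if (setjmp(png_jmpbuf(png))) {
    return false;  // the error callback longjmp'd back here
  }
  png_process_data(png, info, data, len);
  return true;
}

// Setup mirrors the decoder above:
//   png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING,
//                                            nullptr, nullptr, nullptr);
//   png_infop info = png_create_info_struct(png);
//   png_set_progressive_read_fn(png, nullptr, info_cb, row_cb, end_cb);
//   ... PushPNGData(png, info, chunk, chunkLen) for each chunk ...
//   png_destroy_read_struct(&png, &info, nullptr);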
Example #9
0
void
nsGIFDecoder2::WriteInternal(const char* aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  // These variables changed names; renaming would make a much bigger patch :(
  const uint8_t* buf = (const uint8_t*)aBuffer;
  uint32_t len = aCount;

  const uint8_t* q = buf;

  // Add what we have so far to the block.
  // If a previous call left something in the hold, first complete the current
  // block; or if we are filling the colormaps, first complete the colormap.
  uint8_t* p =
    (mGIFStruct.state ==
      gif_global_colormap) ? (uint8_t*) mGIFStruct.global_colormap :
        (mGIFStruct.state == gif_image_colormap) ? (uint8_t*) mColormap :
          (mGIFStruct.bytes_in_hold) ? mGIFStruct.hold : nullptr;

  if (len == 0 && buf == nullptr) {
    // We've just gotten the frame we asked for. Time to use the data we
    // stashed away.
    len = mGIFStruct.bytes_in_hold;
    q = buf = p;
  } else if (p) {
    // Add what we have so far to the block
    uint32_t l = std::min(len, mGIFStruct.bytes_to_consume);
    memcpy(p+mGIFStruct.bytes_in_hold, buf, l);

    if (l < mGIFStruct.bytes_to_consume) {
      // Not enough in 'buf' to complete current block, get more
      mGIFStruct.bytes_in_hold += l;
      mGIFStruct.bytes_to_consume -= l;
      return;
    }
    // Point 'q' to complete block in hold (or in colormap)
    q = p;
  }

  // Invariant:
  //    'q' is the start of the block currently being processed (hold,
  //    colormap or buf).
  //    'bytes_to_consume' is the number of bytes to consume from 'buf'.
  //    'buf' points to the bytes to be consumed from the input buffer.
  //    'len' is the number of bytes left in the input buffer from position
  //    'buf'.
  //    At the start of each for-loop iteration, 'buf' is advanced by
  //    'bytes_to_consume' and 'len' is adjusted accordingly, so that on the
  //    next round 'q' points to the next block.

  for (;len >= mGIFStruct.bytes_to_consume; q=buf, mGIFStruct.bytes_in_hold = 0)
  {
    // Eat the current block from the buffer, q keeps pointed at current block
    buf += mGIFStruct.bytes_to_consume;
    len -= mGIFStruct.bytes_to_consume;

    switch (mGIFStruct.state) {
    case gif_lzw:
      if (!DoLzw(q)) {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(1, gif_sub_block);
      break;

    case gif_lzw_start: {
      // Make sure the transparent pixel is transparent in the colormap
      if (mGIFStruct.is_transparent) {
        // Save old value so we can restore it later
        if (mColormap == mGIFStruct.global_colormap) {
            mOldColor = mColormap[mGIFStruct.tpixel];
        }
        mColormap[mGIFStruct.tpixel] = 0;
      }

      // Initialize LZW parser/decoder
      mGIFStruct.datasize = *q;
      const int clear_code = ClearCode();
      if (mGIFStruct.datasize > MAX_LZW_BITS ||
          clear_code >= MAX_BITS) {
        mGIFStruct.state = gif_error;
        break;
      }

      mGIFStruct.avail = clear_code + 2;
      mGIFStruct.oldcode = -1;
      mGIFStruct.codesize = mGIFStruct.datasize + 1;
      mGIFStruct.codemask = (1 << mGIFStruct.codesize) - 1;
      mGIFStruct.datum = mGIFStruct.bits = 0;

      // init the tables
      for (int i = 0; i < clear_code; i++) {
        mGIFStruct.suffix[i] = i;
      }

      mGIFStruct.stackp = mGIFStruct.stack;

      GETN(1, gif_sub_block);
    }
    break;

    // All GIF files begin with "GIF87a" or "GIF89a"
    case gif_type:
      if (!strncmp((char*)q, "GIF89a", 6)) {
        mGIFStruct.version = 89;
      } else if (!strncmp((char*)q, "GIF87a", 6)) {
        mGIFStruct.version = 87;
      } else {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(7, gif_global_header);
      break;

    case gif_global_header:
      // This is the height and width of the "screen" or
      // frame into which images are rendered.  The
      // individual images can be smaller than the
      // screen size and located with an origin anywhere
      // within the screen.

      mGIFStruct.screen_width = GETINT16(q);
      mGIFStruct.screen_height = GETINT16(q + 2);
      mGIFStruct.global_colormap_depth = (q[4]&0x07) + 1;

      if (IsSizeDecode()) {
        MOZ_ASSERT(!mGIFOpen, "Gif should not be open at this point");
        PostSize(mGIFStruct.screen_width, mGIFStruct.screen_height);
        return;
      }

      // screen_bgcolor is not used
      //mGIFStruct.screen_bgcolor = q[5];
      // q[6] = Pixel Aspect Ratio
      //   Not used
      //   float aspect = (float)((q[6] + 15) / 64.0);

      if (q[4] & 0x80) {
        // Get the global colormap
        const uint32_t size = (3 << mGIFStruct.global_colormap_depth);
        if (len < size) {
          // Use 'hold' pattern to get the global colormap
          GETN(size, gif_global_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mGIFStruct.global_colormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_global_colormap);
        break;
      }

      GETN(1, gif_image_start);
      break;

    case gif_global_colormap:
      // Everything is already copied into global_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mGIFStruct.global_colormap,
                      1<<mGIFStruct.global_colormap_depth);
      GETN(1, gif_image_start);
      break;

    case gif_image_start:
      switch (*q) {
        case GIF_TRAILER:
          mGIFStruct.state = gif_done;
          break;

        case GIF_EXTENSION_INTRODUCER:
          GETN(2, gif_extension);
          break;

        case GIF_IMAGE_SEPARATOR:
          GETN(9, gif_image_header);
          break;

        default:
          // If we get anything other than GIF_IMAGE_SEPARATOR,
          // GIF_EXTENSION_INTRODUCER, or GIF_TRAILER, there is extraneous data
          // between blocks. The GIF87a spec tells us to keep reading
          // until we find an image separator, but GIF89a says such
          // a file is corrupt. We follow GIF89a and bail out.
          if (mGIFStruct.images_decoded > 0) {
            // The file is corrupt, but one or more images have
            // been decoded correctly. In this case, we proceed
            // as if the file were correctly terminated and set
            // the state to gif_done, so the GIF will display.
            mGIFStruct.state = gif_done;
          } else {
            // No images decoded, there is nothing to display.
            mGIFStruct.state = gif_error;
          }
      }
      break;

    case gif_extension:
      mGIFStruct.bytes_to_consume = q[1];
      if (mGIFStruct.bytes_to_consume) {
        switch (*q) {
        case GIF_GRAPHIC_CONTROL_LABEL:
          // The GIF spec mandates that the GIFControlExtension header block
          // length is 4 bytes, and the parser for this block reads 4 bytes,
          // so we must enforce that the buffer contains at least this many
          // bytes. If the GIF specifies a different length, we allow that, so
          // long as it's larger; the additional data will simply be ignored.
          mGIFStruct.state = gif_control_extension;
          mGIFStruct.bytes_to_consume =
            std::max(mGIFStruct.bytes_to_consume, 4u);
          break;

        // The GIF spec also specifies the lengths of the following two
        // extensions' headers (as 12 and 11 bytes, respectively). Because
        // we ignore the plain text extension entirely and sanity-check the
        // actual length of the application extension header before reading it,
        // we allow GIFs to deviate from these values in either direction. This
        // is important for real-world compatibility, as GIFs in the wild exist
        // with application extension headers that are both shorter and longer
        // than 11 bytes.
        case GIF_APPLICATION_EXTENSION_LABEL:
          mGIFStruct.state = gif_application_extension;
          break;

        case GIF_PLAIN_TEXT_LABEL:
          mGIFStruct.state = gif_skip_block;
          break;

        case GIF_COMMENT_LABEL:
          mGIFStruct.state = gif_consume_comment;
          break;

        default:
          mGIFStruct.state = gif_skip_block;
        }
      } else {
        GETN(1, gif_image_start);
      }
      break;

    case gif_consume_block:
      if (!*q) {
        GETN(1, gif_image_start);
      } else {
        GETN(*q, gif_skip_block);
      }
      break;

    case gif_skip_block:
      GETN(1, gif_consume_block);
      break;

    case gif_control_extension:
      mGIFStruct.is_transparent = *q & 0x1;
      mGIFStruct.tpixel = q[3];
      mGIFStruct.disposal_method = ((*q) >> 2) & 0x7;

      if (mGIFStruct.disposal_method == 4) {
        // Some specs say 3rd bit (value 4), other specs say value 3.
        // Let's choose 3 (the more popular).
        mGIFStruct.disposal_method = 3;
      } else if (mGIFStruct.disposal_method > 4) {
        // This GIF is using a disposal method which is undefined in the spec.
        // Treat it as DisposalMethod::NOT_SPECIFIED.
        mGIFStruct.disposal_method = 0;
      }

      {
        DisposalMethod method = DisposalMethod(mGIFStruct.disposal_method);
        if (method == DisposalMethod::CLEAR_ALL ||
            method == DisposalMethod::CLEAR) {
          // We may have to display the background under this image during
          // animation playback, so we regard it as transparent.
          PostHasTransparency();
        }
      }

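      // The GCE delay field is in hundredths of a second; multiply by 10 to
      // store it in milliseconds.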
      mGIFStruct.delay_time = GETINT16(q + 1) * 10;
      GETN(1, gif_consume_block);
      break;

    case gif_comment_extension:
      if (*q) {
        GETN(*q, gif_consume_comment);
      } else {
        GETN(1, gif_image_start);
      }
      break;

    case gif_consume_comment:
      GETN(1, gif_comment_extension);
      break;

    case gif_application_extension:
      // Check for netscape application extension
      if (mGIFStruct.bytes_to_consume == 11 &&
          (!strncmp((char*)q, "NETSCAPE2.0", 11) ||
           !strncmp((char*)q, "ANIMEXTS1.0", 11))) {
        GETN(1, gif_netscape_extension_block);
      } else {
        GETN(1, gif_consume_block);
      }
      break;

    // Netscape-specific GIF extension: animation looping
    case gif_netscape_extension_block:
      if (*q) {
        // We might need to consume 3 bytes in
        // gif_consume_netscape_extension, so make sure we have at least that.
        GETN(std::max(3, static_cast<int>(*q)), gif_consume_netscape_extension);
      } else {
        GETN(1, gif_image_start);
      }
      break;

    // Parse netscape-specific application extensions
    case gif_consume_netscape_extension:
      switch (q[0] & 7) {
        case 1:
          // Loop entire animation specified # of times.  Only read the
          // loop count during the first iteration.
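          // Per the Netscape looping extension, a loop count of zero means
          // "repeat forever".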
          mGIFStruct.loop_count = GETINT16(q + 1);
          GETN(1, gif_netscape_extension_block);
          break;

        case 2:
          // Wait for specified # of bytes to enter buffer

          // Don't do this; this extension doesn't exist (it isn't used at all)
          // and doesn't do anything, as our streaming/buffering takes care
          // of it all...
          // See: http://semmix.pl/color/exgraf/eeg24.htm
          GETN(1, gif_netscape_extension_block);
          break;

        default:
          // 0,3-7 are yet to be defined netscape extension codes
          mGIFStruct.state = gif_error;
      }
      break;

    case gif_image_header: {
      // Get image offsets, with respect to the screen origin
      mGIFStruct.x_offset = GETINT16(q);
      mGIFStruct.y_offset = GETINT16(q + 2);

      // Get image width and height.
      mGIFStruct.width  = GETINT16(q + 4);
      mGIFStruct.height = GETINT16(q + 6);

      if (!mGIFStruct.images_decoded) {
        // Work around broken GIF files where the logical screen
        // size has weird width or height.  We assume that GIF87a
        // files don't contain animations.
        if ((mGIFStruct.screen_height < mGIFStruct.height) ||
            (mGIFStruct.screen_width < mGIFStruct.width) ||
            (mGIFStruct.version == 87)) {
          mGIFStruct.screen_height = mGIFStruct.height;
          mGIFStruct.screen_width = mGIFStruct.width;
          mGIFStruct.x_offset = 0;
          mGIFStruct.y_offset = 0;
        }
        // Create the image container with the right size.
        BeginGIF();
        if (HasError()) {
          // Setting the size led to an error.
          mGIFStruct.state = gif_error;
          return;
        }

        // If we were doing a size decode, we're done
        if (IsSizeDecode()) {
          return;
        }
      }

      // Work around more broken GIF files that have zero image width or height
      if (!mGIFStruct.height || !mGIFStruct.width) {
        mGIFStruct.height = mGIFStruct.screen_height;
        mGIFStruct.width = mGIFStruct.screen_width;
        if (!mGIFStruct.height || !mGIFStruct.width) {
          mGIFStruct.state = gif_error;
          break;
        }
      }

      // Depth of colors is determined by colormap
      // (q[8] & 0x80) indicates local colormap
      // bits per pixel is (q[8]&0x07 + 1) when local colormap is set
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80) {
        depth = (q[8]&0x07) + 1;
      }
      uint32_t realDepth = depth;
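      // Some broken GIFs set the transparent index beyond the declared palette
      // size; widen the effective depth until the index fits (capped at 8 bits).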
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      }
      // Mask to limit the color values within the colormap
      mColorMask = 0xFF >> (8 - realDepth);

      if (NS_FAILED(BeginImageFrame(realDepth))) {
        mGIFStruct.state = gif_error;
        return;
      }

      // FALL THROUGH
    }

    case gif_image_header_continue: {
      // While decoders can reuse frames, we unconditionally increment
      // mGIFStruct.images_decoded when we're done with a frame, so we both can
      // and need to zero out the colormap and image data after every new frame.
      memset(mImageData, 0, mImageDataLength);
      if (mColormap) {
        memset(mColormap, 0, mColormapSize);
      }

      if (!mGIFStruct.images_decoded) {
        // Send a onetime invalidation for the first frame if it has a y-axis
        // offset. Otherwise, the area may never be refreshed and the
        // placeholder will remain on the screen. (Bug 37589)
        if (mGIFStruct.y_offset > 0) {
          nsIntRect r(0, 0, mGIFStruct.screen_width, mGIFStruct.y_offset);
          PostInvalidation(r);
        }
      }

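      // Bit 6 of the image descriptor's packed field marks an interlaced frame;
      // interlaced GIFs deliver rows in four passes (rows 0, 8, 16, ...; then
      // 4, 12, ...; then 2, 6, ...; then the odd rows).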
      if (q[8] & 0x40) {
        mGIFStruct.interlaced = true;
        mGIFStruct.ipass = 1;
      } else {
        mGIFStruct.interlaced = false;
        mGIFStruct.ipass = 0;
      }

      // Only apply the Haeberli display hack on the first frame
      mGIFStruct.progressive_display = (mGIFStruct.images_decoded == 0);

      // Clear state from last image
      mGIFStruct.irow = 0;
      mGIFStruct.rows_remaining = mGIFStruct.height;
      mGIFStruct.rowp = mImageData;

      // Depth of colors is determined by colormap
      // (q[8] & 0x80) indicates local colormap
      // bits per pixel is (q[8]&0x07 + 1) when local colormap is set
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80) {
        depth = (q[8]&0x07) + 1;
      }
      uint32_t realDepth = depth;
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      }
      // has a local colormap?
      if (q[8] & 0x80) {
        mGIFStruct.local_colormap_size = 1 << depth;
        if (!mGIFStruct.images_decoded) {
          // First frame has local colormap, allocate space for it
          // as the image frame doesn't have its own palette
          mColormapSize = sizeof(uint32_t) << realDepth;
          if (!mGIFStruct.local_colormap) {
            mGIFStruct.local_colormap = (uint32_t*)moz_xmalloc(mColormapSize);
          }
          mColormap = mGIFStruct.local_colormap;
        }
        const uint32_t size = 3 << depth;
        if (mColormapSize > size) {
          // Clear the notfilled part of the colormap
          memset(((uint8_t*)mColormap) + size, 0, mColormapSize - size);
        }
        if (len < size) {
          // Use 'hold' pattern to get the image colormap
          GETN(size, gif_image_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mColormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_image_colormap);
        break;
      } else {
        // Switch back to the global palette
        if (mGIFStruct.images_decoded) {
          // Copy global colormap into the palette of current frame
          memcpy(mColormap, mGIFStruct.global_colormap, mColormapSize);
        } else {
          mColormap = mGIFStruct.global_colormap;
        }
      }
      GETN(1, gif_lzw_start);
    }
    break;

    case gif_image_colormap:
      // Everything is already copied into local_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mColormap, mGIFStruct.local_colormap_size);
      GETN(1, gif_lzw_start);
      break;

    case gif_sub_block:
      mGIFStruct.count = *q;
      if (mGIFStruct.count) {
        // Still working on the same image: Process next LZW data block
        // Make sure there are still rows left. If the GIF data
        // is corrupt, we may not get an explicit terminator.
        if (!mGIFStruct.rows_remaining) {
#ifdef DONT_TOLERATE_BROKEN_GIFS
          mGIFStruct.state = gif_error;
          break;
#else
          // This is an illegal GIF, but we remain tolerant.
          GETN(1, gif_sub_block);
#endif
          if (mGIFStruct.count == GIF_TRAILER) {
            // Found a terminator anyway, so consider the image done
            GETN(1, gif_done);
            break;
          }
        }
        GETN(mGIFStruct.count, gif_lzw);
      } else {
        // See if there are any more images in this sequence.
        EndImageFrame();
        GETN(1, gif_image_start);
      }
      break;

    case gif_done:
      MOZ_ASSERT(!IsSizeDecode(), "Size decodes shouldn't reach gif_done");
      FinishInternal();
      goto done;

    case gif_error:
      PostDataError();
      return;

    // We shouldn't ever get here.
    default:
      MOZ_ASSERT_UNREACHABLE("Unexpected mGIFStruct.state");
      PostDecoderError(NS_ERROR_UNEXPECTED);
      return;
    }
  }

  // if an error state is set but no data remains, code flow reaches here
  if (mGIFStruct.state == gif_error) {
      PostDataError();
      return;
  }

  // Copy the leftover into mGIFStruct.hold
  if (len) {
    // Add what we have so far to the block
    if (mGIFStruct.state != gif_global_colormap &&
        mGIFStruct.state != gif_image_colormap) {
      if (!SetHold(buf, len)) {
        PostDataError();
        return;
      }
    } else {
      uint8_t* p = (mGIFStruct.state == gif_global_colormap) ?
                    (uint8_t*)mGIFStruct.global_colormap :
                    (uint8_t*)mColormap;
      memcpy(p, buf, len);
      mGIFStruct.bytes_in_hold = len;
    }

    mGIFStruct.bytes_to_consume -= len;
  }

// We want to flush before returning if we're on the first frame
done:
  if (!mGIFStruct.images_decoded) {
    FlushImageData();
    mLastFlushedRow = mCurrentRow;
    mLastFlushedPass = mCurrentPass;
  }
}
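
The GETN()/hold mechanism visible throughout this function is the heart of the
incremental parser: each state declares up front how many bytes it needs, any
partial input is stashed in the hold buffer, and the state switch only runs once
a complete block has arrived. A minimal stand-alone sketch of the same pattern
(hypothetical names, not the imagelib API) might look like this:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative "request N bytes, then dispatch" parser, modeled on the GIF
// sub-block structure handled above. None of these names are Mozilla APIs.
class ChunkedParser {
 public:
  // Feed an arbitrary slice of the stream; leftover bytes are held until
  // enough data has arrived for the current state.
  void Write(const uint8_t* aBuf, size_t aLen) {
    mHold.insert(mHold.end(), aBuf, aBuf + aLen);
    while (!mDone && mHold.size() >= mBytesNeeded) {
      const size_t consumed = mBytesNeeded;  // what this state asked for
      Dispatch(mHold.data());                // may call GetN() for the next state
      // A real decoder would track an offset instead of erasing from the front.
      mHold.erase(mHold.begin(), mHold.begin() + consumed);
    }
  }

 private:
  enum class State { BlockSize, BlockBody };

  // Equivalent of GETN(n, state): record how much the next state needs.
  void GetN(size_t aBytes, State aState) { mBytesNeeded = aBytes; mState = aState; }

  void Dispatch(const uint8_t* q) {
    switch (mState) {
      case State::BlockSize:
        if (q[0] == 0) {
          mDone = true;                      // a zero-length block terminates
        } else {
          GetN(q[0], State::BlockBody);      // next, read q[0] payload bytes
        }
        break;
      case State::BlockBody:
        // ...process the mBytesNeeded payload bytes starting at q here...
        GetN(1, State::BlockSize);           // then read the next length byte
        break;
    }
  }

  std::vector<uint8_t> mHold;
  size_t mBytesNeeded = 1;
  State mState = State::BlockSize;
  bool mDone = false;
};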
Example #10
void
nsPNGDecoder::InitInternal()
{
  mCMSMode = gfxPlatform::GetCMSMode();
  if ((mDecodeFlags & DECODER_NO_COLORSPACE_CONVERSION) != 0)
    mCMSMode = eCMSMode_Off;
  mDisablePremultipliedAlpha = (mDecodeFlags & DECODER_NO_PREMULTIPLY_ALPHA) != 0;

#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
  static png_byte color_chunks[]=
       { 99,  72,  82,  77, '\0',   /* cHRM */
        105,  67,  67,  80, '\0'};  /* iCCP */
  static png_byte unused_chunks[]=
       { 98,  75,  71,  68, '\0',   /* bKGD */
        104,  73,  83,  84, '\0',   /* hIST */
        105,  84,  88, 116, '\0',   /* iTXt */
        111,  70,  70, 115, '\0',   /* oFFs */
        112,  67,  65,  76, '\0',   /* pCAL */
        115,  67,  65,  76, '\0',   /* sCAL */
        112,  72,  89, 115, '\0',   /* pHYs */
        115,  66,  73,  84, '\0',   /* sBIT */
        115,  80,  76,  84, '\0',   /* sPLT */
        116,  69,  88, 116, '\0',   /* tEXt */
        116,  73,  77,  69, '\0',   /* tIME */
        122,  84,  88, 116, '\0'};  /* zTXt */
#endif

  // For size decodes, we only need a small buffer
  if (IsSizeDecode()) {
    mHeaderBuf = (PRUint8 *)moz_xmalloc(BYTES_NEEDED_FOR_DIMENSIONS);
    return;
  }

  /* For full decodes, do png init stuff */

  /* Initialize the container's source image header. */
  /* Always decode to 24 bit pixdepth */

  mPNG = png_create_read_struct(PNG_LIBPNG_VER_STRING,
                                NULL, nsPNGDecoder::error_callback,
                                nsPNGDecoder::warning_callback);
  if (!mPNG) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  }

  mInfo = png_create_info_struct(mPNG);
  if (!mInfo) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    png_destroy_read_struct(&mPNG, NULL, NULL);
    return;
  }

#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
  /* Ignore unused chunks */
  if (mCMSMode == eCMSMode_Off)
    png_set_keep_unknown_chunks(mPNG, 1, color_chunks, 2);

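  /* Each entry in these chunk lists is a 4-byte chunk name plus a trailing
   * NUL, so sizeof(list)/5 below is the chunk count; a "keep" value of 1
   * (PNG_HANDLE_CHUNK_NEVER) tells libpng to discard the chunks rather than
   * store them. */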
  png_set_keep_unknown_chunks(mPNG, 1, unused_chunks,
                              (int)sizeof(unused_chunks)/5);   
#endif

#ifdef PNG_SET_CHUNK_MALLOC_LIMIT_SUPPORTED
  if (mCMSMode != eCMSMode_Off)
    png_set_chunk_malloc_max(mPNG, 4000000L);
#endif

  /* use this as libpng "progressive pointer" (retrieve in callbacks) */
  png_set_progressive_read_fn(mPNG, static_cast<png_voidp>(this),
                              nsPNGDecoder::info_callback,
                              nsPNGDecoder::row_callback,
                              nsPNGDecoder::end_callback);

}
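
png_set_progressive_read_fn(), called just above, registers the three callbacks
that libpng's progressive (push) reader invokes as data is fed in through
png_process_data(). A minimal self-contained sketch of that push model, with
hypothetical callback and function names, might look like this:

#include <png.h>
#include <cstddef>
#include <cstdint>

// Illustrative progressive-PNG skeleton; only the libpng calls are real.
static void OnInfo(png_structp png, png_infop info) {
  // The header is available: query dimensions, request transforms, then
  // let libpng recompute the row layout.
  png_uint_32 width = png_get_image_width(png, info);
  png_uint_32 height = png_get_image_height(png, info);
  (void)width; (void)height;
  png_read_update_info(png, info);
}

static void OnRow(png_structp png, png_bytep row, png_uint_32 rowNum, int pass) {
  // Called once per (possibly interlaced) row; copy it into the frame here.
  (void)png; (void)row; (void)rowNum; (void)pass;
}

static void OnEnd(png_structp png, png_infop info) {
  (void)png; (void)info;  // The whole image has been decoded.
}

static bool PushPngData(png_structp png, png_infop info,
                        const uint8_t* data, size_t len) {
  if (setjmp(png_jmpbuf(png))) {
    return false;  // libpng reported a fatal error via longjmp.
  }
  png_set_progressive_read_fn(png, nullptr, OnInfo, OnRow, OnEnd);
  // png_process_data() is not const-correct, hence the cast.
  png_process_data(png, info,
                   const_cast<png_bytep>(reinterpret_cast<const png_byte*>(data)),
                   len);
  return true;
}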
Example #11
void
nsJPEGDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  mSegment = (const JOCTET *)aBuffer;
  mSegmentLen = aCount;

  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  /* Return here if there is a fatal error within libjpeg. */
  nsresult error_code;
  // This cast to nsresult makes sense because setjmp() returns whatever we
  // passed to longjmp(), which was actually an nsresult.
  if ((error_code = (nsresult)setjmp(mErr.setjmp_buffer)) != NS_OK) {
    if (error_code == NS_ERROR_FAILURE) {
      PostDataError();
      /* Error due to corrupt stream - return NS_OK and consume silently
         so that libpr0n doesn't throw away a partial image load */
      mState = JPEG_SINK_NON_JPEG_TRAILER;
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (setjmp returned NS_ERROR_FAILURE)"));
      return;
    } else {
      /* Error due to reasons external to the stream (probably out of
         memory) - let libpr0n attempt to clean up, even though
         mozilla is seconds away from falling flat on its face. */
      PostDecoderError(error_code);
      mState = JPEG_ERROR;
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (setjmp returned an error)"));
      return;
    }
  }

  PR_LOG(gJPEGlog, PR_LOG_DEBUG,
         ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));

  switch (mState) {
  case JPEG_HEADER:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- entering JPEG_HEADER case");

    /* Step 3: read file parameters with jpeg_read_header() */
    if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (JPEG_SUSPENDED)"));
      return; /* I/O suspension */
    }

    // Post our size to the superclass
    PostSize(mInfo.image_width, mInfo.image_height);
    if (HasError()) {
      // Setting the size led to an error.
      mState = JPEG_ERROR;
      return;
    }

    /* If we're doing a size decode, we're done. */
    if (IsSizeDecode())
      return;

    /* We're doing a full decode. */
    if (mCMSMode != eCMSMode_Off &&
        (mInProfile = GetICCProfile(mInfo)) != nullptr) {
      uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
      bool mismatch = false;

#ifdef DEBUG_tor
      fprintf(stderr, "JPEG profileSpace: 0x%08X\n", profileSpace);
#endif
      switch (mInfo.jpeg_color_space) {
      case JCS_GRAYSCALE:
        if (profileSpace == icSigRgbData)
          mInfo.out_color_space = JCS_RGB;
        else if (profileSpace != icSigGrayData)
          mismatch = true;
        break;
      case JCS_RGB:
        if (profileSpace != icSigRgbData)
          mismatch = true;
        break;
      case JCS_YCbCr:
        if (profileSpace == icSigRgbData)
          mInfo.out_color_space = JCS_RGB;
        else
          // qcms doesn't support YCbCr
          mismatch = true;
        break;
      case JCS_CMYK:
      case JCS_YCCK:
        // qcms doesn't support CMYK
        mismatch = true;
        break;
      default:
        mState = JPEG_ERROR;
        PostDataError();
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (unknown colorpsace (1))"));
        return;
      }

      if (!mismatch) {
        qcms_data_type type;
        switch (mInfo.out_color_space) {
        case JCS_GRAYSCALE:
          type = QCMS_DATA_GRAY_8;
          break;
        case JCS_RGB:
          type = QCMS_DATA_RGB_8;
          break;
        default:
          mState = JPEG_ERROR;
          PostDataError();
          PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                 ("} (unknown colorpsace (2))"));
          return;
        }
#if 0
        /* We don't currently support CMYK profiles. The following
         * code dealt with lcms types. Add something like this
         * back when we gain support for CMYK.
         */
        /* Adobe Photoshop writes YCCK/CMYK files with inverted data */
        if (mInfo.out_color_space == JCS_CMYK)
          type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
#endif

        if (gfxPlatform::GetCMSOutputProfile()) {

          /* Calculate rendering intent. */
          int intent = gfxPlatform::GetRenderingIntent();
          if (intent == -1)
              intent = qcms_profile_get_rendering_intent(mInProfile);

          /* Create the color management transform. */
          mTransform = qcms_transform_create(mInProfile,
                                          type,
                                          gfxPlatform::GetCMSOutputProfile(),
                                          QCMS_DATA_RGB_8,
                                          (qcms_intent)intent);
        }
      } else {
#ifdef DEBUG_tor
        fprintf(stderr, "ICM profile colorspace mismatch\n");
#endif
      }
    }

    if (!mTransform) {
      switch (mInfo.jpeg_color_space) {
      case JCS_GRAYSCALE:
      case JCS_RGB:
      case JCS_YCbCr:
        mInfo.out_color_space = JCS_RGB;
        break;
      case JCS_CMYK:
      case JCS_YCCK:
        /* libjpeg can convert from YCCK to CMYK, but not to RGB */
        mInfo.out_color_space = JCS_CMYK;
        break;
      default:
        mState = JPEG_ERROR;
        PostDataError();
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (unknown colorpsace (3))"));
        return;
        break;
      }
    }

    /*
     * Don't allocate a giant and superfluous memory buffer
     * when the image is a sequential JPEG.
     */
    mInfo.buffered_image = jpeg_has_multiple_scans(&mInfo);

    /* Used to set up image size so arrays can be allocated */
    jpeg_calc_output_dimensions(&mInfo);

    uint32_t imagelength;
    if (NS_FAILED(mImage.EnsureFrame(0, 0, 0, mInfo.image_width, mInfo.image_height,
                                     gfxASurface::ImageFormatRGB24,
                                     &mImageData, &imagelength))) {
      mState = JPEG_ERROR;
      PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (could not initialize image frame)"));
      return;
    }

    PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
           ("        JPEGDecoderAccounting: nsJPEGDecoder::Write -- created image frame with %ux%u pixels",
            mInfo.image_width, mInfo.image_height));

    // Tell the superclass we're starting a frame
    PostFrameStart();

    mState = JPEG_START_DECOMPRESS;
  }

  case JPEG_START_DECOMPRESS:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- entering JPEG_START_DECOMPRESS case");
    /* Step 4: set parameters for decompression */

    /* FIXME -- Should reset dct_method and dither mode
     * for final pass of progressive JPEG
     */
    mInfo.dct_method =  JDCT_ISLOW;
    mInfo.dither_mode = JDITHER_FS;
    mInfo.do_fancy_upsampling = TRUE;
    mInfo.enable_2pass_quant = FALSE;
    mInfo.do_block_smoothing = TRUE;

    /* Step 5: Start decompressor */
    if (jpeg_start_decompress(&mInfo) == FALSE) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_start_decompress())"));
      return; /* I/O suspension */
    }

    /* Force to use our YCbCr to Packed RGB converter when possible */
    if (!mTransform && (mCMSMode != eCMSMode_All) &&
        mInfo.jpeg_color_space == JCS_YCbCr && mInfo.out_color_space == JCS_RGB) {
      /* Special case for the most common case: transform from YCbCr direct into packed ARGB */
      mInfo.out_color_components = 4; /* Packed ARGB pixels are always 4 bytes...*/
      mInfo.cconvert->color_convert = ycc_rgb_convert_argb;
    }

    /* If this is a progressive JPEG ... */
    mState = mInfo.buffered_image ? JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
  }

  case JPEG_DECOMPRESS_SEQUENTIAL:
  {
    if (mState == JPEG_DECOMPRESS_SEQUENTIAL)
    {
      LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_SEQUENTIAL case");
      
      bool suspend;
      OutputScanlines(&suspend);
      
      if (suspend) {
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
        return; /* I/O suspension */
      }
      
      /* If we've completed image output ... */
      NS_ASSERTION(mInfo.output_scanline == mInfo.output_height, "We didn't process all of the data!");
      mState = JPEG_DONE;
    }
  }

  case JPEG_DECOMPRESS_PROGRESSIVE:
  {
    if (mState == JPEG_DECOMPRESS_PROGRESSIVE)
    {
      LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");

      int status;
      do {
        status = jpeg_consume_input(&mInfo);
      } while ((status != JPEG_SUSPENDED) &&
               (status != JPEG_REACHED_EOI));

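      /* All buffered input has now been absorbed, so mInfo.input_scan_number
       * refers to the newest scan we have data for; the loop below picks the
       * scan that is complete enough to display. */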
      for (;;) {
        if (mInfo.output_scanline == 0) {
          int scan = mInfo.input_scan_number;

          /* if we haven't displayed anything yet (output_scan_number==0)
             and we have enough data for a complete scan, force output
             of the last full scan */
          if ((mInfo.output_scan_number == 0) &&
              (scan > 1) &&
              (status != JPEG_REACHED_EOI))
            scan--;

          if (!jpeg_start_output(&mInfo, scan)) {
            PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_start_output() - PROGRESSIVE)"));
            return; /* I/O suspension */
          }
        }

        if (mInfo.output_scanline == 0xffffff)
          mInfo.output_scanline = 0;

        bool suspend;
        OutputScanlines(&suspend);

        if (suspend) {
          if (mInfo.output_scanline == 0) {
            /* didn't manage to read any lines - flag so we don't call
               jpeg_start_output() multiple times for the same scan */
            mInfo.output_scanline = 0xffffff;
          }
          PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                 ("} (I/O suspension after OutputScanlines() - PROGRESSIVE)"));
          return; /* I/O suspension */
        }

        if (mInfo.output_scanline == mInfo.output_height)
        {
          if (!jpeg_finish_output(&mInfo)) {
            PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_finish_output() - PROGRESSIVE)"));
            return; /* I/O suspension */
          }

          if (jpeg_input_complete(&mInfo) &&
              (mInfo.input_scan_number == mInfo.output_scan_number))
            break;

          mInfo.output_scanline = 0;
        }
      }

      mState = JPEG_DONE;
    }
  }

  case JPEG_DONE:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::ProcessData -- entering JPEG_DONE case");

    /* Step 7: Finish decompression */

    if (jpeg_finish_decompress(&mInfo) == FALSE) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
      return; /* I/O suspension */
    }

    mState = JPEG_SINK_NON_JPEG_TRAILER;

    /* we're done dude */
    break;
  }
  case JPEG_SINK_NON_JPEG_TRAILER:
    PR_LOG(gJPEGlog, PR_LOG_DEBUG,
           ("[this=%p] nsJPEGDecoder::ProcessData -- entering JPEG_SINK_NON_JPEG_TRAILER case\n", this));

    break;

  case JPEG_ERROR:
    NS_ABORT_IF_FALSE(0, "Should always return immediately after error and not re-enter decoder");
  }

  PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
         ("} (end of function)"));
  return;
}
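
The setjmp() block at the top of this function is the standard libjpeg
error-recovery idiom: the overridden error_exit handler longjmps back into the
decoder instead of letting libjpeg call exit(). A condensed sketch of that
idiom, with hypothetical struct and function names, might look like this:

#include <csetjmp>
#include <cstdio>
extern "C" {
#include <jpeglib.h>
}

// Error manager that jumps back to the decode loop instead of exiting; this
// mirrors the mErr / my_error_exit pair used above.
struct DecodeErrorMgr {
  jpeg_error_mgr pub;      // "public" fields required by libjpeg (must be first)
  jmp_buf setjmp_buffer;   // where to jump back to on a fatal error
};

static void HandleFatalError(j_common_ptr cinfo) {
  DecodeErrorMgr* err = reinterpret_cast<DecodeErrorMgr*>(cinfo->err);
  longjmp(err->setjmp_buffer, 1);
}

static bool DecodeHeader(jpeg_decompress_struct& cinfo, DecodeErrorMgr& err) {
  cinfo.err = jpeg_std_error(&err.pub);
  err.pub.error_exit = HandleFatalError;
  if (setjmp(err.setjmp_buffer)) {
    // A fatal libjpeg error longjmp'ed back here; clean up and report failure.
    jpeg_destroy_decompress(&cinfo);
    return false;
  }
  jpeg_create_decompress(&cinfo);
  // ...install a source manager and call jpeg_read_header() here...
  return true;
}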
Example #12
void
nsPNGDecoder::InitInternal()
{
    mCMSMode = gfxPlatform::GetCMSMode();
    if ((mDecodeFlags & DECODER_NO_COLORSPACE_CONVERSION) != 0)
        mCMSMode = eCMSMode_Off;
    mDisablePremultipliedAlpha = (mDecodeFlags & DECODER_NO_PREMULTIPLY_ALPHA) != 0;

#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
    static png_byte color_chunks[]=
    {   99,  72,  82,  77, '\0',   /* cHRM */
        105,  67,  67,  80, '\0'
    };  /* iCCP */
    static png_byte unused_chunks[]=
    {   98,  75,  71,  68, '\0',   /* bKGD */
        104,  73,  83,  84, '\0',   /* hIST */
        105,  84,  88, 116, '\0',   /* iTXt */
        111,  70,  70, 115, '\0',   /* oFFs */
        112,  67,  65,  76, '\0',   /* pCAL */
        115,  67,  65,  76, '\0',   /* sCAL */
        112,  72,  89, 115, '\0',   /* pHYs */
        115,  66,  73,  84, '\0',   /* sBIT */
        115,  80,  76,  84, '\0',   /* sPLT */
        116,  69,  88, 116, '\0',   /* tEXt */
        116,  73,  77,  69, '\0',   /* tIME */
        122,  84,  88, 116, '\0'
    };  /* zTXt */
#endif

    // For size decodes, we only need a small buffer
    if (IsSizeDecode()) {
        mHeaderBuf = (uint8_t *)moz_xmalloc(BYTES_NEEDED_FOR_DIMENSIONS);
        return;
    }

    /* For full decodes, do png init stuff */

    /* Initialize the container's source image header. */
    /* Always decode to 24 bit pixdepth */

    mPNG = png_create_read_struct(PNG_LIBPNG_VER_STRING,
                                  NULL, nsPNGDecoder::error_callback,
                                  nsPNGDecoder::warning_callback);
    if (!mPNG) {
        PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
        return;
    }

    mInfo = png_create_info_struct(mPNG);
    if (!mInfo) {
        PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
        png_destroy_read_struct(&mPNG, NULL, NULL);
        return;
    }

#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
    /* Ignore unused chunks */
    if (mCMSMode == eCMSMode_Off)
        png_set_keep_unknown_chunks(mPNG, 1, color_chunks, 2);

    png_set_keep_unknown_chunks(mPNG, 1, unused_chunks,
                                (int)sizeof(unused_chunks)/5);
#endif

#ifdef PNG_SET_CHUNK_MALLOC_LIMIT_SUPPORTED
    if (mCMSMode != eCMSMode_Off)
        png_set_chunk_malloc_max(mPNG, 4000000L);
#endif

#ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED
#ifndef PR_LOGGING
    /* Disallow palette-index checking, for speed; we would ignore the warning
     * anyhow unless we have defined PR_LOGGING.  This feature was added at
     * libpng version 1.5.10 and is disabled in the embedded libpng but enabled
     * by default in the system libpng.  This call also disables it in the
     * system libpng, for decoding speed.  Bug #745202.
     */
    png_set_check_for_invalid_index(mPNG, 0);
#endif
#endif

    /* use this as libpng "progressive pointer" (retrieve in callbacks) */
    png_set_progressive_read_fn(mPNG, static_cast<png_voidp>(this),
                                nsPNGDecoder::info_callback,
                                nsPNGDecoder::row_callback,
                                nsPNGDecoder::end_callback);

}
Example #13
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  // The only valid format for WebP decoding for both alpha and non-alpha
  // images is BGRA, where Opaque images have an A of 255.
  // Assume transparency for all images.
  // XXX: This could be compositor-optimized by doing a one-time check for
  // all-255 alpha pixels, but that might interfere with progressive
  // decoding. Probably not worth it?
  PostHasTransparency();
  
  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Transfer from mData to mImageData
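  // Note: both buffers are indexed as 4 * (line * width + pix), which assumes
  // the stride reported by WebPIDecGetRGB() is exactly width * 4.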
  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      for (int pix = 0; pix < width; pix++) {
        // RGBA -> BGRA
        uint32_t DataOffset = 4 * (line * width + pix);
        mImageData[DataOffset+0] = mData[DataOffset+2];
        mImageData[DataOffset+1] = mData[DataOffset+1];
        mImageData[DataOffset+2] = mData[DataOffset+0];
        mImageData[DataOffset+3] = mData[DataOffset+3];
      }
    } 

    // Invalidate the band of rows we just wrote (nsIntRect takes x, y,
    // width, height).
    nsIntRect r(0, mLastLine, width, lastLineRead - mLastLine);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}
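
The per-pixel loop above swizzles libwebp's RGBA output into the frame's BGRA
layout, indexing both buffers with the same offset and therefore assuming a
stride of exactly width * 4. A stride-aware version of the same copy, written
as a stand-alone helper with hypothetical names, might look like this:

#include <cstdint>

// Copy a band of rows from RGBA to BGRA, honoring each buffer's row stride.
// Purely illustrative; not part of the decoder above.
static void CopyRgbaToBgra(const uint8_t* aSrc, int aSrcStride,
                           uint8_t* aDst, int aDstStride,
                           int aWidth, int aFirstRow, int aLastRow) {
  for (int row = aFirstRow; row < aLastRow; ++row) {
    const uint8_t* src = aSrc + row * aSrcStride;
    uint8_t* dst = aDst + row * aDstStride;
    for (int x = 0; x < aWidth; ++x, src += 4, dst += 4) {
      dst[0] = src[2];  // B
      dst[1] = src[1];  // G
      dst[2] = src[0];  // R
      dst[3] = src[3];  // A
    }
  }
}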
Example #14
void
nsICODecoder::WriteInternal(const char* aBuffer, PRUint32 aCount)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  if (!aCount) // aCount=0 means EOF
    return;

  while (aCount && (mPos < ICONCOUNTOFFSET)) { // Skip to the # of icons.
    if (mPos == 2) { // if the third byte is 1: This is an icon, 2: a cursor
      if ((*aBuffer != 1) && (*aBuffer != 2)) {
        PostDataError();
        return;
      }
      mIsCursor = (*aBuffer == 2);
    }
    mPos++; aBuffer++; aCount--;
  }

  if (mPos == ICONCOUNTOFFSET && aCount >= 2) {
    mNumIcons = LITTLE_TO_NATIVE16(((PRUint16*)aBuffer)[0]);
    aBuffer += 2;
    mPos += 2;
    aCount -= 2;
  }

  if (mNumIcons == 0)
    return; // Nothing to do.

  PRUint16 colorDepth = 0;
  // Loop through each entry's dir entry
  while (mCurrIcon < mNumIcons) { 
    if (mPos >= DIRENTRYOFFSET + (mCurrIcon * sizeof(mDirEntryArray)) && 
        mPos < DIRENTRYOFFSET + ((mCurrIcon + 1) * sizeof(mDirEntryArray))) {
      PRUint32 toCopy = sizeof(mDirEntryArray) - 
                        (mPos - DIRENTRYOFFSET - mCurrIcon * sizeof(mDirEntryArray));
      if (toCopy > aCount) {
        toCopy = aCount;
      }
      memcpy(mDirEntryArray + sizeof(mDirEntryArray) - toCopy, aBuffer, toCopy);
      mPos += toCopy;
      aCount -= toCopy;
      aBuffer += toCopy;
    }
    if (aCount == 0)
      return; // Need more data

    IconDirEntry e;
    if (mPos == (DIRENTRYOFFSET + ICODIRENTRYSIZE) + 
                (mCurrIcon * sizeof(mDirEntryArray))) {
      mCurrIcon++;
      ProcessDirEntry(e);
      // We can't use GetRealWidth and GetRealHeight here because those operate
      // on mDirEntry, here we are going through each item in the directory
      if (((e.mWidth == 0 ? 256 : e.mWidth) == PREFICONSIZE && 
           (e.mHeight == 0 ? 256 : e.mHeight) == PREFICONSIZE && 
           (e.mBitCount >= colorDepth)) ||
          (mCurrIcon == mNumIcons && mImageOffset == 0)) {
        mImageOffset = e.mImageOffset;

        // ensure mImageOffset is >= size of the direntry headers (bug #245631)
        PRUint32 minImageOffset = DIRENTRYOFFSET + 
                                  mNumIcons * sizeof(mDirEntryArray);
        if (mImageOffset < minImageOffset) {
          PostDataError();
          return;
        }

        colorDepth = e.mBitCount;
        memcpy(&mDirEntry, &e, sizeof(IconDirEntry));
      }
    }
  }

  if (mPos < mImageOffset) {
    // Skip to (or at least towards) the desired image offset
    PRUint32 toSkip = mImageOffset - mPos;
    if (toSkip > aCount)
      toSkip = aCount;

    mPos    += toSkip;
    aBuffer += toSkip;
    aCount  -= toSkip;
  }

  // If we are within the first PNGSIGNATURESIZE bytes of the image data,
  // then we have either a BMP or a PNG.  We use the first PNGSIGNATURESIZE
  // bytes to determine which one we have.
  if (mCurrIcon == mNumIcons && mPos >= mImageOffset && 
      mPos < mImageOffset + PNGSIGNATURESIZE)
  {
    PRUint32 toCopy = PNGSIGNATURESIZE - (mPos - mImageOffset);
    if (toCopy > aCount) {
      toCopy = aCount;
    }

    memcpy(mSignature + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;

    mIsPNG = !memcmp(mSignature, nsPNGDecoder::pngSignatureBytes, 
                     PNGSIGNATURESIZE);
    if (mIsPNG) {
      mContainedDecoder = new nsPNGDecoder();
      mContainedDecoder->InitSharedDecoder(mImage, mObserver);
      mContainedDecoder->Write(mSignature, PNGSIGNATURESIZE);
      mDataError = mContainedDecoder->HasDataError();
      if (mContainedDecoder->HasDataError()) {
        return;
      }
    }
  }

  // If we have a PNG, let the PNG decoder do all of the rest of the work
  if (mIsPNG && mContainedDecoder && mPos >= mImageOffset + PNGSIGNATURESIZE) {
    mContainedDecoder->Write(aBuffer, aCount);
    mDataError = mContainedDecoder->HasDataError();
    if (mContainedDecoder->HasDataError()) {
      return;
    }
    mPos += aCount;
    aBuffer += aCount;
    aCount = 0;

    // Raymond Chen says that 32bpp only are valid PNG ICOs
    // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
    if (static_cast<nsPNGDecoder*>(mContainedDecoder.get())->HasValidInfo() && 
        static_cast<nsPNGDecoder*>(mContainedDecoder.get())->GetPixelDepth() != 32) {
      PostDataError();
    }
    return;
  }

  // We've processed all of the icon dir entries and are within the 
  // bitmap info size
  if (!mIsPNG && mCurrIcon == mNumIcons && mPos >= mImageOffset && 
      mPos >= mImageOffset + PNGSIGNATURESIZE && 
      mPos < mImageOffset + BITMAPINFOSIZE) {

    // As we were decoding, we did not know if we had a PNG signature or the
    // start of a bitmap information header.  At this point we know we had
    // a bitmap information header and not a PNG signature, so fill the bitmap
    // information header with the data it should already have.
    memcpy(mBIHraw, mSignature, PNGSIGNATURESIZE);

    // We've found the icon.
    PRUint32 toCopy = sizeof(mBIHraw) - (mPos - mImageOffset);
    if (toCopy > aCount)
      toCopy = aCount;

    memcpy(mBIHraw + (mPos - mImageOffset), aBuffer, toCopy);
    mPos += toCopy;
    aCount -= toCopy;
    aBuffer += toCopy;
  }

  // If we have a BMP inside the ICO and we have read the BIH header
  if (!mIsPNG && mPos == mImageOffset + BITMAPINFOSIZE) {

    // Make sure we have a sane value for the bitmap information header
    PRInt32 bihSize = ExtractBIHSizeFromBitmap(reinterpret_cast<PRInt8*>(mBIHraw));
    if (bihSize != BITMAPINFOSIZE) {
      PostDataError();
      return;
    }
    // We are extracting the BPP from the BIH header as it should be trusted 
    // over the one we have from the icon header
    mBPP = ExtractBPPFromBitmap(reinterpret_cast<PRInt8*>(mBIHraw));
    
    // Init the bitmap decoder which will do most of the work for us
    // It will do everything except the AND mask which isn't present in bitmaps
    // bmpDecoder is for local scope ease, it will be freed by mContainedDecoder
    nsBMPDecoder *bmpDecoder = new nsBMPDecoder(); 
    mContainedDecoder = bmpDecoder;
    bmpDecoder->SetUseAlphaData(PR_TRUE);
    mContainedDecoder->SetSizeDecode(IsSizeDecode());
    mContainedDecoder->InitSharedDecoder(mImage, mObserver);

    // The ICO format when containing a BMP does not include the 14 byte
    // bitmap file header. To use the code of the BMP decoder we need to 
    // generate this header ourselves and feed it to the BMP decoder.
    PRInt8 bfhBuffer[BMPFILEHEADERSIZE];
    if (!FillBitmapFileHeaderBuffer(bfhBuffer)) {
      PostDataError();
      return;
    }
    mContainedDecoder->Write((const char*)bfhBuffer, sizeof(bfhBuffer));
    mDataError = mContainedDecoder->HasDataError();
    if (mContainedDecoder->HasDataError()) {
      return;
    }

    // Setup the cursor hot spot if one is present
    SetHotSpotIfCursor();

    // Fix the height on the BMP resource
    FillBitmapInformationBufferHeight((PRInt8*)mBIHraw);

    // Write out the BMP's bitmap info header
    mContainedDecoder->Write(mBIHraw, sizeof(mBIHraw));
    mDataError = mContainedDecoder->HasDataError();
    if (mContainedDecoder->HasDataError()) {
      return;
    }

    // We have the size. If we're doing a size decode, we got what
    // we came for.
    if (IsSizeDecode())
      return;

    // Sometimes the ICO BPP header field is not filled out
    // so we should trust the contained resource over our own
    // information.
    mBPP = bmpDecoder->GetBitsPerPixel();

    // Check to make sure we have valid color settings
    PRUint16 numColors = GetNumColors();
    if (numColors == (PRUint16)-1) {
      PostDataError();
      return;
    }
  }

  // If we have a BMP
  if (!mIsPNG && mContainedDecoder && mPos >= mImageOffset + BITMAPINFOSIZE) {
    PRUint16 numColors = GetNumColors();
    if (numColors == (PRUint16)-1) {
      PostDataError();
      return;
    }
    // Feed the actual image data (not including headers) into the BMP decoder
    PRInt32 bmpDataOffset = mDirEntry.mImageOffset + BITMAPINFOSIZE;
    PRInt32 bmpDataEnd = mDirEntry.mImageOffset + BITMAPINFOSIZE + 
                         static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetCompressedImageSize() +
                         4 * numColors;

    // If we are feeding in the core image data, but we have not yet
    // reached the ICO's 'AND buffer mask'
    if (mPos >= bmpDataOffset && mPos < bmpDataEnd) {

      // Figure out how much data the BMP decoder wants
      PRUint32 toFeed = bmpDataEnd - mPos;
      if (toFeed > aCount) {
        toFeed = aCount;
      }

      mContainedDecoder->Write(aBuffer, toFeed);
      mDataError = mContainedDecoder->HasDataError();
      if (mContainedDecoder->HasDataError()) {
        return;
      }

      mPos += toFeed;
      aCount -= toFeed;
      aBuffer += toFeed;
    }
  
    // If the bitmap is fully processed, treat any left over data as the ICO's
    // 'AND buffer mask' which appears after the bitmap resource.
    if (!mIsPNG && mPos >= bmpDataEnd) {
      // There may be an optional AND bit mask after the data.  This is
      // only used if the alpha data is not already set. The alpha data 
      // is used for 32bpp bitmaps as per the comment in ICODecoder.h
      // The alpha mask should be checked in all other cases.
      if (static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetBitsPerPixel() != 32 || 
          !static_cast<nsBMPDecoder*>(mContainedDecoder.get())->HasAlphaData()) {
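        // The AND mask is a 1-bit-per-pixel bitmap whose rows are padded to a
        // 4-byte (32-pixel) boundary, hence ((width + 31) / 32) * 4 bytes per row.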
        PRUint32 rowSize = ((GetRealWidth() + 31) / 32) * 4; // + 31 to round up
        if (mPos == bmpDataEnd) {
          mPos++;
          mRowBytes = 0;
          mCurLine = GetRealHeight();
          mRow = (PRUint8*)moz_realloc(mRow, rowSize);
          if (!mRow) {
            PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
            return;
          }
        }

        // Ensure memory has been allocated before decoding.
        NS_ABORT_IF_FALSE(mRow, "mRow is null");
        NS_ABORT_IF_FALSE(mImage, "mImage is null");
        if (!mRow || !mImage) {
          PostDataError();
          return;
        }

        while (mCurLine > 0 && aCount > 0) {
          PRUint32 toCopy = NS_MIN(rowSize - mRowBytes, aCount);
          if (toCopy) {
            memcpy(mRow + mRowBytes, aBuffer, toCopy);
            aCount -= toCopy;
            aBuffer += toCopy;
            mRowBytes += toCopy;
          }
          if (rowSize == mRowBytes) {
            mCurLine--;
            mRowBytes = 0;

            PRUint32* imageData = 
              static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetImageData();
            if (!imageData) {
              PostDataError();
              return;
            }
            PRUint32* decoded = imageData + mCurLine * GetRealWidth();
            PRUint32* decoded_end = decoded + GetRealWidth();
            PRUint8* p = mRow, *p_end = mRow + rowSize; 
            while (p < p_end) {
              PRUint8 idx = *p++;
              for (PRUint8 bit = 0x80; bit && decoded<decoded_end; bit >>= 1) {
                // Clear pixel completely for transparency.
                if (idx & bit) {
                  *decoded = 0;
                }
                decoded++;
              }
            }
          }
        }
      }
    }
  }
}
Example #15
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount, DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  uint32_t imagelength;
  // First incremental Image data chunk. Special handling required.
  if (mLastLine == 0 && lastLineRead > 0) {
    imgFrame* aFrame;
    nsresult res = mImage.EnsureFrame(0, 0, 0, width, height,
                                       gfxASurface::ImageFormatARGB32,
                                       (uint8_t**)&mImageData, &imagelength, &aFrame);
    if (NS_FAILED(res) || !mImageData) {
      PostDecoderError(NS_ERROR_FAILURE);
      return;
    }
  }

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      uint32_t *cptr32 = (uint32_t*)(mImageData + (line * width));
      uint8_t *cptr8 = mData + (line * stride);
      for (int pix = 0; pix < width; pix++, cptr8 += 4) {
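        // gfxPackedPixel(a, r, g, b) packs the samples into the frame's
        // premultiplied 32-bit ARGB pixel format.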
        // if ((cptr8[3] != 0) && (cptr8[0] != 0) && (cptr8[1] != 0) && (cptr8[2] != 0))
        *cptr32++ = gfxPackedPixel(cptr8[3], cptr8[0], cptr8[1], cptr8[2]);
      }
    }

    // Invalidate the band of rows we just wrote (nsIntRect takes x, y,
    // width, height).
    nsIntRect r(0, mLastLine, width, lastLineRead - mLastLine);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}