LexerTransition<nsIconDecoder::State>
nsIconDecoder::ReadRowOfPixels(const char* aData, size_t aLength)
{
  MOZ_ASSERT(aLength % 4 == 0, "Rows should contain a multiple of four bytes");

  auto result = mPipe.WritePixels<uint32_t>([&]() -> NextPixel<uint32_t> {
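    // WritePixels() is assumed to keep invoking this lambda once per output
    // pixel until it either yields a pixel or asks for more data.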
    if (aLength == 0) {
      return AsVariant(WriteState::NEED_MORE_DATA);  // Done with this row.
    }

    uint32_t pixel;
    memcpy(&pixel, aData, 4);
    aData += 4;
    aLength -= 4;

    return AsVariant(pixel);
  });

  MOZ_ASSERT(result != WriteState::FAILURE);

  Maybe<SurfaceInvalidRect> invalidRect = mPipe.TakeInvalidRect();
  if (invalidRect) {
    PostInvalidation(invalidRect->mInputSpaceRect,
                     Some(invalidRect->mOutputSpaceRect));
  }

  return result == WriteState::FINISHED
       ? Transition::To(State::FINISH, 0)
       : Transition::To(State::ROW_OF_PIXELS, mBytesPerRow);
}
Example #2
// set timeout and frame disposal method for the current frame
void
nsPNGDecoder::EndImageFrame()
{
  if (mFrameIsHidden) {
    return;
  }

  mNumFrames++;

  Opacity opacity = Opacity::SOME_TRANSPARENCY;
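  // 'format' is presumably an nsPNGDecoder member holding the surface format
  // chosen when the frame was created.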
  if (format == gfx::SurfaceFormat::B8G8R8X8) {
    opacity = Opacity::OPAQUE;
  }

#ifdef PNG_APNG_SUPPORTED
  uint32_t numFrames = GetFrameCount();

  // We can't use mPNG->num_frames_read as it may be one ahead.
  if (numFrames > 1) {
    PostInvalidation(mFrameRect);
  }
#endif

  PostFrameStop(opacity, mAnimInfo.mDispose, mAnimInfo.mTimeout,
                mAnimInfo.mBlend);
}
//******************************************************************************
nsresult nsGIFDecoder2::BeginImageFrame(uint16_t aDepth)
{
  uint32_t imageDataLength;
  nsresult rv;
  gfxASurface::gfxImageFormat format;
  if (mGIFStruct.is_transparent)
    format = gfxASurface::ImageFormatARGB32;
  else
    format = gfxASurface::ImageFormatRGB24;

  // Use correct format, RGB for first frame, PAL for following frames
  // and include transparency to allow for optimization of opaque images
  if (mGIFStruct.images_decoded) {
    // Image data is stored with original depth and palette
    rv = mImage.EnsureFrame(mGIFStruct.images_decoded,
                            mGIFStruct.x_offset, mGIFStruct.y_offset,
                            mGIFStruct.width, mGIFStruct.height,
                            format, aDepth, &mImageData, &imageDataLength,
                            &mColormap, &mColormapSize);

    // While EnsureFrame can reuse frames, we unconditionally increment
    // mGIFStruct.images_decoded when we're done with a frame, so we both can
    // and need to zero out the colormap and image data after every call to
    // EnsureFrame.
    if (NS_SUCCEEDED(rv) && mColormap) {
      memset(mColormap, 0, mColormapSize);
    }
  } else {
    // Regardless of depth of input, image is decoded into 24bit RGB
    rv = mImage.EnsureFrame(mGIFStruct.images_decoded,
                            mGIFStruct.x_offset, mGIFStruct.y_offset,
                            mGIFStruct.width, mGIFStruct.height,
                            format, &mImageData, &imageDataLength);
  }

  if (NS_FAILED(rv))
    return rv;

  memset(mImageData, 0, imageDataLength);

  mImage.SetFrameDisposalMethod(mGIFStruct.images_decoded,
                                mGIFStruct.disposal_method);

  // Tell the superclass we're starting a frame
  PostFrameStart();

  if (!mGIFStruct.images_decoded) {
    // Send a one-time invalidation for the first frame if it has a y-axis
    // offset. Otherwise, the area may never be refreshed and the placeholder
    // will remain on the screen. (Bug 37589)
    if (mGIFStruct.y_offset > 0) {
      nsIntRect r(0, 0, mGIFStruct.screen_width, mGIFStruct.y_offset);
      PostInvalidation(r);
    }
  }

  mCurrentFrame = mGIFStruct.images_decoded;
  return NS_OK;
}
Example #4
//******************************************************************************
void nsGIFDecoder2::EndImageFrame()
{
    // First flush all pending image data
    if (!mGIFStruct.images_decoded) {
        // Only need to flush first frame
        FlushImageData();

        // If the first frame is smaller in height than the entire image, send an
        // invalidation for the area it does not have data for.
        // This will clear the remaining bits of the placeholder. (Bug 37589)
        const PRUint32 realFrameHeight = mGIFStruct.height + mGIFStruct.y_offset;
        if (realFrameHeight < mGIFStruct.screen_height) {
            nsIntRect r(0, realFrameHeight,
                        mGIFStruct.screen_width,
                        mGIFStruct.screen_height - realFrameHeight);
            PostInvalidation(r);
        }
        // This transparency check is only valid for first frame
        if (mGIFStruct.is_transparent && !mSawTransparency) {
            mImage->SetFrameHasNoAlpha(mGIFStruct.images_decoded);
        }
    }
    mCurrentRow = mLastFlushedRow = -1;
    mCurrentPass = mLastFlushedPass = 0;

    // Only add frame if we have any rows at all
    if (mGIFStruct.rows_remaining != mGIFStruct.height) {
        if (mGIFStruct.rows_remaining && mGIFStruct.images_decoded) {
            // Clear the remaining rows (only needed for the animation frames)
            PRUint8 *rowp = mImageData + ((mGIFStruct.height - mGIFStruct.rows_remaining) * mGIFStruct.width);
            memset(rowp, 0, mGIFStruct.rows_remaining * mGIFStruct.width);
        }

        // We actually have the timeout information before we get the lzw encoded
        // image data, at least according to the spec, but we delay in setting the
        // timeout for the image until here to help ensure that we have the whole
        // image frame decoded before we go off and try to display another frame.
        mImage->SetFrameTimeout(mGIFStruct.images_decoded, mGIFStruct.delay_time);
    }

    // Unconditionally increment images_decoded, because we unconditionally
    // append frames in BeginImageFrame(). This ensures that images_decoded
    // always refers to the frame in mImage we're currently decoding,
    // even if some of them weren't decoded properly and thus are blank.
    mGIFStruct.images_decoded++;

    // Tell the superclass we finished a frame
    PostFrameStop();

    // Reset the transparent pixel
    if (mOldColor) {
        mColormap[mGIFStruct.tpixel] = mOldColor;
        mOldColor = 0;
    }

    mCurrentFrame = -1;
}
//******************************************************************************
void nsGIFDecoder2::EndImageFrame()
{
  FrameBlender::FrameAlpha alpha = FrameBlender::kFrameHasAlpha;

  // First flush all pending image data 
  if (!mGIFStruct.images_decoded) {
    // Only need to flush first frame
    FlushImageData();

    // If the first frame is smaller in height than the entire image, send an
    // invalidation for the area it does not have data for.
    // This will clear the remaining bits of the placeholder. (Bug 37589)
    const uint32_t realFrameHeight = mGIFStruct.height + mGIFStruct.y_offset;
    if (realFrameHeight < mGIFStruct.screen_height) {
      nsIntRect r(0, realFrameHeight,
                  mGIFStruct.screen_width,
                  mGIFStruct.screen_height - realFrameHeight);
      PostInvalidation(r);
    }
    // This transparency check is only valid for first frame
    if (mGIFStruct.is_transparent && !mSawTransparency) {
      alpha = FrameBlender::kFrameOpaque;
    }
  }
  mCurrentRow = mLastFlushedRow = -1;
  mCurrentPass = mLastFlushedPass = 0;

  // Only add frame if we have any rows at all
  if (mGIFStruct.rows_remaining != mGIFStruct.height) {
    if (mGIFStruct.rows_remaining && mGIFStruct.images_decoded) {
      // Clear the remaining rows (only needed for the animation frames)
      uint8_t *rowp = mImageData + ((mGIFStruct.height - mGIFStruct.rows_remaining) * mGIFStruct.width);
      memset(rowp, 0, mGIFStruct.rows_remaining * mGIFStruct.width);
    }
  }

  // Unconditionally increment images_decoded, because we unconditionally
  // append frames in BeginImageFrame(). This ensures that images_decoded
  // always refers to the frame in mImage we're currently decoding,
  // even if some of them weren't decoded properly and thus are blank.
  mGIFStruct.images_decoded++;

  // Tell the superclass we finished a frame
  PostFrameStop(alpha,
                FrameBlender::FrameDisposalMethod(mGIFStruct.disposal_method),
                mGIFStruct.delay_time);

  // Reset the transparent pixel
  if (mOldColor) {
    mColormap[mGIFStruct.tpixel] = mOldColor;
    mOldColor = 0;
  }

  mCurrentFrameIndex = -1;
}
Example #6
//******************************************************************************
nsresult nsGIFDecoder2::BeginImageFrame(gfx_depth aDepth)
{
    PRUint32 imageDataLength;
    nsresult rv;
    gfxASurface::gfxImageFormat format;
    if (mGIFStruct.is_transparent)
        format = gfxASurface::ImageFormatARGB32;
    else
        format = gfxASurface::ImageFormatRGB24;

    // Use correct format, RGB for first frame, PAL for following frames
    // and include transparency to allow for optimization of opaque images
    if (mGIFStruct.images_decoded) {
        // Image data is stored with original depth and palette
        rv = mImage->AppendPalettedFrame(mGIFStruct.x_offset, mGIFStruct.y_offset,
                                         mGIFStruct.width, mGIFStruct.height,
                                         format, aDepth, &mImageData, &imageDataLength,
                                         &mColormap, &mColormapSize);
    } else {
        // Regardless of depth of input, image is decoded into 24bit RGB
        rv = mImage->AppendFrame(mGIFStruct.x_offset, mGIFStruct.y_offset,
                                 mGIFStruct.width, mGIFStruct.height,
                                 format, &mImageData, &imageDataLength);
    }

    if (NS_FAILED(rv))
        return rv;

    mImage->SetFrameDisposalMethod(mGIFStruct.images_decoded,
                                   mGIFStruct.disposal_method);

    // Tell the superclass we're starting a frame
    PostFrameStart();

    if (!mGIFStruct.images_decoded) {
        // Send a one-time invalidation for the first frame if it has a y-axis
        // offset. Otherwise, the area may never be refreshed and the
        // placeholder will remain on the screen. (Bug 37589)
        if (mGIFStruct.y_offset > 0) {
            PRInt32 imgWidth;
            mImage->GetWidth(&imgWidth);
            nsIntRect r(0, 0, imgWidth, mGIFStruct.y_offset);
            PostInvalidation(r);
        }
    }

    mCurrentFrame = mGIFStruct.images_decoded;
    return NS_OK;
}
Example #7
// set timeout and frame disposal method for the current frame
void nsPNGDecoder::EndImageFrame()
{
  if (mFrameIsHidden)
    return;

  PRUint32 numFrames = 1;
#ifdef PNG_APNG_SUPPORTED
  numFrames = mImage->GetNumFrames();

  // We can't use mPNG->num_frames_read as it may be one ahead.
  if (numFrames > 1) {
    // Tell the image renderer that the frame is complete
    if (mFrameHasNoAlpha)
      mImage->SetFrameHasNoAlpha(numFrames - 1);

    PostInvalidation(mFrameRect);
  }
#endif

  PostFrameStop();
}
Example #8
// set timeout and frame disposal method for the current frame
void nsPNGDecoder::EndImageFrame()
{
    if (mFrameIsHidden)
        return;

    uint32_t numFrames = 1;
#ifdef PNG_APNG_SUPPORTED
    numFrames = mImage.GetNumFrames();

    // We can't use mPNG->num_frames_read as it may be one ahead.
    if (numFrames > 1) {
        // Tell the image renderer that the frame is complete
        if (mFrameHasNoAlpha)
            mImage.SetFrameHasNoAlpha(numFrames - 1);

        // PNG is always non-premult
        mImage.SetFrameAsNonPremult(numFrames - 1, true);

        PostInvalidation(mFrameRect);
    }
#endif

    PostFrameStop();
}
// set timeout and frame disposal method for the current frame
void nsPNGDecoder::EndImageFrame()
{
  if (mFrameIsHidden)
    return;

  mNumFrames++;

  FrameBlender::FrameAlpha alpha;
  if (mFrameHasNoAlpha)
    alpha = FrameBlender::kFrameOpaque;
  else
    alpha = FrameBlender::kFrameHasAlpha;

#ifdef PNG_APNG_SUPPORTED
  uint32_t numFrames = GetFrameCount();

  // We can't use mPNG->num_frames_read as it may be one ahead.
  if (numFrames > 1) {
    PostInvalidation(mFrameRect);
  }
#endif

  PostFrameStop(alpha, mAnimInfo.mDispose, mAnimInfo.mTimeout, mAnimInfo.mBlend);
}
void
nsGIFDecoder2::WriteInternal(const char *aBuffer, uint32_t aCount, DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // These variables changed names, and renaming would make a much bigger patch :(
  const uint8_t *buf = (const uint8_t *)aBuffer;
  uint32_t len = aCount;

  const uint8_t *q = buf;

  // Add what we have so far to the block.
  // If a previous call left something in the hold, first complete the current
  // block; or, if we are filling a colormap, first complete the colormap.
  uint8_t* p = (mGIFStruct.state == gif_global_colormap) ? (uint8_t*)mGIFStruct.global_colormap :
               (mGIFStruct.state == gif_image_colormap) ? (uint8_t*)mColormap :
               (mGIFStruct.bytes_in_hold) ? mGIFStruct.hold : nullptr;

  if (len == 0 && buf == nullptr) {
    // We've just gotten the frame we asked for. Time to use the data we
    // stashed away.
    len = mGIFStruct.bytes_in_hold;
    q = buf = p;
  } else if (p) {
    // Add what we have so far to the block
    uint32_t l = std::min(len, mGIFStruct.bytes_to_consume);
    memcpy(p+mGIFStruct.bytes_in_hold, buf, l);

    if (l < mGIFStruct.bytes_to_consume) {
      // Not enough in 'buf' to complete current block, get more
      mGIFStruct.bytes_in_hold += l;
      mGIFStruct.bytes_to_consume -= l;
      return;
    }
    // Point 'q' to complete block in hold (or in colormap)
    q = p;
  }

  // Invariant:
  //    'q' is the start of the current block to be processed (hold, colormap
  //    or buf).
  //    'bytes_to_consume' is the number of bytes to consume from 'buf'.
  //    'buf' points to the bytes to be consumed from the input buffer.
  //    'len' is the number of bytes left in the input buffer from position
  //    'buf'.
  //    At the top of the for loop, 'buf' is advanced by 'bytes_to_consume' to
  //    point to the next block and 'len' is adjusted accordingly, so that on
  //    the next iteration 'q' points to the next block.
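  //    Note: GETN(n, s) is assumed to expand to roughly
  //      { mGIFStruct.bytes_to_consume = (n); mGIFStruct.state = (s); }
  //    i.e. "request n more bytes of input, then enter state s".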

  for (;len >= mGIFStruct.bytes_to_consume; q=buf, mGIFStruct.bytes_in_hold = 0) {
    // Eat the current block from the buffer, q keeps pointed at current block
    buf += mGIFStruct.bytes_to_consume;
    len -= mGIFStruct.bytes_to_consume;

    switch (mGIFStruct.state)
    {
    case gif_lzw:
      if (!DoLzw(q)) {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(1, gif_sub_block);
      break;

    case gif_lzw_start:
    {
      // Make sure the transparent pixel is transparent in the colormap
      if (mGIFStruct.is_transparent) {
        // Save old value so we can restore it later
        if (mColormap == mGIFStruct.global_colormap)
            mOldColor = mColormap[mGIFStruct.tpixel];
        mColormap[mGIFStruct.tpixel] = 0;
      }

      /* Initialize LZW parser/decoder */
      mGIFStruct.datasize = *q;
      const int clear_code = ClearCode();
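      // ClearCode() is presumably 1 << datasize; codes below it are literal
      // pixel values, which is why the suffix table below is seeded with them.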
      if (mGIFStruct.datasize > MAX_LZW_BITS ||
          clear_code >= MAX_BITS) {
        mGIFStruct.state = gif_error;
        break;
      }

      mGIFStruct.avail = clear_code + 2;
      mGIFStruct.oldcode = -1;
      mGIFStruct.codesize = mGIFStruct.datasize + 1;
      mGIFStruct.codemask = (1 << mGIFStruct.codesize) - 1;
      mGIFStruct.datum = mGIFStruct.bits = 0;

      /* init the tables */
      for (int i = 0; i < clear_code; i++)
        mGIFStruct.suffix[i] = i;

      mGIFStruct.stackp = mGIFStruct.stack;

      GETN(1, gif_sub_block);
    }
    break;

    /* All GIF files begin with "GIF87a" or "GIF89a" */
    case gif_type:
      if (!strncmp((char*)q, "GIF89a", 6)) {
        mGIFStruct.version = 89;
      } else if (!strncmp((char*)q, "GIF87a", 6)) {
        mGIFStruct.version = 87;
      } else {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(7, gif_global_header);
      break;

    case gif_global_header:
      /* This is the height and width of the "screen" or
       * frame into which images are rendered.  The
       * individual images can be smaller than the
       * screen size and located with an origin anywhere
       * within the screen.
       */

      mGIFStruct.screen_width = GETINT16(q);
      mGIFStruct.screen_height = GETINT16(q + 2);
      mGIFStruct.global_colormap_depth = (q[4]&0x07) + 1;

      if (IsSizeDecode()) {
        MOZ_ASSERT(!mGIFOpen, "Gif should not be open at this point");
        PostSize(mGIFStruct.screen_width, mGIFStruct.screen_height);
        return;
      }

      // screen_bgcolor is not used
      //mGIFStruct.screen_bgcolor = q[5];
      // q[6] = Pixel Aspect Ratio
      //   Not used
      //   float aspect = (float)((q[6] + 15) / 64.0);

      if (q[4] & 0x80) { /* global map */
        // Get the global colormap
        const uint32_t size = (3 << mGIFStruct.global_colormap_depth);
        if (len < size) {
          // Use 'hold' pattern to get the global colormap
          GETN(size, gif_global_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mGIFStruct.global_colormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_global_colormap);
        break;
      }

      GETN(1, gif_image_start);
      break;

    case gif_global_colormap:
      // Everything is already copied into global_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mGIFStruct.global_colormap, 1<<mGIFStruct.global_colormap_depth);
      GETN(1, gif_image_start);
      break;

    case gif_image_start:
      switch (*q) {
        case GIF_TRAILER:
          mGIFStruct.state = gif_done;
          break;

        case GIF_EXTENSION_INTRODUCER:
          GETN(2, gif_extension);
          break;

        case GIF_IMAGE_SEPARATOR:
          GETN(9, gif_image_header);
          break;

        default:
          /* If we get anything other than GIF_IMAGE_SEPARATOR, 
           * GIF_EXTENSION_INTRODUCER, or GIF_TRAILER, there is extraneous data
           * between blocks. The GIF87a spec tells us to keep reading
           * until we find an image separator, but GIF89a says such
           * a file is corrupt. We follow GIF89a and bail out. */
          if (mGIFStruct.images_decoded > 0) {
            /* The file is corrupt, but one or more images have
             * been decoded correctly. In this case, we proceed
             * as if the file were correctly terminated and set
             * the state to gif_done, so the GIF will display.
             */
            mGIFStruct.state = gif_done;
          } else {
            /* No images decoded, there is nothing to display. */
            mGIFStruct.state = gif_error;
          }
      }
      break;

    case gif_extension:
      mGIFStruct.bytes_to_consume = q[1];
      if (mGIFStruct.bytes_to_consume) {
        switch (*q) {
        case GIF_GRAPHIC_CONTROL_LABEL:
          // The GIF spec mandates that the GIFControlExtension header block length is 4 bytes,
          // and the parser for this block reads 4 bytes, so we must enforce that the buffer
          // contains at least this many bytes. If the GIF specifies a different length, we
          // allow that, so long as it's larger; the additional data will simply be ignored.
          mGIFStruct.state = gif_control_extension;
          mGIFStruct.bytes_to_consume = std::max(mGIFStruct.bytes_to_consume, 4u);
          break;

        // The GIF spec also specifies the lengths of the following two extensions' headers
        // (as 12 and 11 bytes, respectively). Because we ignore the plain text extension entirely
        // and sanity-check the actual length of the application extension header before reading it,
        // we allow GIFs to deviate from these values in either direction. This is important for
        // real-world compatibility, as GIFs in the wild exist with application extension headers
        // that are both shorter and longer than 11 bytes.
        case GIF_APPLICATION_EXTENSION_LABEL:
          mGIFStruct.state = gif_application_extension;
          break;

        case GIF_PLAIN_TEXT_LABEL:
          mGIFStruct.state = gif_skip_block;
          break;

        case GIF_COMMENT_LABEL:
          mGIFStruct.state = gif_consume_comment;
          break;

        default:
          mGIFStruct.state = gif_skip_block;
        }
      } else {
        GETN(1, gif_image_start);
      }
      break;

    case gif_consume_block:
      if (!*q)
        GETN(1, gif_image_start);
      else
        GETN(*q, gif_skip_block);
      break;

    case gif_skip_block:
      GETN(1, gif_consume_block);
      break;

    case gif_control_extension:
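      // Graphic Control Extension layout (per GIF89a): q[0] is the packed
      // field (bit 0 = transparent-color flag, bits 2-4 = disposal method),
      // q[1..2] is the delay time, and q[3] is the transparent color index.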
      mGIFStruct.is_transparent = *q & 0x1;
      mGIFStruct.tpixel = q[3];
      mGIFStruct.disposal_method = ((*q) >> 2) & 0x7;
      // Some specs say this disposal method is the 3rd bit (value 4), other
      // specs say value 3. Use 3, the more common interpretation.
      if (mGIFStruct.disposal_method == 4)
        mGIFStruct.disposal_method = 3;
      mGIFStruct.delay_time = GETINT16(q + 1) * 10;
      GETN(1, gif_consume_block);
      break;

    case gif_comment_extension:
      if (*q)
        GETN(*q, gif_consume_comment);
      else
        GETN(1, gif_image_start);
      break;

    case gif_consume_comment:
      GETN(1, gif_comment_extension);
      break;

    case gif_application_extension:
      /* Check for netscape application extension */
      if (mGIFStruct.bytes_to_consume == 11 &&
          (!strncmp((char*)q, "NETSCAPE2.0", 11) ||
           !strncmp((char*)q, "ANIMEXTS1.0", 11)))
        GETN(1, gif_netscape_extension_block);
      else
        GETN(1, gif_consume_block);
      break;

    /* Netscape-specific GIF extension: animation looping */
    case gif_netscape_extension_block:
      if (*q)
        // We might need to consume 3 bytes in
        // gif_consume_netscape_extension, so make sure we have at least that.
        GETN(std::max(3, static_cast<int>(*q)), gif_consume_netscape_extension);
      else
        GETN(1, gif_image_start);
      break;

    /* Parse netscape-specific application extensions */
    case gif_consume_netscape_extension:
      switch (q[0] & 7) {
        case 1:
          /* Loop entire animation specified # of times.  Only read the
             loop count during the first iteration. */
          mGIFStruct.loop_count = GETINT16(q + 1);
          GETN(1, gif_netscape_extension_block);
          break;

        case 2:
          /* Wait for specified # of bytes to enter buffer */
          // Don't do this; this extension sub-block is effectively unused and
          // would accomplish nothing anyway, since our streaming/buffering
          // already takes care of it.
          // See: http://semmix.pl/color/exgraf/eeg24.htm
          GETN(1, gif_netscape_extension_block);
          break;

        default:
          // 0,3-7 are yet to be defined netscape extension codes
          mGIFStruct.state = gif_error;
      }
      break;

    case gif_image_header:
    {
      /* Get image offsets, with respect to the screen origin */
      mGIFStruct.x_offset = GETINT16(q);
      mGIFStruct.y_offset = GETINT16(q + 2);

      /* Get image width and height. */
      mGIFStruct.width  = GETINT16(q + 4);
      mGIFStruct.height = GETINT16(q + 6);

      if (!mGIFStruct.images_decoded) {
        /* Work around broken GIF files where the logical screen
         * size has weird width or height.  We assume that GIF87a
         * files don't contain animations.
         */
        if ((mGIFStruct.screen_height < mGIFStruct.height) ||
            (mGIFStruct.screen_width < mGIFStruct.width) ||
            (mGIFStruct.version == 87)) {
          mGIFStruct.screen_height = mGIFStruct.height;
          mGIFStruct.screen_width = mGIFStruct.width;
          mGIFStruct.x_offset = 0;
          mGIFStruct.y_offset = 0;
        }    
        // Create the image container with the right size.
        BeginGIF();
        if (HasError()) {
          // Setting the size led to an error.
          mGIFStruct.state = gif_error;
          return;
        }

        // If we were doing a size decode, we're done
        if (IsSizeDecode())
          return;
      }

      /* Work around more broken GIF files that have zero image
         width or height */
      if (!mGIFStruct.height || !mGIFStruct.width) {
        mGIFStruct.height = mGIFStruct.screen_height;
        mGIFStruct.width = mGIFStruct.screen_width;
        if (!mGIFStruct.height || !mGIFStruct.width) {
          mGIFStruct.state = gif_error;
          break;
        }
      }

      /* Depth of colors is determined by colormap */
      /* (q[8] & 0x80) indicates local colormap */
      /* bits per pixel is (q[8]&0x07 + 1) when local colormap is set */
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80)
        depth = (q[8]&0x07) + 1;
      uint32_t realDepth = depth;
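      // Widen realDepth until the transparent pixel index fits in the palette.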
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      } 
      // Mask to limit the color values within the colormap
      mColorMask = 0xFF >> (8 - realDepth);
      BeginImageFrame(realDepth);

      if (NeedsNewFrame()) {
        // We now need a new frame from the decoder framework. We leave all our
        // data in the buffer as if it wasn't consumed, copy to our hold and return
        // to the decoder framework.
        uint32_t size = len + mGIFStruct.bytes_to_consume + mGIFStruct.bytes_in_hold;
        if (size) {
          if (SetHold(q, mGIFStruct.bytes_to_consume + mGIFStruct.bytes_in_hold, buf, len)) {
            // Back into the decoder infrastructure so we can get called again.
            GETN(9, gif_image_header_continue);
            return;
          }
        }
        break;
      } else {
        // FALL THROUGH
      }
    }

    case gif_image_header_continue:
    {
      // While decoders can reuse frames, we unconditionally increment
      // mGIFStruct.images_decoded when we're done with a frame, so we both can
      // and need to zero out the colormap and image data after every new frame.
      memset(mImageData, 0, mImageDataLength);
      if (mColormap) {
        memset(mColormap, 0, mColormapSize);
      }

      if (!mGIFStruct.images_decoded) {
        // Send a one-time invalidation for the first frame if it has a y-axis
        // offset. Otherwise, the area may never be refreshed and the
        // placeholder will remain on the screen. (Bug 37589)
        if (mGIFStruct.y_offset > 0) {
          nsIntRect r(0, 0, mGIFStruct.screen_width, mGIFStruct.y_offset);
          PostInvalidation(r);
        }
      }

      if (q[8] & 0x40) {
        mGIFStruct.interlaced = true;
        mGIFStruct.ipass = 1;
      } else {
        mGIFStruct.interlaced = false;
        mGIFStruct.ipass = 0;
      }

      /* Only apply the Haeberli display hack on the first frame */
      mGIFStruct.progressive_display = (mGIFStruct.images_decoded == 0);

      /* Clear state from last image */
      mGIFStruct.irow = 0;
      mGIFStruct.rows_remaining = mGIFStruct.height;
      mGIFStruct.rowp = mImageData;

      /* Depth of colors is determined by colormap */
      /* (q[8] & 0x80) indicates local colormap */
      /* bits per pixel is (q[8]&0x07 + 1) when local colormap is set */
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80)
        depth = (q[8]&0x07) + 1;
      uint32_t realDepth = depth;
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      }

      if (q[8] & 0x80) /* has a local colormap? */
      {
        mGIFStruct.local_colormap_size = 1 << depth;
        if (!mGIFStruct.images_decoded) {
          // First frame has local colormap, allocate space for it
          // as the image frame doesn't have its own palette
          mColormapSize = sizeof(uint32_t) << realDepth;
          if (!mGIFStruct.local_colormap) {
            mGIFStruct.local_colormap = (uint32_t*)moz_xmalloc(mColormapSize);
          }
          mColormap = mGIFStruct.local_colormap;
        }
        const uint32_t size = 3 << depth;
        if (mColormapSize > size) {
          // Clear the notfilled part of the colormap
          memset(((uint8_t*)mColormap) + size, 0, mColormapSize - size);
        }
        if (len < size) {
          // Use 'hold' pattern to get the image colormap
          GETN(size, gif_image_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mColormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_image_colormap);
        break;
      } else {
        /* Switch back to the global palette */
        if (mGIFStruct.images_decoded) {
          // Copy global colormap into the palette of current frame
          memcpy(mColormap, mGIFStruct.global_colormap, mColormapSize);
        } else {
          mColormap = mGIFStruct.global_colormap;
        }
      }
      GETN(1, gif_lzw_start);
    }
    break;

    case gif_image_colormap:
      // Everything is already copied into local_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mColormap, mGIFStruct.local_colormap_size);
      GETN(1, gif_lzw_start);
      break;

    case gif_sub_block:
      mGIFStruct.count = *q;
      if (mGIFStruct.count) {
        /* Still working on the same image: Process next LZW data block */
        /* Make sure there are still rows left. If the GIF data */
        /* is corrupt, we may not get an explicit terminator.   */
        if (!mGIFStruct.rows_remaining) {
#ifdef DONT_TOLERATE_BROKEN_GIFS
          mGIFStruct.state = gif_error;
          break;
#else
          /* This is an illegal GIF, but we remain tolerant. */
          GETN(1, gif_sub_block);
#endif
          if (mGIFStruct.count == GIF_TRAILER) {
            /* Found a terminator anyway, so consider the image done */
            GETN(1, gif_done);
            break;
          }
        }
        GETN(mGIFStruct.count, gif_lzw);
      } else {
        /* See if there are any more images in this sequence. */
        EndImageFrame();
        GETN(1, gif_image_start);
      }
      break;

    case gif_done:
      MOZ_ASSERT(!IsSizeDecode(), "Size decodes shouldn't reach gif_done");
      FinishInternal();
      goto done;

    case gif_error:
      PostDataError();
      return;

    // We shouldn't ever get here.
    default:
      break;
    }
  }

  // if an error state is set but no data remains, code flow reaches here
  if (mGIFStruct.state == gif_error) {
      PostDataError();
      return;
  }

  // Copy the leftover into mGIFStruct.hold
  if (len) {
    // Add what we have so far to the block
    if (mGIFStruct.state != gif_global_colormap && mGIFStruct.state != gif_image_colormap) {
      if (!SetHold(buf, len)) {
        PostDataError();
        return;
      }
    } else {
      uint8_t* p = (mGIFStruct.state == gif_global_colormap) ? (uint8_t*)mGIFStruct.global_colormap :
                                                               (uint8_t*)mColormap;
      memcpy(p, buf, len);
      mGIFStruct.bytes_in_hold = len;
    }

    mGIFStruct.bytes_to_consume -= len;
  }

// We want to flush before returning if we're on the first frame
done:
  if (!mGIFStruct.images_decoded) {
    FlushImageData();
    mLastFlushedRow = mCurrentRow;
    mLastFlushedPass = mCurrentPass;
  }

  return;
}
void
nsIconDecoder::WriteInternal(const char* aBuffer, uint32_t aCount,
                             DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  uint32_t bytesToRead = 0;
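
  // The icon stream is assumed to be laid out as: one width byte, one height
  // byte, and then width * height 32-bit pixels.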

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (uint8_t)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (uint8_t)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);

        PostHasTransparency();

        if (HasError()) {
          // Setting the size led to an error.
          mState = iconStateFinished;
          return;
        }

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        if (!mImageData) {
          PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
          return;
        }

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels: {

        // How many bytes are we reading?
        bytesToRead = std::min(aCount, mImageDataLength - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Performance isn't critical here, so our update rectangle is
        // always the full icon
        nsIntRect r(0, 0, mWidth, mHeight);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mImageDataLength) {
          PostFrameStop();
          PostDecodeDone();
          mState = iconStateFinished;
        }
        break;
      }

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;
    }
  }
}
Example #12
void
nsIconDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  PRUint32 bytesToRead = 0;
  nsresult rv;

  // Performance isn't critical here, so our update rectangle is 
  // always the full icon
  nsIntRect r(0, 0, mWidth, mHeight);

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (PRUint8)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (PRUint8)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);
        if (HasError()) {
          // Setting the size led to an error.
          mState = iconStateFinished;
          return;
        }

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        // Add the frame and signal
        rv = mImage.EnsureFrame(0, 0, 0, mWidth, mHeight,
                                gfxASurface::ImageFormatARGB32,
                                &mImageData, &mPixBytesTotal);
        if (NS_FAILED(rv)) {
          PostDecoderError(rv);
          return;
        }

        // Tell the superclass we're starting a frame
        PostFrameStart();

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels:

        // How many bytes are we reading?
        bytesToRead = NS_MIN(aCount, mPixBytesTotal - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mPixBytesTotal) {
          PostFrameStop();
          PostDecodeDone();
          mState = iconStateFinished;
        }
        break;

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;
    }
  }
}
Example #13
//******************************************************************************
void
nsGIFDecoder2::EndImageFrame()
{
  Opacity opacity = Opacity::SOME_TRANSPARENCY;

  // First flush all pending image data
  if (!mGIFStruct.images_decoded) {
    // Only need to flush first frame
    FlushImageData();

    // If the first frame is smaller in height than the entire image, send an
    // invalidation for the area it does not have data for.
    // This will clear the remaining bits of the placeholder. (Bug 37589)
    const uint32_t realFrameHeight = mGIFStruct.height + mGIFStruct.y_offset;
    if (realFrameHeight < mGIFStruct.screen_height) {
      nsIntRect r(0, realFrameHeight,
                  mGIFStruct.screen_width,
                  mGIFStruct.screen_height - realFrameHeight);
      PostInvalidation(r);
    }

    // The first frame was preallocated with alpha; if it wasn't transparent, we
    // should fix that. We can also mark it opaque unconditionally if we didn't
    // actually see any transparent pixels - this test is only valid for the
    // first frame.
    if (!mGIFStruct.is_transparent || !mSawTransparency) {
      opacity = Opacity::OPAQUE;
    }
  }
  mCurrentRow = mLastFlushedRow = -1;
  mCurrentPass = mLastFlushedPass = 0;

  // Only add frame if we have any rows at all
  if (mGIFStruct.rows_remaining != mGIFStruct.height) {
    if (mGIFStruct.rows_remaining && mGIFStruct.images_decoded) {
      // Clear the remaining rows (only needed for the animation frames)
      uint8_t* rowp =
        mImageData + ((mGIFStruct.height - mGIFStruct.rows_remaining) *
                      mGIFStruct.width);
      memset(rowp, 0, mGIFStruct.rows_remaining * mGIFStruct.width);
    }
  }

  // Unconditionally increment images_decoded, because we unconditionally
  // append frames in BeginImageFrame(). This ensures that images_decoded
  // always refers to the frame in mImage we're currently decoding,
  // even if some of them weren't decoded properly and thus are blank.
  mGIFStruct.images_decoded++;

  // Tell the superclass we finished a frame
  PostFrameStop(opacity,
                DisposalMethod(mGIFStruct.disposal_method),
                mGIFStruct.delay_time);

  // Reset the transparent pixel
  if (mOldColor) {
    mColormap[mGIFStruct.tpixel] = mOldColor;
    mOldColor = 0;
  }

  mCurrentFrameIndex = -1;
}
Example #14
nsresult
nsIconDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount)
{
  nsresult rv;

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  PRUint32 bytesToRead = 0;

  // Performance isn't critical here, so our update rectangle is 
  // always the full icon
  nsIntRect r(0, 0, mWidth, mHeight);

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (PRUint8)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (PRUint8)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        // Add the frame and signal
        rv = mImage->AppendFrame(0, 0, mWidth, mHeight,
                                 gfxASurface::ImageFormatARGB32,
                                 &mImageData, &mPixBytesTotal);
        if (NS_FAILED(rv)) {
          mState = iconStateError;
          return rv;
        }

        // Tell the superclass we're starting a frame
        PostFrameStart();

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels:

        // How many bytes are we reading?
        bytesToRead = PR_MIN(aCount, mPixBytesTotal - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mPixBytesTotal) {
          NotifyDone(/* aSuccess = */ PR_TRUE);
          mState = iconStateFinished;
        }
        break;

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;

      case iconStateError:
        return NS_IMAGELIB_ERROR_FAILURE;
        break;
    }
  }

  return NS_OK;
}
Example #15
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);
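  // mData now points at the RGBA rows decoded so far (or is null if nothing
  // is ready yet); lastLineRead is presumably the index of the last decoded
  // row.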

  // The only valid format for WebP decoding for both alpha and non-alpha
  // images is BGRA, where Opaque images have an A of 255.
  // Assume transparency for all images.
  // XXX: This could be compositor-optimized by doing a one-time check for
  // all-255 alpha pixels, but that might interfere with progressive
  // decoding. Probably not worth it?
  PostHasTransparency();
  
  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Transfer from mData to mImageData
  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      for (int pix = 0; pix < width; pix++) {
        // RGBA -> BGRA
        uint32_t DataOffset = 4 * (line * width + pix);
        mImageData[DataOffset+0] = mData[DataOffset+2];
        mImageData[DataOffset+1] = mData[DataOffset+1];
        mImageData[DataOffset+2] = mData[DataOffset+0];
        mImageData[DataOffset+3] = mData[DataOffset+3];
      }
    } 

    // Invalidate only the band of rows decoded in this pass.
    nsIntRect r(0, mLastLine, width, lastLineRead - mLastLine);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}
void
nsJPEGDecoder::OutputScanlines(bool* suspend)
{
  *suspend = false;

  const uint32_t top = mInfo.output_scanline;
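  // 'top' records where this batch of scanlines starts, so the decoded band
  // can be invalidated in one go at the end.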

  while ((mInfo.output_scanline < mInfo.output_height)) {
      // Use the Cairo image buffer as scanline buffer
      uint32_t* imageRow = ((uint32_t*)mImageData) +
                           (mInfo.output_scanline * mInfo.output_width);

      if (mInfo.out_color_space == MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB) {
        // Special case: scanline will be directly converted into packed ARGB
        if (jpeg_read_scanlines(&mInfo, (JSAMPARRAY)&imageRow, 1) != 1) {
          *suspend = true; // suspend
          break;
        }
        continue; // all done for this row!
      }

      JSAMPROW sampleRow = (JSAMPROW)imageRow;
      if (mInfo.output_components == 3) {
        // Put the pixels at end of row to enable in-place expansion
        sampleRow += mInfo.output_width;
      }

      // Request one scanline.  Returns 0 or 1 scanlines.
      if (jpeg_read_scanlines(&mInfo, &sampleRow, 1) != 1) {
        *suspend = true; // suspend
        break;
      }

      if (mTransform) {
        JSAMPROW source = sampleRow;
        if (mInfo.out_color_space == JCS_GRAYSCALE) {
          // Convert from the 1-byte gray pixels at the beginning of the row
          // to the 3-byte RGB pixels at the 'end' of the row
          sampleRow += mInfo.output_width;
        }
        qcms_transform_data(mTransform, source, sampleRow, mInfo.output_width);
        // Move 3byte RGB data to end of row
        if (mInfo.out_color_space == JCS_CMYK) {
          memmove(sampleRow + mInfo.output_width,
                  sampleRow,
                  3 * mInfo.output_width);
          sampleRow += mInfo.output_width;
        }
      } else {
        if (mInfo.out_color_space == JCS_CMYK) {
          // Convert from CMYK to RGB
          // We cannot convert directly to Cairo, as the CMSRGBTransform
          // may want to do an RGB transform...
          // It would be better to have a platform CMS-enabled transformation
          // from CMYK to (A)RGB...
          cmyk_convert_rgb((JSAMPROW)imageRow, mInfo.output_width);
          sampleRow += mInfo.output_width;
        }
        if (mCMSMode == eCMSMode_All) {
          // No embedded ICC profile - treat as sRGB
          qcms_transform* transform = gfxPlatform::GetCMSRGBTransform();
          if (transform) {
            qcms_transform_data(transform, sampleRow, sampleRow,
                                mInfo.output_width);
          }
        }
      }

      // counter for while() loops below
      uint32_t idx = mInfo.output_width;

      // copy as bytes until source pointer is 32-bit-aligned
      for (; (NS_PTR_TO_UINT32(sampleRow) & 0x3) && idx; --idx) {
        *imageRow++ = gfxPackedPixel(0xFF, sampleRow[0], sampleRow[1],
                                     sampleRow[2]);
        sampleRow += 3;
      }

      // copy pixels in blocks of 4
      while (idx >= 4) {
        GFX_BLOCK_RGB_TO_FRGB(sampleRow, imageRow);
        idx       -=  4;
        sampleRow += 12;
        imageRow  +=  4;
      }

      // copy remaining pixel(s)
      while (idx--) {
        // 32-bit read of final pixel will exceed buffer, so read bytes
        *imageRow++ = gfxPackedPixel(0xFF, sampleRow[0], sampleRow[1],
                                     sampleRow[2]);
        sampleRow += 3;
      }
  }

  if (top != mInfo.output_scanline) {
      nsIntRect r(0, top, mInfo.output_width, mInfo.output_scanline-top);
      PostInvalidation(r);
  }

}
Example #17
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount, DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  uint32_t imagelength;
  // First incremental Image data chunk. Special handling required.
  if (mLastLine == 0 && lastLineRead > 0) {
    imgFrame* aFrame;
    nsresult res = mImage.EnsureFrame(0, 0, 0, width, height,
                                       gfxASurface::ImageFormatARGB32,
                                       (uint8_t**)&mImageData, &imagelength, &aFrame);
    if (NS_FAILED(res) || !mImageData) {
      PostDecoderError(NS_ERROR_FAILURE);
      return;
    }
  }

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      uint32_t *cptr32 = (uint32_t*)(mImageData + (line * width));
      uint8_t *cptr8 = mData + (line * stride);
      for (int pix = 0; pix < width; pix++, cptr8 += 4) {
        // if ((cptr8[3] != 0) && (cptr8[0] != 0) && (cptr8[1] != 0) && (cptr8[2] != 0))
        *cptr32++ = gfxPackedPixel(cptr8[3], cptr8[0], cptr8[1], cptr8[2]);
      }
    }

    // Invalidate only the band of rows decoded in this pass.
    nsIntRect r(0, mLastLine, width, lastLineRead - mLastLine);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}