Example #1
// CreateFrame() is used for both simple and animated images
void nsPNGDecoder::CreateFrame(png_uint_32 x_offset, png_uint_32 y_offset,
                               PRInt32 width, PRInt32 height,
                               gfxASurface::gfxImageFormat format)
{
  PRUint32 imageDataLength;
  nsresult rv = mImage->EnsureFrame(GetFrameCount(), x_offset, y_offset,
                                    width, height, format,
                                    &mImageData, &imageDataLength);
  if (NS_FAILED(rv))
    longjmp(png_jmpbuf(mPNG), 5); // NS_ERROR_OUT_OF_MEMORY

  mFrameRect.x = x_offset;
  mFrameRect.y = y_offset;
  mFrameRect.width = width;
  mFrameRect.height = height;

#ifdef PNG_APNG_SUPPORTED
  if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL))
    SetAnimFrameInfo();
#endif

  // Tell the superclass we're starting a frame
  PostFrameStart();

  PR_LOG(gPNGDecoderAccountingLog, PR_LOG_DEBUG,
         ("PNGDecoderAccounting: nsPNGDecoder::CreateFrame -- created "
          "image frame with %dx%d pixels in container %p",
          width, height,
          mImage.get()));

  mFrameHasNoAlpha = PR_TRUE;
}
Example #2
nsresult
Decoder::AllocateFrame()
{
  MOZ_ASSERT(mNeedsNewFrame);
  MOZ_ASSERT(NS_IsMainThread());

  MarkFrameDirty();

  nsresult rv;
  if (mNewFrameData.mPaletteDepth) {
    rv = mImage.EnsureFrame(mNewFrameData.mFrameNum, mNewFrameData.mOffsetX,
                            mNewFrameData.mOffsetY, mNewFrameData.mWidth,
                            mNewFrameData.mHeight, mNewFrameData.mFormat,
                            mNewFrameData.mPaletteDepth,
                            &mImageData, &mImageDataLength,
                            &mColormap, &mColormapSize, &mCurrentFrame);
  } else {
    rv = mImage.EnsureFrame(mNewFrameData.mFrameNum, mNewFrameData.mOffsetX,
                            mNewFrameData.mOffsetY, mNewFrameData.mWidth,
                            mNewFrameData.mHeight, mNewFrameData.mFormat,
                            &mImageData, &mImageDataLength, &mCurrentFrame);
  }

  if (NS_SUCCEEDED(rv) && mNewFrameData.mFrameNum == mFrameCount) {
    PostFrameStart();
  } else if (NS_FAILED(rv)) {
    PostDataError();
  }

  // Mark ourselves as not needing another frame before talking to anyone else
  // so they can tell us if they need yet another.
  mNeedsNewFrame = false;

  return rv;
}
Example #3
//******************************************************************************
nsresult nsGIFDecoder2::BeginImageFrame(uint16_t aDepth)
{
  uint32_t imageDataLength;
  nsresult rv;
  gfxASurface::gfxImageFormat format;
  if (mGIFStruct.is_transparent)
    format = gfxASurface::ImageFormatARGB32;
  else
    format = gfxASurface::ImageFormatRGB24;

  // Use correct format, RGB for first frame, PAL for following frames
  // and include transparency to allow for optimization of opaque images
  if (mGIFStruct.images_decoded) {
    // Image data is stored with original depth and palette
    rv = mImage.EnsureFrame(mGIFStruct.images_decoded,
                            mGIFStruct.x_offset, mGIFStruct.y_offset,
                            mGIFStruct.width, mGIFStruct.height,
                            format, aDepth, &mImageData, &imageDataLength,
                            &mColormap, &mColormapSize);

    // While EnsureFrame can reuse frames, we unconditionally increment
    // mGIFStruct.images_decoded when we're done with a frame, so we both can
    // and need to zero out the colormap and image data after every call to
    // EnsureFrame.
    if (NS_SUCCEEDED(rv) && mColormap) {
      memset(mColormap, 0, mColormapSize);
    }
  } else {
    // Regardless of depth of input, image is decoded into 24bit RGB
    rv = mImage.EnsureFrame(mGIFStruct.images_decoded,
                            mGIFStruct.x_offset, mGIFStruct.y_offset,
                            mGIFStruct.width, mGIFStruct.height,
                            format, &mImageData, &imageDataLength);
  }

  if (NS_FAILED(rv))
    return rv;

  memset(mImageData, 0, imageDataLength);

  mImage.SetFrameDisposalMethod(mGIFStruct.images_decoded,
                                mGIFStruct.disposal_method);

  // Tell the superclass we're starting a frame
  PostFrameStart();

  if (!mGIFStruct.images_decoded) {
    // Send a one-time invalidation for the first frame if it has a y-axis offset.
    // Otherwise, the area may never be refreshed and the placeholder will remain
    // on the screen. (Bug 37589)
    if (mGIFStruct.y_offset > 0) {
      nsIntRect r(0, 0, mGIFStruct.screen_width, mGIFStruct.y_offset);
      PostInvalidation(r);
    }
  }

  mCurrentFrame = mGIFStruct.images_decoded;
  return NS_OK;
}
Example #4
//******************************************************************************
nsresult nsGIFDecoder2::BeginImageFrame(gfx_depth aDepth)
{
    PRUint32 imageDataLength;
    nsresult rv;
    gfxASurface::gfxImageFormat format;
    if (mGIFStruct.is_transparent)
        format = gfxASurface::ImageFormatARGB32;
    else
        format = gfxASurface::ImageFormatRGB24;

    // Use correct format, RGB for first frame, PAL for following frames
    // and include transparency to allow for optimization of opaque images
    if (mGIFStruct.images_decoded) {
        // Image data is stored with original depth and palette
        rv = mImage->AppendPalettedFrame(mGIFStruct.x_offset, mGIFStruct.y_offset,
                                         mGIFStruct.width, mGIFStruct.height,
                                         format, aDepth, &mImageData, &imageDataLength,
                                         &mColormap, &mColormapSize);
    } else {
        // Regardless of depth of input, image is decoded into 24bit RGB
        rv = mImage->AppendFrame(mGIFStruct.x_offset, mGIFStruct.y_offset,
                                 mGIFStruct.width, mGIFStruct.height,
                                 format, &mImageData, &imageDataLength);
    }

    if (NS_FAILED(rv))
        return rv;

    mImage->SetFrameDisposalMethod(mGIFStruct.images_decoded,
                                   mGIFStruct.disposal_method);

    // Tell the superclass we're starting a frame
    PostFrameStart();

    if (!mGIFStruct.images_decoded) {
        // Send a one-time invalidation for the first frame if it has a y-axis offset.
        // Otherwise, the area may never be refreshed and the placeholder will remain
        // on the screen. (Bug 37589)
        if (mGIFStruct.y_offset > 0) {
            PRInt32 imgWidth;
            mImage->GetWidth(&imgWidth);
            nsIntRect r(0, 0, imgWidth, mGIFStruct.y_offset);
            PostInvalidation(r);
        }
    }

    mCurrentFrame = mGIFStruct.images_decoded;
    return NS_OK;
}
Example #5
// Initializes a decoder whose image and observer are already being used by a
// parent decoder
void
Decoder::InitSharedDecoder(uint8_t* imageData, uint32_t imageDataLength,
                           uint32_t* colormap, uint32_t colormapSize,
                           imgFrame* currentFrame)
{
  // No re-initializing
  NS_ABORT_IF_FALSE(!mInitialized, "Can't re-initialize a decoder!");
  NS_ABORT_IF_FALSE(mObserver, "Need an observer!");

  mImageData = imageData;
  mImageDataLength = imageDataLength;
  mColormap = colormap;
  mColormapSize = colormapSize;
  mCurrentFrame = currentFrame;
  // We have all the frame data, so we've started the frame.
  if (!IsSizeDecode()) {
    PostFrameStart();
  }

  // Implementation-specific initialization
  InitInternal();
  mInitialized = true;
}
Example #6
// Initializes a decoder whose image and observer are already being used by a
// parent decoder
void
Decoder::InitSharedDecoder(uint8_t* aImageData, uint32_t aImageDataLength,
                           uint32_t* aColormap, uint32_t aColormapSize,
                           RawAccessFrameRef&& aFrameRef)
{
  // No re-initializing
  NS_ABORT_IF_FALSE(!mInitialized, "Can't re-initialize a decoder!");

  mImageData = aImageData;
  mImageDataLength = aImageDataLength;
  mColormap = aColormap;
  mColormapSize = aColormapSize;
  mCurrentFrame = Move(aFrameRef);

  // We have all the frame data, so we've started the frame.
  if (!IsSizeDecode()) {
    PostFrameStart();
  }

  // Implementation-specific initialization
  InitInternal();
  mInitialized = true;
}
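
The main difference between Example #5 and Example #6 is how the current frame is handed over: the older version copies a raw imgFrame pointer, while the newer one takes a RawAccessFrameRef by rvalue reference and moves it. Below is a minimal sketch of that move-only handle pattern, using a hypothetical FrameHandle type rather than the real RawAccessFrameRef.

#include <memory>
#include <utility>

struct Frame {};

// FrameHandle is a hypothetical stand-in for RawAccessFrameRef: it owns a
// frame and can be moved but not copied.
class FrameHandle {
public:
  FrameHandle() = default;
  explicit FrameHandle(std::unique_ptr<Frame> aFrame)
    : mFrame(std::move(aFrame)) {}
  FrameHandle(FrameHandle&&) = default;
  FrameHandle& operator=(FrameHandle&&) = default;
  FrameHandle(const FrameHandle&) = delete;
  FrameHandle& operator=(const FrameHandle&) = delete;

private:
  std::unique_ptr<Frame> mFrame;
};

class SharedDecoder {
public:
  // Taking the handle by rvalue reference forces the caller to hand over
  // ownership explicitly, as InitSharedDecoder(..., RawAccessFrameRef&&) does.
  void Init(FrameHandle&& aFrameRef) { mCurrentFrame = std::move(aFrameRef); }

private:
  FrameHandle mCurrentFrame;
};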
Example #7
nsresult
Decoder::AllocateFrame()
{
  MOZ_ASSERT(mNeedsNewFrame);
  MOZ_ASSERT(NS_IsMainThread());

  mCurrentFrame = mImage.EnsureFrame(mNewFrameData.mFrameNum,
                                     mNewFrameData.mFrameRect,
                                     mDecodeFlags,
                                     mNewFrameData.mFormat,
                                     mNewFrameData.mPaletteDepth,
                                     mCurrentFrame.get());

  if (mCurrentFrame) {
    // Gather the raw pointers the decoders will use.
    mCurrentFrame->GetImageData(&mImageData, &mImageDataLength);
    mCurrentFrame->GetPaletteData(&mColormap, &mColormapSize);

    if (mNewFrameData.mFrameNum == mFrameCount) {
      PostFrameStart();
    }
  } else {
    PostDataError();
  }

  // Mark ourselves as not needing another frame before talking to anyone else
  // so they can tell us if they need yet another.
  mNeedsNewFrame = false;

  // If we've received any data at all, we may have pending data that needs to
  // be flushed now that we have a frame to decode into.
  if (mBytesDecoded > 0) {
    mNeedsToFlushData = true;
  }

  return mCurrentFrame ? NS_OK : NS_ERROR_FAILURE;
}
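
The closing comment about pending data describes a deferred-decode pattern: bytes that arrive before a frame exists are remembered and replayed once AllocateFrame() succeeds. Here is a minimal sketch of that idea under hypothetical names (PendingDecoder, DecodeInto), not the actual imagelib machinery.

#include <cstddef>
#include <cstdint>
#include <vector>

class PendingDecoder {
public:
  void Write(const uint8_t* aData, size_t aLength) {
    if (!mHaveFrame) {
      // No frame to decode into yet: remember the bytes for later.
      mPending.insert(mPending.end(), aData, aData + aLength);
      mNeedsToFlushData = true;
      return;
    }
    DecodeInto(aData, aLength);
  }

  void OnFrameAllocated() {
    mHaveFrame = true;
    if (mNeedsToFlushData) {
      // Flush everything that arrived before the frame existed.
      DecodeInto(mPending.data(), mPending.size());
      mPending.clear();
      mNeedsToFlushData = false;
    }
  }

private:
  void DecodeInto(const uint8_t*, size_t) { /* decoder-specific work */ }

  std::vector<uint8_t> mPending;
  bool mHaveFrame = false;
  bool mNeedsToFlushData = false;
};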
Example #8
void
nsIconDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  PRUint32 bytesToRead = 0;
  nsresult rv;

  // Performance isn't critical here, so our update rectangle is 
  // always the full icon
  nsIntRect r(0, 0, mWidth, mHeight);

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (PRUint8)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (PRUint8)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);
        if (HasError()) {
          // Setting the size led to an error.
          mState = iconStateFinished;
          return;
        }

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        // Add the frame and signal
        rv = mImage.EnsureFrame(0, 0, 0, mWidth, mHeight,
                                gfxASurface::ImageFormatARGB32,
                                &mImageData, &mPixBytesTotal);
        if (NS_FAILED(rv)) {
          PostDecoderError(rv);
          return;
        }

        // Tell the superclass we're starting a frame
        PostFrameStart();

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels:

        // How many bytes are we reading?
        bytesToRead = NS_MIN(aCount, mPixBytesTotal - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mPixBytesTotal) {
          PostFrameStop();
          PostDecodeDone();
          mState = iconStateFinished;
        }
        break;

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;
    }
  }
}
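
The state machine above implies a very simple stream layout: one width byte, one height byte, then width * height * 4 bytes of 32-bit pixel data. Below is a minimal sketch of what a producer of such a stream would emit, using a hypothetical buildIconStream helper.

#include <cstdint>
#include <vector>

// buildIconStream is a hypothetical helper, not part of imagelib; it only
// assembles a buffer in the layout the state machine above consumes.
std::vector<uint8_t> buildIconStream(uint8_t aWidth, uint8_t aHeight,
                                     const std::vector<uint8_t>& aArgbPixels)
{
  std::vector<uint8_t> stream;
  stream.reserve(2 + aArgbPixels.size());
  stream.push_back(aWidth);   // consumed in iconStateStart
  stream.push_back(aHeight);  // consumed in iconStateHaveHeight
  // The remaining bytes are copied verbatim into mImageData in
  // iconStateReadPixels until mPixBytesTotal bytes have arrived.
  stream.insert(stream.end(), aArgbPixels.begin(), aArgbPixels.end());
  return stream;
}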
Example #9
void
nsJPEGDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  mSegment = (const JOCTET *)aBuffer;
  mSegmentLen = aCount;

  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  /* Return here if there is a fatal error within libjpeg. */
  nsresult error_code;
  // This cast to nsresult makes sense because setjmp() returns whatever we
  // passed to longjmp(), which was actually an nsresult.
  if ((error_code = (nsresult)setjmp(mErr.setjmp_buffer)) != NS_OK) {
    if (error_code == NS_ERROR_FAILURE) {
      PostDataError();
      /* Error due to corrupt stream - return NS_OK and consume silently
         so that libpr0n doesn't throw away a partial image load */
      mState = JPEG_SINK_NON_JPEG_TRAILER;
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (setjmp returned NS_ERROR_FAILURE)"));
      return;
    } else {
      /* Error due to reasons external to the stream (probably out of
         memory) - let libpr0n attempt to clean up, even though
         mozilla is seconds away from falling flat on its face. */
      PostDecoderError(error_code);
      mState = JPEG_ERROR;
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (setjmp returned an error)"));
      return;
    }
  }

  PR_LOG(gJPEGlog, PR_LOG_DEBUG,
         ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));

  switch (mState) {
  case JPEG_HEADER:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- entering JPEG_HEADER case");

    /* Step 3: read file parameters with jpeg_read_header() */
    if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (JPEG_SUSPENDED)"));
      return; /* I/O suspension */
    }

    // Post our size to the superclass
    PostSize(mInfo.image_width, mInfo.image_height);
    if (HasError()) {
      // Setting the size led to an error.
      mState = JPEG_ERROR;
      return;
    }

    /* If we're doing a size decode, we're done. */
    if (IsSizeDecode())
      return;

    /* We're doing a full decode. */
    if (mCMSMode != eCMSMode_Off &&
        (mInProfile = GetICCProfile(mInfo)) != nullptr) {
      uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
      bool mismatch = false;

#ifdef DEBUG_tor
      fprintf(stderr, "JPEG profileSpace: 0x%08X\n", profileSpace);
#endif
      switch (mInfo.jpeg_color_space) {
      case JCS_GRAYSCALE:
        if (profileSpace == icSigRgbData)
          mInfo.out_color_space = JCS_RGB;
        else if (profileSpace != icSigGrayData)
          mismatch = true;
        break;
      case JCS_RGB:
        if (profileSpace != icSigRgbData)
          mismatch = true;
        break;
      case JCS_YCbCr:
        if (profileSpace == icSigRgbData)
          mInfo.out_color_space = JCS_RGB;
        else
          // qcms doesn't support ycbcr
          mismatch = true;
        break;
      case JCS_CMYK:
      case JCS_YCCK:
        // qcms doesn't support cmyk
        mismatch = true;
        break;
      default:
        mState = JPEG_ERROR;
        PostDataError();
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (unknown colorpsace (1))"));
        return;
      }

      if (!mismatch) {
        qcms_data_type type;
        switch (mInfo.out_color_space) {
        case JCS_GRAYSCALE:
          type = QCMS_DATA_GRAY_8;
          break;
        case JCS_RGB:
          type = QCMS_DATA_RGB_8;
          break;
        default:
          mState = JPEG_ERROR;
          PostDataError();
          PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                 ("} (unknown colorpsace (2))"));
          return;
        }
#if 0
        /* We don't currently support CMYK profiles. The following
         * code dealt with lcms types. Add something like this
         * back when we gain support for CMYK.
         */
        /* Adobe Photoshop writes YCCK/CMYK files with inverted data */
        if (mInfo.out_color_space == JCS_CMYK)
          type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
#endif

        if (gfxPlatform::GetCMSOutputProfile()) {

          /* Calculate rendering intent. */
          int intent = gfxPlatform::GetRenderingIntent();
          if (intent == -1)
              intent = qcms_profile_get_rendering_intent(mInProfile);

          /* Create the color management transform. */
          mTransform = qcms_transform_create(mInProfile,
                                          type,
                                          gfxPlatform::GetCMSOutputProfile(),
                                          QCMS_DATA_RGB_8,
                                          (qcms_intent)intent);
        }
      } else {
#ifdef DEBUG_tor
        fprintf(stderr, "ICM profile colorspace mismatch\n");
#endif
      }
    }

    if (!mTransform) {
      switch (mInfo.jpeg_color_space) {
      case JCS_GRAYSCALE:
      case JCS_RGB:
      case JCS_YCbCr:
        mInfo.out_color_space = JCS_RGB;
        break;
      case JCS_CMYK:
      case JCS_YCCK:
        /* libjpeg can convert from YCCK to CMYK, but not to RGB */
        mInfo.out_color_space = JCS_CMYK;
        break;
      default:
        mState = JPEG_ERROR;
        PostDataError();
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (unknown colorpsace (3))"));
        return;
        break;
      }
    }

    /*
     * Don't allocate a giant and superfluous memory buffer
     * when the image is a sequential JPEG.
     */
    mInfo.buffered_image = jpeg_has_multiple_scans(&mInfo);

    /* Used to set up image size so arrays can be allocated */
    jpeg_calc_output_dimensions(&mInfo);

    uint32_t imagelength;
    if (NS_FAILED(mImage.EnsureFrame(0, 0, 0, mInfo.image_width, mInfo.image_height,
                                     gfxASurface::ImageFormatRGB24,
                                     &mImageData, &imagelength))) {
      mState = JPEG_ERROR;
      PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (could not initialize image frame)"));
      return;
    }

    PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
           ("        JPEGDecoderAccounting: nsJPEGDecoder::Write -- created image frame with %ux%u pixels",
            mInfo.image_width, mInfo.image_height));

    // Tell the superclass we're starting a frame
    PostFrameStart();

    mState = JPEG_START_DECOMPRESS;
  }

  case JPEG_START_DECOMPRESS:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- entering JPEG_START_DECOMPRESS case");
    /* Step 4: set parameters for decompression */

    /* FIXME -- Should reset dct_method and dither mode
     * for final pass of progressive JPEG
     */
    mInfo.dct_method =  JDCT_ISLOW;
    mInfo.dither_mode = JDITHER_FS;
    mInfo.do_fancy_upsampling = TRUE;
    mInfo.enable_2pass_quant = FALSE;
    mInfo.do_block_smoothing = TRUE;

    /* Step 5: Start decompressor */
    if (jpeg_start_decompress(&mInfo) == FALSE) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_start_decompress())"));
      return; /* I/O suspension */
    }

    /* Force to use our YCbCr to Packed RGB converter when possible */
    if (!mTransform && (mCMSMode != eCMSMode_All) &&
        mInfo.jpeg_color_space == JCS_YCbCr && mInfo.out_color_space == JCS_RGB) {
      /* Special case for the most common case: transform from YCbCr direct into packed ARGB */
      mInfo.out_color_components = 4; /* Packed ARGB pixels are always 4 bytes...*/
      mInfo.cconvert->color_convert = ycc_rgb_convert_argb;
    }

    /* If this is a progressive JPEG ... */
    mState = mInfo.buffered_image ? JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
  }

  case JPEG_DECOMPRESS_SEQUENTIAL:
  {
    if (mState == JPEG_DECOMPRESS_SEQUENTIAL)
    {
      LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_SEQUENTIAL case");
      
      bool suspend;
      OutputScanlines(&suspend);
      
      if (suspend) {
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
        return; /* I/O suspension */
      }
      
      /* If we've completed image output ... */
      NS_ASSERTION(mInfo.output_scanline == mInfo.output_height, "We didn't process all of the data!");
      mState = JPEG_DONE;
    }
  }

  case JPEG_DECOMPRESS_PROGRESSIVE:
  {
    if (mState == JPEG_DECOMPRESS_PROGRESSIVE)
    {
      LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");

      int status;
      do {
        status = jpeg_consume_input(&mInfo);
      } while ((status != JPEG_SUSPENDED) &&
               (status != JPEG_REACHED_EOI));

      for (;;) {
        if (mInfo.output_scanline == 0) {
          int scan = mInfo.input_scan_number;

          /* if we haven't displayed anything yet (output_scan_number==0)
             and we have enough data for a complete scan, force output
             of the last full scan */
          if ((mInfo.output_scan_number == 0) &&
              (scan > 1) &&
              (status != JPEG_REACHED_EOI))
            scan--;

          if (!jpeg_start_output(&mInfo, scan)) {
            PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_start_output() - PROGRESSIVE)"));
            return; /* I/O suspension */
          }
        }

        if (mInfo.output_scanline == 0xffffff)
          mInfo.output_scanline = 0;

        bool suspend;
        OutputScanlines(&suspend);

        if (suspend) {
          if (mInfo.output_scanline == 0) {
            /* didn't manage to read any lines - flag so we don't call
               jpeg_start_output() multiple times for the same scan */
            mInfo.output_scanline = 0xffffff;
          }
          PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                 ("} (I/O suspension after OutputScanlines() - PROGRESSIVE)"));
          return; /* I/O suspension */
        }

        if (mInfo.output_scanline == mInfo.output_height)
        {
          if (!jpeg_finish_output(&mInfo)) {
            PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_finish_output() - PROGRESSIVE)"));
            return; /* I/O suspension */
          }

          if (jpeg_input_complete(&mInfo) &&
              (mInfo.input_scan_number == mInfo.output_scan_number))
            break;

          mInfo.output_scanline = 0;
        }
      }

      mState = JPEG_DONE;
    }
  }

  case JPEG_DONE:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::ProcessData -- entering JPEG_DONE case");

    /* Step 7: Finish decompression */

    if (jpeg_finish_decompress(&mInfo) == FALSE) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
      return; /* I/O suspension */
    }

    mState = JPEG_SINK_NON_JPEG_TRAILER;

    /* we're done dude */
    break;
  }
  case JPEG_SINK_NON_JPEG_TRAILER:
    PR_LOG(gJPEGlog, PR_LOG_DEBUG,
           ("[this=%p] nsJPEGDecoder::ProcessData -- entering JPEG_SINK_NON_JPEG_TRAILER case\n", this));

    break;

  case JPEG_ERROR:
    NS_ABORT_IF_FALSE(0, "Should always return immediately after error and not re-enter decoder");
  }

  PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
         ("} (end of function)"));
  return;
}
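
Examples #1 and #9 share the same C-style error recovery: the decoder calls setjmp() before driving the library, and a fatal library error longjmp()s back with a code that setjmp() then returns. Here is a minimal, self-contained sketch of that pattern; the names are illustrative, not the real libjpeg or libpng hooks.

#include <csetjmp>
#include <cstdio>

static std::jmp_buf sErrorJump;

// Hypothetical fatal-error callback that a C library might invoke; it never
// returns and instead unwinds back to the setjmp() call site.
static void FatalErrorHandler(int aErrorCode)
{
  std::longjmp(sErrorJump, aErrorCode);
}

int DecodeChunk()
{
  // setjmp() returns 0 on the initial call and, when FatalErrorHandler()
  // fires, whatever value was passed to longjmp(). That is why the decoders
  // above can treat the return value as an nsresult.
  int errorCode = setjmp(sErrorJump);
  if (errorCode != 0) {
    std::fprintf(stderr, "decode failed with code %d\n", errorCode);
    return errorCode;
  }
  // ... call into the C library; FatalErrorHandler() jumps back on failure ...
  return 0;
}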
Example #10
nsresult
nsIconDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount)
{
  nsresult rv;

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  PRUint32 bytesToRead = 0;

  // Performance isn't critical here, so our update rectangle is 
  // always the full icon
  nsIntRect r(0, 0, mWidth, mHeight);

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (PRUint8)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (PRUint8)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);

        // If we're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        // Add the frame and signal
        rv = mImage->AppendFrame(0, 0, mWidth, mHeight,
                                 gfxASurface::ImageFormatARGB32,
                                 &mImageData, &mPixBytesTotal);
        if (NS_FAILED(rv)) {
          mState = iconStateError;
          return rv;
        }

        // Tell the superclass we're starting a frame
        PostFrameStart();

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels:

        // How many bytes are we reading?
        bytesToRead = PR_MIN(aCount, mPixBytesTotal - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mPixBytesTotal) {
          NotifyDone(/* aSuccess = */ PR_TRUE);
          mState = iconStateFinished;
        }
        break;

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;

      case iconStateError:
        return NS_IMAGELIB_ERROR_FAILURE;
        break;
    }
  }

  return NS_OK;
}