Example 1
void WEBPImageDecoder::applyColorProfile(const uint8_t* data, size_t size, ImageFrame& buffer)
{
    int width;
    int decodedHeight;
    if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
        return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
    if (decodedHeight <= 0)
        return;

    if (!m_haveReadProfile) {
        readColorProfile(data, size);
        m_haveReadProfile = true;
    }

    ASSERT(width == scaledSize().width());
    ASSERT(decodedHeight <= scaledSize().height());

    for (int y = m_decodedHeight; y < decodedHeight; ++y) {
        uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(0, y));
        if (qcms_transform* transform = colorTransform())
            qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
        uint8_t* pixel = row;
        for (int x = 0; x < width; ++x, pixel += 4)
            buffer.setRGBA(x, y, pixel[0], pixel[1], pixel[2], pixel[3]);
    }

    m_decodedHeight = decodedHeight;
}
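
A note on the helpers this example leans on: readColorProfile() and colorTransform() are not shown. As a minimal sketch (the function name and control flow are assumptions, not the decoder's code), a transform like the one applied per row above could be built with the qcms API roughly as follows:

#include "qcms.h"

// Hedged sketch: build a qcms transform from an embedded ICC profile to sRGB
// for 8-bit RGBA pixels. Error handling here is illustrative only.
qcms_transform* createColorTransform(const char* iccData, size_t iccLength)
{
    qcms_profile* inputProfile = qcms_profile_from_memory(iccData, iccLength);
    if (!inputProfile)
        return 0;
    if (qcms_profile_is_bogus(inputProfile)) {
        // Reject obviously broken profiles rather than producing garbage colors.
        qcms_profile_release(inputProfile);
        return 0;
    }
    qcms_profile* outputProfile = qcms_profile_sRGB();
    qcms_transform* transform = qcms_transform_create(inputProfile, QCMS_DATA_RGBA_8,
        outputProfile, QCMS_DATA_RGBA_8, QCMS_INTENT_PERCEPTUAL);
    qcms_profile_release(inputProfile);
    qcms_profile_release(outputProfile);
    return transform; // Caller owns it; release with qcms_transform_release().
}
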
Example 2
bool WEBPImageDecoder::updateDemuxer()
{
    if (failed())
        return false;

    if (m_haveAlreadyParsedThisData)
        return true;

    m_haveAlreadyParsedThisData = true;

    const unsigned webpHeaderSize = 30;
    if (m_data->size() < webpHeaderSize)
        return false; // Await VP8X header so WebPDemuxPartial succeeds.

    WebPDemuxDelete(m_demux);
    WebPData inputData = { reinterpret_cast<const uint8_t*>(m_data->data()), m_data->size() };
    m_demux = WebPDemuxPartial(&inputData, &m_demuxState);
    if (!m_demux || (isAllDataReceived() && m_demuxState != WEBP_DEMUX_DONE))
        return setFailed();

    ASSERT(m_demuxState > WEBP_DEMUX_PARSING_HEADER);
    if (!WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT))
        return false; // Wait until the encoded image frame data arrives.

    if (!isDecodedSizeAvailable()) {
        int width = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_WIDTH);
        int height = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_HEIGHT);
        if (!setSize(width, height))
            return setFailed();

        m_formatFlags = WebPDemuxGetI(m_demux, WEBP_FF_FORMAT_FLAGS);
        if (!(m_formatFlags & ANIMATION_FLAG)) {
            m_repetitionCount = cAnimationNone;
        } else {
            // Since we have parsed at least one frame, even if partially, we
            // know that the global animation (ANIM) properties have been read,
            // because an ANIM chunk must precede the ANMF frame chunks.
            m_repetitionCount = WebPDemuxGetI(m_demux, WEBP_FF_LOOP_COUNT);
            // Repetition count is always <= 16 bits.
            ASSERT(m_repetitionCount == (m_repetitionCount & 0xffff));
            // Repetition count is the number of animation cycles to show,
            // where 0 means "infinite". But ImageSource::repetitionCount()
            // returns -1 for "infinite", and 0 and up for "show the image
            // animation one cycle more than the value". Subtract one here
            // to correctly handle the finite and infinite cases.
            --m_repetitionCount;
            // FIXME: Implement ICC profile support for animated images.
            m_formatFlags &= ~ICCP_FLAG;
        }

#if USE(QCMSLIB)
        if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile())
            readColorProfile();
#endif
    }

    ASSERT(isDecodedSizeAvailable());
    return true;
}
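
updateDemuxer() only validates the container and reads canvas-level metadata; the encoded frames are pulled out of the demuxer afterwards. A minimal sketch of that follow-up step using the libwebp iterator API (dumpFrames() is an illustrative helper, not part of the decoder):

#include <cstdio>
#include "webp/demux.h"

// Walk the frames the demuxer currently knows about and print their placement
// and timing metadata. Works on the partial demuxer created by WebPDemuxPartial.
void dumpFrames(const WebPDemuxer* demux)
{
    WebPIterator iter;
    if (!WebPDemuxGetFrame(demux, 1, &iter))
        return; // No complete frame available yet.
    do {
        printf("frame %d: %dx%d at (%d,%d), %d ms, dispose=%d\n",
            iter.frame_num, iter.width, iter.height,
            iter.x_offset, iter.y_offset, iter.duration,
            static_cast<int>(iter.dispose_method));
    } while (WebPDemuxNextFrame(&iter));
    WebPDemuxReleaseIterator(&iter);
}
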
Example 3
void PNGImageDecoder::headerAvailable()
{
    png_structp png = m_reader->pngPtr();
    png_infop info = m_reader->infoPtr();
    png_uint_32 width = png_get_image_width(png, info);
    png_uint_32 height = png_get_image_height(png, info);
    
    // Protect against large images.
    if (width > cMaxPNGSize || height > cMaxPNGSize) {
        longjmp(JMPBUF(png), 1);
        return;
    }
    
    // We can fill in the size now that the header is available.  Avoid memory
    // corruption issues by neutering setFailed() during this call; if we don't
    // do this, failures will cause |m_reader| to be deleted, and our jmpbuf
    // will cease to exist.  Note that we'll still properly set the failure flag
    // in this case as soon as we longjmp().
    m_doNothingOnFailure = true;
    bool result = setSize(width, height);
    m_doNothingOnFailure = false;
    if (!result) {
        longjmp(JMPBUF(png), 1);
        return;
    }

    int bitDepth, colorType, interlaceType, compressionType, filterType, channels;
    png_get_IHDR(png, info, &width, &height, &bitDepth, &colorType, &interlaceType, &compressionType, &filterType);

    if ((colorType == PNG_COLOR_TYPE_RGB || colorType == PNG_COLOR_TYPE_RGB_ALPHA) && !m_ignoreGammaAndColorProfile) {
        // We currently support color profiles only for RGB and RGBA PNGs.  Supporting
        // color profiles for gray-scale images is slightly tricky, at least using the
        // CoreGraphics ICC library, because we expand gray-scale images to RGB but we
        // don't similarly transform the color profile.  We'd either need to transform
        // the color profile or we'd need to decode into a gray-scale image buffer and
        // hand that to CoreGraphics.
        m_colorProfile = readColorProfile(png, info);
    }

    // The options we set here match what Mozilla does.

    // Expand to ensure we use 24-bit for RGB and 32-bit for RGBA.
    if (colorType == PNG_COLOR_TYPE_PALETTE || (colorType == PNG_COLOR_TYPE_GRAY && bitDepth < 8))
        png_set_expand(png);
    
    png_bytep trns = 0;
    int trnsCount = 0;
    if (png_get_valid(png, info, PNG_INFO_tRNS)) {
        png_get_tRNS(png, info, &trns, &trnsCount, 0);
        png_set_expand(png);
    }

    if (bitDepth == 16)
        png_set_strip_16(png);

    if (colorType == PNG_COLOR_TYPE_GRAY || colorType == PNG_COLOR_TYPE_GRAY_ALPHA)
        png_set_gray_to_rgb(png);

    // Deal with gamma and keep it under our control.
    double gamma;
    if (!m_ignoreGammaAndColorProfile && png_get_gAMA(png, info, &gamma)) {
        if ((gamma <= 0.0) || (gamma > cMaxGamma)) {
            gamma = cInverseGamma;
            png_set_gAMA(png, info, gamma);
        }
        png_set_gamma(png, cDefaultGamma, gamma);
    } else
        png_set_gamma(png, cDefaultGamma, cInverseGamma);

    // Tell libpng to send us rows for interlaced pngs.
    if (interlaceType == PNG_INTERLACE_ADAM7)
        png_set_interlace_handling(png);

    // Update our info now.
    png_read_update_info(png, info);
    channels = png_get_channels(png, info);
    ASSERT(channels == 3 || channels == 4);

    m_reader->setHasAlpha(channels == 4);

    if (m_reader->decodingSizeOnly()) {
        // If we only needed the size, halt the reader.
#if defined(PNG_LIBPNG_VER_MAJOR) && defined(PNG_LIBPNG_VER_MINOR) && (PNG_LIBPNG_VER_MAJOR > 1 || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 5))
        // '0' argument to png_process_data_pause means: Do not cache unprocessed data.
        m_reader->setReadOffset(m_reader->currentBufferSize() - png_process_data_pause(png, 0));
#else
        m_reader->setReadOffset(m_reader->currentBufferSize() - png->buffer_size);
        png->buffer_size = 0;
#endif
    }
}
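
headerAvailable() runs as a libpng progressive-read callback; the reader object that registers it is not shown here. A rough sketch of that wiring (the trampoline names are assumed, and a real incremental decoder keeps the png/info structs alive across data chunks instead of creating them per call):

#include <csetjmp>
#include "png.h"

static void decoderInfoCallback(png_structp png, png_infop info) { /* headerAvailable() */ }
static void decoderRowCallback(png_structp png, png_bytep row, png_uint_32 rowIndex, int pass) { /* rowAvailable() */ }
static void decoderEndCallback(png_structp png, png_infop info) { /* pngComplete() */ }

void processPNGData(void* decoder, png_bytep data, png_size_t length)
{
    png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
    png_infop info = png_create_info_struct(png);
    // The user pointer passed here is what the callbacks recover via
    // png_get_progressive_ptr() to reach the decoder object.
    png_set_progressive_read_fn(png, decoder, decoderInfoCallback, decoderRowCallback, decoderEndCallback);
    if (setjmp(png_jmpbuf(png))) {
        // A libpng error, or the explicit longjmp() in headerAvailable(), lands here.
        png_destroy_read_struct(&png, &info, 0);
        return;
    }
    png_process_data(png, info, data, length); // Invokes the callbacks as data is parsed.
    png_destroy_read_struct(&png, &info, 0);
}
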
Example 4
    bool decode(const SharedBuffer& data, bool onlySize)
    {
        unsigned newByteCount = data.size() - m_bufferLength;
        unsigned readOffset = m_bufferLength - m_info.src->bytes_in_buffer;

        m_info.src->bytes_in_buffer += newByteCount;
        m_info.src->next_input_byte = (JOCTET*)(data.data()) + readOffset;

        // If we still have bytes to skip, try to skip those now.
        if (m_bytesToSkip)
            skipBytes(m_bytesToSkip);

        m_bufferLength = data.size();

        // We need to do the setjmp here; otherwise bad things will happen.
        if (setjmp(m_err.setjmp_buffer))
            return m_decoder->setFailed();

        switch (m_state) {
        case JPEG_HEADER:
            // Read file parameters with jpeg_read_header().
            if (jpeg_read_header(&m_info, true) == JPEG_SUSPENDED)
                return false; // I/O suspension.

            switch (m_info.jpeg_color_space) {
            case JCS_GRAYSCALE:
            case JCS_RGB:
            case JCS_YCbCr:
                // libjpeg can convert GRAYSCALE and YCbCr image pixels to RGB.
                m_info.out_color_space = rgbOutputColorSpace();
#if defined(TURBO_JPEG_RGB_SWIZZLE)
                if (m_info.saw_JFIF_marker)
                    break;
                // FIXME: Swizzle decoding does not support Adobe transform=0
                // images (yet), so revert to using JCS_RGB in that case.
                if (m_info.saw_Adobe_marker && !m_info.Adobe_transform)
                    m_info.out_color_space = JCS_RGB;
#endif
                break;
            case JCS_CMYK:
            case JCS_YCCK:
                // libjpeg can convert YCCK to CMYK, but neither to RGB, so we
                // manually convert CMYK to RGB.
                m_info.out_color_space = JCS_CMYK;
                break;
            default:
                return m_decoder->setFailed();
            }

            m_state = JPEG_START_DECOMPRESS;

            // We can fill in the size now that the header is available.
            if (!m_decoder->setSize(m_info.image_width, m_info.image_height))
                return false;

            // Calculate and set decoded size.
            m_info.scale_num = m_decoder->desiredScaleNumerator();
            m_info.scale_denom = scaleDenominator;
            jpeg_calc_output_dimensions(&m_info);
            m_decoder->setDecodedSize(m_info.output_width, m_info.output_height);

            m_decoder->setOrientation(readImageOrientation(info()));

#if USE(QCMSLIB)
            // Allow color management of the decoded RGBA pixels if possible.
            if (!m_decoder->ignoresGammaAndColorProfile()) {
                ColorProfile colorProfile;
                readColorProfile(info(), colorProfile);
                createColorTransform(colorProfile, colorSpaceHasAlpha(m_info.out_color_space));
#if defined(TURBO_JPEG_RGB_SWIZZLE)
                // Input RGBA data to qcms. Note: restored to BGRA on output.
                if (m_transform && m_info.out_color_space == JCS_EXT_BGRA)
                    m_info.out_color_space = JCS_EXT_RGBA;
#endif
            }
#endif
            // Don't allocate a giant and superfluous memory buffer when the
            // image is a sequential JPEG.
            m_info.buffered_image = jpeg_has_multiple_scans(&m_info);

            if (onlySize) {
                // We can stop here. Reduce our buffer length and available data.
                m_bufferLength -= m_info.src->bytes_in_buffer;
                m_info.src->bytes_in_buffer = 0;
                return true;
            }
        // FALL THROUGH

        case JPEG_START_DECOMPRESS:
            // Set parameters for decompression.
            // FIXME -- Should reset dct_method and dither mode for final pass
            // of progressive JPEG.
            m_info.dct_method = dctMethod();
            m_info.dither_mode = ditherMode();
            m_info.do_fancy_upsampling = doFancyUpsampling();
            m_info.enable_2pass_quant = false;
            m_info.do_block_smoothing = true;

            // Make a one-row-high sample array that will go away when done with
            // image. Always make it big enough to hold an RGB row. Since this
            // uses the IJG memory manager, it must be allocated before the call
            // to jpeg_start_decompress().
            // FIXME: note that some output color spaces do not need the samples
            // buffer. Remove this allocation for those color spaces.
            m_samples = (*m_info.mem->alloc_sarray)(reinterpret_cast<j_common_ptr>(&m_info), JPOOL_IMAGE, m_info.output_width * 4, 1);

            // Start decompressor.
            if (!jpeg_start_decompress(&m_info))
                return false; // I/O suspension.

            // If this is a progressive JPEG ...
            m_state = (m_info.buffered_image) ? JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
        // FALL THROUGH

        case JPEG_DECOMPRESS_SEQUENTIAL:
            if (m_state == JPEG_DECOMPRESS_SEQUENTIAL) {

                if (!m_decoder->outputScanlines())
                    return false; // I/O suspension.

                // If we've completed image output...
                ASSERT(m_info.output_scanline == m_info.output_height);
                m_state = JPEG_DONE;
            }
        // FALL THROUGH

        case JPEG_DECOMPRESS_PROGRESSIVE:
            if (m_state == JPEG_DECOMPRESS_PROGRESSIVE) {
                int status;
                do {
                    status = jpeg_consume_input(&m_info);
                } while ((status != JPEG_SUSPENDED) && (status != JPEG_REACHED_EOI));

                for (;;) {
                    if (!m_info.output_scanline) {
                        int scan = m_info.input_scan_number;

                        // If we haven't displayed anything yet
                        // (output_scan_number == 0) and we have enough data for
                        // a complete scan, force output of the last full scan.
                        if (!m_info.output_scan_number && (scan > 1) && (status != JPEG_REACHED_EOI))
                            --scan;

                        if (!jpeg_start_output(&m_info, scan))
                            return false; // I/O suspension.
                    }

                    if (m_info.output_scanline == 0xffffff)
                        m_info.output_scanline = 0;

                    // If outputScanlines() fails, it deletes |this|. Therefore,
                    // copy the decoder pointer and use it to check for failure
                    // to avoid member access in the failure case.
                    JPEGImageDecoder* decoder = m_decoder;
                    if (!decoder->outputScanlines()) {
                        if (decoder->failed()) // Careful; |this| is deleted.
                            return false;
                        if (!m_info.output_scanline)
                            // Didn't manage to read any lines - flag so we
                            // don't call jpeg_start_output() multiple times for
                            // the same scan.
                            m_info.output_scanline = 0xffffff;
                        return false; // I/O suspension.
                    }

                    if (m_info.output_scanline == m_info.output_height) {
                        if (!jpeg_finish_output(&m_info))
                            return false; // I/O suspension.

                        if (jpeg_input_complete(&m_info) && (m_info.input_scan_number == m_info.output_scan_number))
                            break;

                        m_info.output_scanline = 0;
                    }
                }

                m_state = JPEG_DONE;
            }
        // FALL THROUGH

        case JPEG_DONE:
            // Finish decompression.
            return jpeg_finish_decompress(&m_info);

        case JPEG_ERROR:
            // We can get here if the constructor failed.
            return m_decoder->setFailed();
        }

        return true;
    }
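
The decode() routine above only works because m_info.src is a suspending data source: whenever libjpeg runs out of input it reports JPEG_SUSPENDED (or a false return) instead of blocking, and decode() is simply called again once more bytes arrive. A hedged sketch of such a source manager (function and helper names are illustrative):

#include <cstddef>
#include "jpeglib.h"

static void initSource(j_decompress_ptr) { }
static boolean fillInputBuffer(j_decompress_ptr) { return FALSE; /* Suspend; caller supplies more data. */ }
static void skipInputData(j_decompress_ptr jd, long numBytes)
{
    jpeg_source_mgr* src = jd->src;
    if (static_cast<size_t>(numBytes) <= src->bytes_in_buffer) {
        src->next_input_byte += numBytes;
        src->bytes_in_buffer -= numBytes;
    } else {
        // Not enough buffered data: a real decoder records the remainder
        // (m_bytesToSkip above) and skips it when the next chunk arrives.
        src->bytes_in_buffer = 0;
    }
}
static void termSource(j_decompress_ptr) { }

void installSuspendingSource(j_decompress_ptr info, jpeg_source_mgr* src)
{
    src->init_source = initSource;
    src->fill_input_buffer = fillInputBuffer;
    src->skip_input_data = skipInputData;
    src->resync_to_restart = jpeg_resync_to_restart; // libjpeg's default resync is fine here.
    src->term_source = termSource;
    src->bytes_in_buffer = 0;
    src->next_input_byte = 0;
    info->src = src;
}
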
Example 5
    bool decode(const SharedBuffer& data, bool onlySize)
    {
        m_decodingSizeOnly = onlySize;

        unsigned newByteCount = data.size() - m_bufferLength;
        unsigned readOffset = m_bufferLength - m_info.src->bytes_in_buffer;

        m_info.src->bytes_in_buffer += newByteCount;
        m_info.src->next_input_byte = (JOCTET*)(data.data()) + readOffset;

        // If we still have bytes to skip, try to skip those now.
        if (m_bytesToSkip)
            skipBytes(m_bytesToSkip);

        m_bufferLength = data.size();

        // We need to do the setjmp here; otherwise bad things will happen.
        if (setjmp(m_err.setjmp_buffer))
            return m_decoder->setFailed();

        switch (m_state) {
        case JPEG_HEADER:
            // Read file parameters with jpeg_read_header().
            if (jpeg_read_header(&m_info, true) == JPEG_SUSPENDED)
                return false; // I/O suspension.

            switch (m_info.jpeg_color_space) {
            case JCS_GRAYSCALE:
            case JCS_RGB:
            case JCS_YCbCr:
                // libjpeg can convert GRAYSCALE and YCbCr image pixels to RGB.
                m_info.out_color_space = rgbOutputColorSpace();
                break;
            case JCS_CMYK:
            case JCS_YCCK:
                // libjpeg can convert YCCK to CMYK, but neither to RGB, so we
                // manually convert CMYK to RGB.
                m_info.out_color_space = JCS_CMYK;
                break;
            default:
                return m_decoder->setFailed();
            }

            // Don't allocate a giant and superfluous memory buffer when the
            // image is a sequential JPEG.
            m_info.buffered_image = jpeg_has_multiple_scans(&m_info);

            // Used to set up image size so arrays can be allocated.
            jpeg_calc_output_dimensions(&m_info);

            // Make a one-row-high sample array that will go away when done with
            // image. Always make it big enough to hold an RGB row.  Since this
            // uses the IJG memory manager, it must be allocated before the call
            // to jpeg_start_decompress().
            m_samples = (*m_info.mem->alloc_sarray)((j_common_ptr) &m_info, JPOOL_IMAGE, m_info.output_width * 4, 1);

            m_state = JPEG_START_DECOMPRESS;

            // We can fill in the size now that the header is available.
            if (!m_decoder->setSize(m_info.image_width, m_info.image_height))
                return false;

            // Allow color management of the decoded RGBA pixels if possible.
            if (!m_decoder->ignoresGammaAndColorProfile()) {
                ColorProfile rgbInputDeviceColorProfile = readColorProfile(info());
                if (!rgbInputDeviceColorProfile.isEmpty())
                    m_decoder->setColorProfile(rgbInputDeviceColorProfile);
            }

            if (m_decodingSizeOnly) {
                // We can stop here.  Reduce our buffer length and available
                // data.
                m_bufferLength -= m_info.src->bytes_in_buffer;
                m_info.src->bytes_in_buffer = 0;
                return true;
            }
        // FALL THROUGH

        case JPEG_START_DECOMPRESS:
            // Set parameters for decompression.
            // FIXME -- Should reset dct_method and dither mode for final pass
            // of progressive JPEG.
            m_info.dct_method = JDCT_ISLOW;
            m_info.dither_mode = JDITHER_FS;
            m_info.do_fancy_upsampling = true;
            m_info.enable_2pass_quant = false;
            m_info.do_block_smoothing = true;

            // Start decompressor.
            if (!jpeg_start_decompress(&m_info))
                return false; // I/O suspension.

            // If this is a progressive JPEG ...
            m_state = (m_info.buffered_image) ? JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
        // FALL THROUGH

        case JPEG_DECOMPRESS_SEQUENTIAL:
            if (m_state == JPEG_DECOMPRESS_SEQUENTIAL) {

                if (!m_decoder->outputScanlines())
                    return false; // I/O suspension.

                // If we've completed image output...
                ASSERT(m_info.output_scanline == m_info.output_height);
                m_state = JPEG_DONE;
            }
        // FALL THROUGH

        case JPEG_DECOMPRESS_PROGRESSIVE:
            if (m_state == JPEG_DECOMPRESS_PROGRESSIVE) {
                int status;
                do {
                    status = jpeg_consume_input(&m_info);
                } while ((status != JPEG_SUSPENDED) && (status != JPEG_REACHED_EOI));

                for (;;) {
                    if (!m_info.output_scanline) {
                        int scan = m_info.input_scan_number;

                        // If we haven't displayed anything yet
                        // (output_scan_number == 0) and we have enough data for
                        // a complete scan, force output of the last full scan.
                        if (!m_info.output_scan_number && (scan > 1) && (status != JPEG_REACHED_EOI))
                            --scan;

                        if (!jpeg_start_output(&m_info, scan))
                            return false; // I/O suspension.
                    }

                    if (m_info.output_scanline == 0xffffff)
                        m_info.output_scanline = 0;

                    if (!m_decoder->outputScanlines()) {
                        if (!m_info.output_scanline)
                            // Didn't manage to read any lines - flag so we
                            // don't call jpeg_start_output() multiple times for
                            // the same scan.
                            m_info.output_scanline = 0xffffff;
                        return false; // I/O suspension.
                    }

                    if (m_info.output_scanline == m_info.output_height) {
                        if (!jpeg_finish_output(&m_info))
                            return false; // I/O suspension.

                        if (jpeg_input_complete(&m_info) && (m_info.input_scan_number == m_info.output_scan_number))
                            break;

                        m_info.output_scanline = 0;
                    }
                }

                m_state = JPEG_DONE;
            }
        // FALL THROUGH

        case JPEG_DONE:
            // Finish decompression.
            return jpeg_finish_decompress(&m_info);

        case JPEG_ERROR:
            // We can get here if the constructor failed.
            return m_decoder->setFailed();
        }

        return true;
    }
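
Both JPEG examples defer the per-row copy to outputScanlines(), which is not shown. A hedged sketch of that loop for JCS_RGB output (ImageFrame and setRGBA() are borrowed from the decoders above; everything else here is illustrative):

#include "jpeglib.h"

// Read one decompressed row at a time into the preallocated sample array and
// copy it into the frame buffer. Returns false on I/O suspension so the caller
// can retry once more data has been fed to the suspending source.
bool outputScanlines(jpeg_decompress_struct* info, JSAMPARRAY samples, ImageFrame& buffer)
{
    while (info->output_scanline < info->output_height) {
        int row = info->output_scanline; // jpeg_read_scanlines() advances this counter.
        if (jpeg_read_scanlines(info, samples, 1) != 1)
            return false; // I/O suspension.
        JSAMPLE* pixel = samples[0];
        for (unsigned x = 0; x < info->output_width; ++x, pixel += 3)
            buffer.setRGBA(x, row, pixel[0], pixel[1], pixel[2], 0xff); // Opaque RGB rows.
    }
    return true;
}
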
Example 6
void WEBPImageDecoder::applyPostProcessing(size_t frameIndex)
{
    ImageFrame& buffer = m_frameBufferCache[frameIndex];
    int width;
    int decodedHeight;
    if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
        return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
    if (decodedHeight <= 0)
        return;

    const IntRect& frameRect = buffer.originalFrameRect();
    ASSERT_WITH_SECURITY_IMPLICATION(width == frameRect.width());
    ASSERT_WITH_SECURITY_IMPLICATION(decodedHeight <= frameRect.height());
    const int left = frameRect.x();
    const int top = frameRect.y();

#if USE(QCMSLIB)
    if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) {
        if (!m_haveReadProfile) {
            readColorProfile();
            m_haveReadProfile = true;
        }
        for (int y = m_decodedHeight; y < decodedHeight; ++y) {
            const int canvasY = top + y;
            uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(left, canvasY));
            if (qcms_transform* transform = colorTransform())
                qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
            uint8_t* pixel = row;
            for (int x = 0; x < width; ++x, pixel += 4) {
                const int canvasX = left + x;
                buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2], pixel[3]);
            }
        }
    }
#endif // USE(QCMSLIB)

    // During the decoding of the current frame, we may have set some pixels to be transparent (i.e. alpha < 255).
    // However, the value of each of these pixels should have been determined by blending it against the value
    // of that pixel in the previous frame if alpha blend source was 'BlendAtopPreviousFrame'. So, we correct these
    // pixels based on disposal method of the previous frame and the previous frame buffer.
    // FIXME: This could be avoided if libwebp decoder had an API that used the previous required frame
    // to do the alpha-blending by itself.
    if ((m_formatFlags & ANIMATION_FLAG) && frameIndex && buffer.alphaBlendSource() == ImageFrame::BlendAtopPreviousFrame && buffer.requiredPreviousFrameIndex() != kNotFound) {
        ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1];
        ASSERT(prevBuffer.status() == ImageFrame::FrameComplete);
        ImageFrame::DisposalMethod prevDisposalMethod = prevBuffer.disposalMethod();
        if (prevDisposalMethod == ImageFrame::DisposeKeep) { // Blend transparent pixels with pixels in previous canvas.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                m_blendFunction(buffer, prevBuffer, top + y, left, width);
            }
        } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) {
            const IntRect& prevRect = prevBuffer.originalFrameRect();
            // We need to blend a transparent pixel with its value just after the initFrame() call. That is:
            //   * Blend with fully transparent pixel if it belongs to prevRect <-- This is a no-op.
            //   * Blend with the pixel in the previous canvas otherwise <-- Needs alpha-blending.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                int canvasY = top + y;
                int left1, width1, left2, width2;
                findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2, width2);
                if (width1 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left1, width1);
                if (width2 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left2, width2);
            }
        }
    }

    m_decodedHeight = decodedHeight;
    buffer.setPixelsChanged(true);
}
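
The row-level m_blendFunction used above is not shown; per pixel it amounts to standard source-over compositing of a not-fully-opaque decoded pixel atop the corresponding pixel of the previous canvas. A standalone sketch of that math on straight (non-premultiplied) 8-bit RGBA values, not the decoder's own implementation:

#include <stdint.h>

struct RGBA { uint8_t r, g, b, a; };

static uint8_t blendChannel(unsigned srcC, unsigned srcA, unsigned dstC, unsigned dstA, unsigned outA)
{
    return static_cast<uint8_t>((srcC * srcA + dstC * dstA * (255 - srcA) / 255) / outA);
}

// Source-over: outA = srcA + dstA * (1 - srcA); color channels are weighted by
// their alphas and renormalized. Fully opaque source pixels short-circuit,
// matching the "only pixels with alpha < 255 need correcting" note above.
RGBA blendOver(const RGBA& src, const RGBA& dst)
{
    if (src.a == 0xff)
        return src;
    unsigned outA = src.a + dst.a * (255u - src.a) / 255u;
    RGBA out = { 0, 0, 0, 0 };
    if (!outA)
        return out;
    out.r = blendChannel(src.r, src.a, dst.r, dst.a, outA);
    out.g = blendChannel(src.g, src.a, dst.g, dst.a, outA);
    out.b = blendChannel(src.b, src.a, dst.b, dst.a, outA);
    out.a = static_cast<uint8_t>(outA);
    return out;
}
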
Example 7
bool WEBPImageDecoder::updateDemuxer() {
  if (failed())
    return false;

  if (m_haveAlreadyParsedThisData)
    return true;

  m_haveAlreadyParsedThisData = true;

  const unsigned webpHeaderSize = 30;
  if (m_data->size() < webpHeaderSize)
    return false;  // Await VP8X header so WebPDemuxPartial succeeds.

  WebPDemuxDelete(m_demux);
  m_consolidatedData = m_data->getAsSkData();
  WebPData inputData = {
      reinterpret_cast<const uint8_t*>(m_consolidatedData->data()),
      m_consolidatedData->size()};
  m_demux = WebPDemuxPartial(&inputData, &m_demuxState);
  if (!m_demux || (isAllDataReceived() && m_demuxState != WEBP_DEMUX_DONE)) {
    if (!m_demux)
      m_consolidatedData.reset();
    return setFailed();
  }

  ASSERT(m_demuxState > WEBP_DEMUX_PARSING_HEADER);
  if (!WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT))
    return false;  // Wait until the encoded image frame data arrives.

  if (!isDecodedSizeAvailable()) {
    int width = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_WIDTH);
    int height = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_HEIGHT);
    if (!setSize(width, height))
      return setFailed();

    m_formatFlags = WebPDemuxGetI(m_demux, WEBP_FF_FORMAT_FLAGS);
    if (!(m_formatFlags & ANIMATION_FLAG)) {
      m_repetitionCount = cAnimationNone;
    } else {
      // Since we have parsed at least one frame, even if partially, we know
      // that the global animation (ANIM) properties have been read, because
      // an ANIM chunk must precede the ANMF frame chunks.
      m_repetitionCount = WebPDemuxGetI(m_demux, WEBP_FF_LOOP_COUNT);
      // Repetition count is always <= 16 bits.
      ASSERT(m_repetitionCount == (m_repetitionCount & 0xffff));
      if (!m_repetitionCount)
        m_repetitionCount = cAnimationLoopInfinite;
      // FIXME: Implement ICC profile support for animated images.
      m_formatFlags &= ~ICCP_FLAG;
    }

    if ((m_formatFlags & ICCP_FLAG) && !ignoresColorSpace())
      readColorProfile();
  }

  ASSERT(isDecodedSizeAvailable());

  size_t frameCount = WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT);
  updateAggressivePurging(frameCount);

  return true;
}