Example #1
0
int GIFImageDecoder::repetitionCount() const
{
    // This value can arrive at any point in the image data stream.  Most GIFs
    // in the wild declare it near the beginning of the file, so it usually is
    // set by the time we've decoded the size, but (depending on the GIF and the
    // packets sent back by the webserver) not always.  If the reader hasn't
    // seen a loop count yet, it will return cLoopCountNotSeen, in which case we
    // should default to looping once (the initial value for
    // |m_repetitionCount|).
    //
    // There are some additional wrinkles here. First, ImageSource::clear()
    // may destroy the reader, making the result from the reader _less_
    // authoritative on future calls if the recreated reader hasn't seen the
    // loop count.  We don't need to special-case this because in this case the
    // new reader will once again return cLoopCountNotSeen, and we won't
    // overwrite the cached correct value.
    //
    // Second, a GIF might never set a loop count at all, in which case we
    // should continue to treat it as a "loop once" animation.  We don't need
    // special code here either, because in this case we'll never change
    // |m_repetitionCount| from its default value.
    //
    // Third, we use the same GIFImageReader for counting frames and we might
    // see the loop count and then encounter a decoding error which happens
    // later in the stream. It is also possible that no frames are in the
    // stream. In these cases we should just loop once.
    if (isAllDataReceived() && parseCompleted() && m_reader->imagesCount() == 1)
        m_repetitionCount = cAnimationNone;
    else if (failed() || (m_reader && (!m_reader->imagesCount())))
        m_repetitionCount = cAnimationLoopOnce;
    else if (m_reader && m_reader->loopCount() != cLoopCountNotSeen)
        m_repetitionCount = m_reader->loopCount();
    return m_repetitionCount;
}
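
// Sketch (not from the decoder sources): how a caller might act on the value
// returned by repetitionCount() above. The convention assumed here is the one
// described in the WEBPImageDecoder::updateDemuxer() comment further down: a
// special sentinel (cAnimationLoopInfinite) for endless looping, and a
// non-negative N meaning "show N + 1 cycles"; shouldStartAnotherCycle() is a
// hypothetical helper, not an existing API.
static bool shouldStartAnotherCycle(int repetitionCount, int cyclesAlreadyShown)
{
    if (repetitionCount == cAnimationNone) // Still image: never animate.
        return false;
    if (repetitionCount == cAnimationLoopInfinite) // Loop forever.
        return true;
    // A non-negative value N means "show the animation for N + 1 cycles".
    return cyclesAlreadyShown < repetitionCount + 1;
}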
bool WEBPImageDecoder::decodeSingleFrame(const uint8_t* dataBytes, size_t dataSize, size_t frameIndex)
{
    if (failed())
        return false;

    ASSERT(isDecodedSizeAvailable());

    ASSERT(m_frameBufferCache.size() > frameIndex);
    ImageFrame& buffer = m_frameBufferCache[frameIndex];
    ASSERT(buffer.status() != ImageFrame::FrameComplete);

    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(size().width(), size().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        // The buffer is transparent outside the decoded area while the image is loading.
        // The correct value of 'hasAlpha' for the frame will be set when it is fully decoded.
        buffer.setHasAlpha(true);
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    const IntRect& frameRect = buffer.originalFrameRect();
    if (!m_decoder) {
        WEBP_CSP_MODE mode = outputMode(m_formatFlags & ALPHA_FLAG);
        if (!m_premultiplyAlpha)
            mode = outputMode(false);
#if USE(QCMSLIB)
        if (colorTransform())
            mode = MODE_RGBA; // Decode to RGBA for input to libqcms.
#endif
        WebPInitDecBuffer(&m_decoderBuffer);
        m_decoderBuffer.colorspace = mode;
        m_decoderBuffer.u.RGBA.stride = size().width() * sizeof(ImageFrame::PixelData);
        m_decoderBuffer.u.RGBA.size = m_decoderBuffer.u.RGBA.stride * frameRect.height();
        m_decoderBuffer.is_external_memory = 1;
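        // The pixel memory is owned by the ImageFrame (u.RGBA.rgba is pointed
        // at buffer.getAddr() below), so libwebp must not allocate its own.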
        m_decoder = WebPINewDecoder(&m_decoderBuffer);
        if (!m_decoder)
            return setFailed();
    }

    m_decoderBuffer.u.RGBA.rgba = reinterpret_cast<uint8_t*>(buffer.getAddr(frameRect.x(), frameRect.y()));

    switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) {
    case VP8_STATUS_OK:
        applyPostProcessing(frameIndex);
        buffer.setHasAlpha((m_formatFlags & ALPHA_FLAG) || m_frameBackgroundHasAlpha);
        buffer.setStatus(ImageFrame::FrameComplete);
        clearDecoder();
        return true;
    case VP8_STATUS_SUSPENDED:
        if (!isAllDataReceived() && !frameIsCompleteAtIndex(frameIndex)) {
            applyPostProcessing(frameIndex);
            return false;
        }
        // FALLTHROUGH
    default:
        clear();
        return setFailed();
    }
}
Example #3
0
void GIFImageDecoder::decode(size_t frameIndex)
{
    parse(GIFFrameCountQuery);

    if (failed())
        return;

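    // Frames can depend on earlier frames as their starting canvas, so walk
    // the requiredPreviousFrameIndex() chain backwards until we reach a frame
    // that is already FrameComplete, then decode the chain oldest-first below.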
    Vector<size_t> framesToDecode;
    size_t frameToDecode = frameIndex;
    do {
        framesToDecode.append(frameToDecode);
        frameToDecode = m_frameBufferCache[frameToDecode].requiredPreviousFrameIndex();
    } while (frameToDecode != notFound && m_frameBufferCache[frameToDecode].status() != ImageFrame::FrameComplete);

    for (size_t i = framesToDecode.size(); i > 0; --i) {
        size_t frameIndex = framesToDecode[i - 1];
        if (!m_reader->decode(frameIndex)) {
            setFailed();
            return;
        }

        // We need more data to continue decoding.
        if (m_frameBufferCache[frameIndex].status() != ImageFrame::FrameComplete)
            break;
    }

    // It is also a fatal error if all data is received and we have decoded all
    // frames available but the file is truncated.
    if (frameIndex >= m_frameBufferCache.size() - 1 && isAllDataReceived() && m_reader && !m_reader->parseCompleted())
        setFailed();
}
void GIFImageDecoder::decode(size_t index)
{
    parse(GIFFrameCountQuery);

    if (failed())
        return;

    updateAggressivePurging(index);

    Vector<size_t> framesToDecode;
    size_t frameToDecode = index;
    do {
        framesToDecode.append(frameToDecode);
        frameToDecode = m_frameBufferCache[frameToDecode].requiredPreviousFrameIndex();
    } while (frameToDecode != kNotFound && m_frameBufferCache[frameToDecode].getStatus() != ImageFrame::FrameComplete);

    for (auto i = framesToDecode.rbegin(); i != framesToDecode.rend(); ++i) {
        if (!m_reader->decode(*i)) {
            setFailed();
            return;
        }

        if (m_purgeAggressively)
            clearCacheExceptFrame(*i);

        // We need more data to continue decoding.
        if (m_frameBufferCache[*i].getStatus() != ImageFrame::FrameComplete)
            break;
    }

    // It is also a fatal error if all data is received and we have decoded all
    // frames available but the file is truncated.
    if (index >= m_frameBufferCache.size() - 1 && isAllDataReceived() && m_reader && !m_reader->parseCompleted())
        setFailed();
}
void RxIImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader.set(new RxIImageReader(this));

    if (onlySize) {
        if (!m_reader->decodeForSize(m_data->buffer(), m_format))
            setFailed();
        return;
    }

    if (!isAllDataReceived())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!m_reader->decode(m_data->buffer(), m_format))
        setFailed();

    if (failed() || (!m_frameBufferCache.isEmpty() && m_frameBufferCache[0].status() == RGBA32Buffer::FrameComplete))
        m_reader.clear();
}
bool WEBPImageDecoder::updateDemuxer()
{
    if (failed())
        return false;

    if (m_haveAlreadyParsedThisData)
        return true;

    m_haveAlreadyParsedThisData = true;

    const unsigned webpHeaderSize = 30;
    if (m_data->size() < webpHeaderSize)
        return false; // Await VP8X header so WebPDemuxPartial succeeds.

    WebPDemuxDelete(m_demux);
    WebPData inputData = { reinterpret_cast<const uint8_t*>(m_data->data()), m_data->size() };
    m_demux = WebPDemuxPartial(&inputData, &m_demuxState);
    if (!m_demux || (isAllDataReceived() && m_demuxState != WEBP_DEMUX_DONE))
        return setFailed();

    ASSERT(m_demuxState > WEBP_DEMUX_PARSING_HEADER);
    if (!WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT))
        return false; // Wait until the encoded image frame data arrives.

    if (!isDecodedSizeAvailable()) {
        int width = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_WIDTH);
        int height = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_HEIGHT);
        if (!setSize(width, height))
            return setFailed();

        m_formatFlags = WebPDemuxGetI(m_demux, WEBP_FF_FORMAT_FLAGS);
        if (!(m_formatFlags & ANIMATION_FLAG)) {
            m_repetitionCount = cAnimationNone;
        } else {
            // Since we have parsed at least one frame, even if partially,
            // the global animation (ANIM) properties have been read, because
            // an ANIM chunk must precede the ANMF frame chunks.
            m_repetitionCount = WebPDemuxGetI(m_demux, WEBP_FF_LOOP_COUNT);
            // Repetition count is always <= 16 bits.
            ASSERT(m_repetitionCount == (m_repetitionCount & 0xffff));
            // Repetition count is the number of animation cycles to show,
            // where 0 means "infinite". But ImageSource::repetitionCount()
            // returns -1 for "infinite", and 0 and up for "show the image
            // animation one cycle more than the value". Subtract one here
            // to correctly handle the finite and infinite cases.
            --m_repetitionCount;
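            // For example (values taken from the description above): a stored
            // loop count of 3 becomes m_repetitionCount == 2, i.e. the
            // animation is shown for 3 cycles, while a stored loop count of 0
            // becomes -1, the "infinite" value expected by the caller.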
            // FIXME: Implement ICC profile support for animated images.
            m_formatFlags &= ~ICCP_FLAG;
        }

#if USE(QCMSLIB)
        if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile())
            readColorProfile();
#endif
    }

    ASSERT(isDecodedSizeAvailable());
    return true;
}
Example #7
0
void BMPImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!decodeHelper(onlySize) && isAllDataReceived())
        setFailed();
}
Example #8
0
void ICOImageDecoder::setDataForPNGDecoderAtIndex(size_t index)
{
    if (!m_pngDecoders[index])
        return;

    const IconDirectoryEntry& dirEntry = m_dirEntries[index];
    // Copy out PNG data to a separate vector and send to the PNG decoder.
    // FIXME: Save this copy by making the PNG decoder able to take an
    // optional offset.
    RefPtr<SharedBuffer> pngData(SharedBuffer::create(&m_data->data()[dirEntry.m_imageOffset], m_data->size() - dirEntry.m_imageOffset));
    m_pngDecoders[index]->setData(pngData.get(), isAllDataReceived());
}
Example #9
0
void BMPImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!decodeHelper(onlySize) && isAllDataReceived())
        setFailed();
    // If we're done decoding the image, we don't need the BMPImageReader
    // anymore.  (If we failed, |m_reader| has already been cleared.)
    else if (!m_frameBufferCache.isEmpty() && (m_frameBufferCache.first().status() == ImageFrame::FrameComplete))
        m_reader.clear();
}
Example #10
0
bool WEBPImageDecoder::decode(bool onlySize)
{
    // Minimum number of bytes needed to ensure one can parse size information.
    static const size_t sizeOfHeader = 30;
    // Number of bytes per pixel.
    static const int bytesPerPixel = 3;

    if (failed())
        return false;
    const size_t dataSize = m_data->buffer().size();
    const uint8_t* dataBytes =
        reinterpret_cast<const uint8_t*>(m_data->buffer().data());
    int width, height;
    if (dataSize < sizeOfHeader)
        return true;
    if (!WebPGetInfo(dataBytes, dataSize, &width, &height))
        return setFailed();
    if (!ImageDecoder::isSizeAvailable() && !setSize(width, height))
        return setFailed();
    if (onlySize)
        return true;

    // FIXME: Add support for progressive decoding.
    if (!isAllDataReceived())
        return true;
    ASSERT(!m_frameBufferCache.isEmpty());
    RGBA32Buffer& buffer = m_frameBufferCache[0];
    if (buffer.status() == RGBA32Buffer::FrameEmpty) {
        ASSERT(width == size().width());
        ASSERT(height == size().height());
        if (!buffer.setSize(width, height))
            return setFailed();
    }
    const int stride = width * bytesPerPixel;
    Vector<uint8_t> rgb;
    rgb.resize(height * stride);
    if (!WebPDecodeBGRInto(dataBytes, dataSize, rgb.data(), rgb.size(), stride))
        return setFailed();
    // FIXME: remove this data copy.
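    // The temporary buffer holds BGR data, so red and blue (offsets 2 and 0)
    // are swapped while filling the frame's RGBA pixels.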
    for (int y = 0; y < height; ++y) {
        const uint8_t* const src = &rgb[y * stride];
        for (int x = 0; x < width; ++x)
            buffer.setRGBA(x, y, src[bytesPerPixel * x + 2], src[bytesPerPixel * x + 1], src[bytesPerPixel * x + 0], 0xff);
    }
    buffer.setStatus(RGBA32Buffer::FrameComplete);
    buffer.setHasAlpha(false);
    buffer.setRect(IntRect(IntPoint(), size()));
    return true;
}
Example #11
0
void GIFImageDecoder::decode(unsigned haltAtFrame, GIFQuery query)
{
    if (failed())
        return;

    if (!m_reader) {
        m_reader = adoptPtr(new GIFImageReader(this));
        m_reader->setData(m_data);
    }

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!m_reader->decode(query, haltAtFrame) && isAllDataReceived())
        setFailed();
}
void PNGImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader = adoptPtr(new PNGImageReader(this, m_offset));

    // If we couldn't decode the image but have received all the data, decoding
    // has failed.
    if (!m_reader->decode(*m_data, onlySize) && isAllDataReceived())
        setFailed();

    // If decoding is done or failed, we don't need the PNGImageReader anymore.
    if (isComplete(this) || failed())
        m_reader.clear();
}
Example #13
0
void ICOImageDecoder::decode(size_t index, bool onlySize)
{
    if (failed())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if ((!decodeDirectory() || (!onlySize && !decodeAtIndex(index))) && isAllDataReceived())
        setFailed();
    // If we're done decoding this frame, we don't need the BMPImageReader or
    // PNGImageDecoder anymore.  (If we failed, these have already been
    // cleared.)
    else if ((m_frameBufferCache.size() > index) && (m_frameBufferCache[index].status() == ImageFrame::FrameComplete)) {
        m_bmpReaders[index].clear();
        m_pngDecoders[index].clear();
    }
}
void JPEGImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader = adoptPtr(new JPEGImageReader(this));

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!m_reader->decode(*m_data, onlySize) && isAllDataReceived())
        setFailed();
    // If we're done decoding the image, we don't need the JPEGImageReader
    // anymore.  (If we failed, |m_reader| has already been cleared.)
    else if (!m_frameBufferCache.isEmpty() && (m_frameBufferCache[0].status() == ImageFrame::FrameComplete))
        m_reader.clear();
}
Example #15
0
void PNGImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader = adoptPtr(new PNGImageReader(this));

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!m_reader->decode(*m_data, onlySize) && isAllDataReceived())
        setFailed();
    // If we're done decoding the image, we don't need the PNGImageReader
    // anymore.  (If we failed, |m_reader| has already been cleared.)
    else if (isComplete())
        m_reader.clear();
}
Example #16
0
ImageFrame* WEBPImageDecoder::frameBufferAtIndex(size_t index)
{
    if (index >= frameCount())
        return 0;

    ImageFrame& frame = m_frameBufferCache[index];
    if (frame.status() == ImageFrame::FrameComplete)
        return &frame;

    Vector<size_t> framesToDecode;
    size_t frameToDecode = index;
    do {
        framesToDecode.append(frameToDecode);
        frameToDecode = m_frameBufferCache[frameToDecode].requiredPreviousFrameIndex();
    } while (frameToDecode != kNotFound && m_frameBufferCache[frameToDecode].status() != ImageFrame::FrameComplete);

    ASSERT(m_demux);
    for (size_t i = framesToDecode.size(); i > 0; --i) {
        size_t frameIndex = framesToDecode[i - 1];
        if ((m_formatFlags & ANIMATION_FLAG) && !initFrameBuffer(frameIndex))
            return 0;
        WebPIterator webpFrame;
        if (!WebPDemuxGetFrame(m_demux, frameIndex + 1, &webpFrame))
            return 0;
        PlatformInstrumentation::willDecodeImage("WEBP");
        decode(webpFrame.fragment.bytes, webpFrame.fragment.size, false, frameIndex);
        PlatformInstrumentation::didDecodeImage();
        WebPDemuxReleaseIterator(&webpFrame);

        if (failed())
            return 0;

        // We need more data to continue decoding.
        if (m_frameBufferCache[frameIndex].status() != ImageFrame::FrameComplete)
            break;
    }

    // It is also a fatal error if all data is received and we have decoded all
    // frames available but the file is truncated.
    if (index >= m_frameBufferCache.size() - 1 && isAllDataReceived() && m_demux && m_demuxState != WEBP_DEMUX_DONE)
        setFailed();

    frame.notifyBitmapIfPixelsChanged();
    return &frame;
}
Example #17
0
void WEBPImageDecoder::decode(size_t index) {
  if (failed())
    return;

  Vector<size_t> framesToDecode;
  size_t frameToDecode = index;
  do {
    framesToDecode.append(frameToDecode);
    frameToDecode =
        m_frameBufferCache[frameToDecode].requiredPreviousFrameIndex();
  } while (frameToDecode != kNotFound &&
           m_frameBufferCache[frameToDecode].getStatus() !=
               ImageFrame::FrameComplete);

  ASSERT(m_demux);
  for (auto i = framesToDecode.rbegin(); i != framesToDecode.rend(); ++i) {
    if ((m_formatFlags & ANIMATION_FLAG) && !initFrameBuffer(*i))
      return;
    WebPIterator webpFrame;
    if (!WebPDemuxGetFrame(m_demux, *i + 1, &webpFrame)) {
      setFailed();
    } else {
      decodeSingleFrame(webpFrame.fragment.bytes, webpFrame.fragment.size, *i);
      WebPDemuxReleaseIterator(&webpFrame);
    }
    if (failed())
      return;

    // We need more data to continue decoding.
    if (m_frameBufferCache[*i].getStatus() != ImageFrame::FrameComplete)
      break;

    if (m_purgeAggressively)
      clearCacheExceptFrame(*i);
  }

  // It is also a fatal error if all data is received and we have decoded all
  // frames available but the file is truncated.
  if (index >= m_frameBufferCache.size() - 1 && isAllDataReceived() &&
      m_demux && m_demuxState != WEBP_DEMUX_DONE)
    setFailed();
}
Example #18
0
void GIFImageDecoder::decode(unsigned haltAtFrame, GIFQuery query)
{
    if (failed())
        return;

    if (!m_reader) {
        m_reader = adoptPtr(new GIFImageReader(this));
        m_reader->setData(m_data);
    }

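    // The reader is driven with successively deeper queries: size only, then
    // frame count, then full pixel data, stopping at the level the caller
    // asked for.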
    if (query == GIFSizeQuery) {
        if (!m_reader->decode(GIFSizeQuery, haltAtFrame))
            setFailed();
        return;
    }

    if (!m_reader->decode(GIFFrameCountQuery, haltAtFrame)) {
        setFailed();
        return;
    }

    const size_t oldSize = m_frameBufferCache.size();
    m_frameBufferCache.resize(m_reader->imagesCount());
    for (size_t i = oldSize; i < m_reader->imagesCount(); ++i)
        m_frameBufferCache[i].setPremultiplyAlpha(m_premultiplyAlpha);

    if (query == GIFFrameCountQuery)
        return;

    if (!m_reader->decode(GIFFullQuery, haltAtFrame)) {
        setFailed();
        return;
    }

    // It is also a fatal error if all data is received but we failed to decode
    // all frames completely.
    if (isAllDataReceived() && haltAtFrame >= m_frameBufferCache.size() && m_reader)
        setFailed();
}
Example #19
0
size_t ICOImageDecoder::decodeFrameCount() {
  decodeSize();

  // If decodeSize() fails, return the existing number of frames.  This way
  // if we get halfway through the image before decoding fails, we won't
  // suddenly start reporting that the image has zero frames.
  if (failed())
    return m_frameBufferCache.size();

  // If the file is incomplete, return the length of the sequence of completely
  // received frames.  We don't do this when the file is fully received, since
  // some ICOs have entries whose claimed offset + size extends past the end of
  // the file, and we still want to display these if they don't trigger decoding
  // failures elsewhere.
  if (!isAllDataReceived()) {
    for (size_t i = 0; i < m_dirEntries.size(); ++i) {
      const IconDirectoryEntry& dirEntry = m_dirEntries[i];
      if ((dirEntry.m_imageOffset + dirEntry.m_byteSize) > m_data->size())
        return i;
    }
  }
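  // For example (hypothetical numbers): with entries ending at byte offsets
  // 100, 300 and 900 and only 500 bytes received so far, the loop above
  // returns 2, so just the first two frames are reported until more data
  // arrives.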
  return m_dirEntries.size();
}
Example #20
0
void ICOImageDecoder::decode(size_t index, bool onlySize) {
  if (failed())
    return;

  // Defensively clear the FastSharedBufferReader's cache, as another caller
  // may have called SharedBuffer::mergeSegmentsIntoBuffer().
  m_fastReader.clearCache();

  // If we couldn't decode the image but we've received all the data, decoding
  // has failed.
  if ((!decodeDirectory() || (!onlySize && !decodeAtIndex(index))) &&
      isAllDataReceived()) {
    setFailed();
    // If we're done decoding this frame, we don't need the BMPImageReader or
    // PNGImageDecoder anymore.  (If we failed, these have already been
    // cleared.)
  } else if ((m_frameBufferCache.size() > index) &&
             (m_frameBufferCache[index].getStatus() ==
              ImageFrame::FrameComplete)) {
    m_bmpReaders[index].reset();
    m_pngDecoders[index].reset();
  }
}
Example #21
0
void ICOImageDecoder::decode(size_t index, bool onlySize)
{
    if (failed())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if ((!decodeDirectory() || (!onlySize && !decodeAtIndex(index))) && isAllDataReceived())
        setFailed();
    // If we're done decoding this frame, we don't need the BMPImageReader or
    // PNGImageDecoder anymore.  (If we failed, these have already been
    // cleared.)
    else if ((m_frameBufferCache.size() > index) && m_frameBufferCache[index].isComplete()) {
        m_bmpReaders[index] = nullptr;
        m_pngDecoders[index] = nullptr;
    }

    if (m_frameBufferCache.isEmpty())
        m_frameBufferCache.resize(m_dirEntries.size());
    // CAUTION: We must not resize m_frameBufferCache again after this, as
    // decodeAtIndex() may give a BMPImageReader a pointer to one of the
    // entries.
}
Example #22
0
bool WEBPImageDecoder::decode(bool onlySize)
{
    // Minimum number of bytes needed to ensure one can parse size information.
    static const size_t sizeOfHeader = 30;
    // Number of bytes per pixel.
    static const int bytesPerPixel = 3;

    if (failed())
        return false;

    const size_t dataSize = m_data->size();
    if (dataSize < sizeOfHeader)
        return true;

    int width, height;
    const uint8_t* dataBytes = reinterpret_cast<const uint8_t*>(m_data->data());
    if (!WebPGetInfo(dataBytes, dataSize, &width, &height))
        return setFailed();
    if (!ImageDecoder::isSizeAvailable() && !setSize(width, height))
        return setFailed();
    if (onlySize)
        return true;

    bool allDataReceived = isAllDataReceived();
    int stride = width * bytesPerPixel;
    ASSERT(!m_frameBufferCache.isEmpty());
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        ASSERT(width == size().width());
        ASSERT(height == size().height());
        if (!buffer.setSize(width, height))
            return setFailed();
        buffer.setStatus(allDataReceived ? ImageFrame::FrameComplete : ImageFrame::FramePartial);
        // FIXME: We currently hard code false below because libwebp doesn't support alpha yet.
        buffer.setHasAlpha(false);
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
        m_rgbOutput.resize(height * stride);
    }
    int newLastVisibleRow = 0; // Last completed row.
    if (allDataReceived) {
        if (!WebPDecodeRGBInto(dataBytes, dataSize, m_rgbOutput.data(), m_rgbOutput.size(), stride))
            return setFailed();
        newLastVisibleRow = height;
    } else {
        if (!m_decoder) {
            m_decoder = WebPINewRGB(MODE_RGB, m_rgbOutput.data(), m_rgbOutput.size(), stride);
            if (!m_decoder)
                return setFailed();
        }
        const VP8StatusCode status = WebPIUpdate(m_decoder, dataBytes, dataSize);
        if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED)
            return setFailed();
        if (!WebPIDecGetRGB(m_decoder, &newLastVisibleRow, 0, 0, 0))
            return setFailed();
        ASSERT(newLastVisibleRow >= 0);
        ASSERT(newLastVisibleRow <= height);
    }
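    // Only the rows decoded since the previous call are copied, so a partially
    // downloaded image is painted top-down as data arrives.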
    // FIXME: remove this data copy.
    for (int y = m_lastVisibleRow; y < newLastVisibleRow; ++y) {
        const uint8_t* const src = &m_rgbOutput[y * stride];
        for (int x = 0; x < width; ++x)
            buffer.setRGBA(x, y, src[bytesPerPixel * x + 0], src[bytesPerPixel * x + 1], src[bytesPerPixel * x + 2], 0xff);
    }
    m_lastVisibleRow = newLastVisibleRow;
    if (m_lastVisibleRow == height)
         buffer.setStatus(ImageFrame::FrameComplete);
    return m_lastVisibleRow == height;
}
Example #23
0
bool WEBPImageDecoder::updateDemuxer()
{
    if (failed())
        return false;

    if (m_haveAlreadyParsedThisData)
        return true;

    m_haveAlreadyParsedThisData = true;

    const unsigned webpHeaderSize = 20;
    if (m_data->size() < webpHeaderSize)
        return false; // Wait for headers so that WebPDemuxPartial doesn't return null.

    WebPDemuxDelete(m_demux);
    WebPData inputData = { reinterpret_cast<const uint8_t*>(m_data->data()), m_data->size() };
    m_demux = WebPDemuxPartial(&inputData, &m_demuxState);
    if (!m_demux || (isAllDataReceived() && m_demuxState != WEBP_DEMUX_DONE))
        return setFailed();

    if (m_demuxState <= WEBP_DEMUX_PARSING_HEADER)
        return false; // Not enough data for parsing canvas width/height yet.

    bool hasAnimation = (m_formatFlags & ANIMATION_FLAG);
    if (!ImageDecoder::isSizeAvailable()) {
        m_formatFlags = WebPDemuxGetI(m_demux, WEBP_FF_FORMAT_FLAGS);
        hasAnimation = (m_formatFlags & ANIMATION_FLAG);
        if (!hasAnimation)
            m_repetitionCount = cAnimationNone;
        else
            m_formatFlags &= ~ICCP_FLAG; // FIXME: Implement ICC profile support for animated images.
#if USE(QCMSLIB)
        if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile())
            m_hasColorProfile = true;
#endif
        if (!setSize(WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_WIDTH), WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_HEIGHT)))
            return setFailed();
    }

    ASSERT(ImageDecoder::isSizeAvailable());
    const size_t newFrameCount = WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT);
    if (hasAnimation && !m_haveReadAnimationParameters && newFrameCount) {
        // As we have parsed at least one frame (even if partially),
        // we must already have parsed the animation properties.
        // This is because the ANIM chunk always precedes the ANMF chunks.
        m_repetitionCount = WebPDemuxGetI(m_demux, WEBP_FF_LOOP_COUNT);
        ASSERT(m_repetitionCount == (m_repetitionCount & 0xffff)); // Loop count is always <= 16 bits.
        // |m_repetitionCount| is the total number of animation cycles to show,
        // with 0 meaning "infinite". But ImageSource::repetitionCount()
        // returns -1 for "infinite", and 0 and up for "show the animation one
        // cycle more than this value". By subtracting one here, we convert
        // both finite and infinite cases correctly.
        --m_repetitionCount;
        m_haveReadAnimationParameters = true;
    }

    const size_t oldFrameCount = m_frameBufferCache.size();
    if (newFrameCount > oldFrameCount) {
        m_frameBufferCache.resize(newFrameCount);
        for (size_t i = oldFrameCount; i < newFrameCount; ++i) {
            m_frameBufferCache[i].setPremultiplyAlpha(m_premultiplyAlpha);
            if (!hasAnimation) {
                ASSERT(!i);
                m_frameBufferCache[i].setRequiredPreviousFrameIndex(kNotFound);
                continue;
            }
            WebPIterator animatedFrame;
            WebPDemuxGetFrame(m_demux, i + 1, &animatedFrame);
            ASSERT(animatedFrame.complete == 1);
            m_frameBufferCache[i].setDuration(animatedFrame.duration);
            m_frameBufferCache[i].setDisposalMethod(animatedFrame.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ? ImageFrame::DisposeOverwriteBgcolor : ImageFrame::DisposeKeep);
            m_frameBufferCache[i].setAlphaBlendSource(animatedFrame.blend_method == WEBP_MUX_BLEND ? ImageFrame::BlendAtopPreviousFrame : ImageFrame::BlendAtopBgcolor);
            IntRect frameRect(animatedFrame.x_offset, animatedFrame.y_offset, animatedFrame.width, animatedFrame.height);
            // Make sure the frameRect doesn't extend outside the buffer.
            if (frameRect.maxX() > size().width())
                frameRect.setWidth(size().width() - animatedFrame.x_offset);
            if (frameRect.maxY() > size().height())
                frameRect.setHeight(size().height() - animatedFrame.y_offset);
            m_frameBufferCache[i].setOriginalFrameRect(frameRect);
            m_frameBufferCache[i].setRequiredPreviousFrameIndex(findRequiredPreviousFrame(i, !animatedFrame.has_alpha));
            WebPDemuxReleaseIterator(&animatedFrame);
        }
    }

    return true;
}
Example #24
0
void ICOImageDecoder::setDataForPNGDecoderAtIndex(size_t index) {
  if (!m_pngDecoders[index])
    return;

  m_pngDecoders[index]->setData(m_data.get(), isAllDataReceived());
}
Example #25
0
bool WEBPImageDecoder::decode(bool onlySize)
{
    if (failed())
        return false;

#if defined(__LB_SHELL__)
    // We don't want progressive decoding.
    if (!isAllDataReceived())
        return false;
#endif

    const uint8_t* dataBytes = reinterpret_cast<const uint8_t*>(m_data->data());
    const size_t dataSize = m_data->size();

    if (!ImageDecoder::isSizeAvailable()) {
        static const size_t imageHeaderSize = 30;
        if (dataSize < imageHeaderSize)
            return false;
        int width, height;
#if (WEBP_DECODER_ABI_VERSION >= 0x0163)
        WebPBitstreamFeatures features;
        if (WebPGetFeatures(dataBytes, dataSize, &features) != VP8_STATUS_OK)
            return setFailed();
        width = features.width;
        height = features.height;
        m_hasAlpha = features.has_alpha;
#else
        // Earlier versions can't display WebP files with alpha.
        if (!WebPGetInfo(dataBytes, dataSize, &width, &height))
            return setFailed();
        m_hasAlpha = false;
#endif
        if (!setSize(width, height))
            return setFailed();
    }

    ASSERT(ImageDecoder::isSizeAvailable());
    if (onlySize)
        return true;

    ASSERT(!m_frameBufferCache.isEmpty());
    ImageFrame& buffer = m_frameBufferCache[0];
    ASSERT(buffer.status() != ImageFrame::FrameComplete);

    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(size().width(), size().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        buffer.setHasAlpha(m_hasAlpha);
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    if (!m_decoder) {
        int rowStride = size().width() * sizeof(ImageFrame::PixelData);
        uint8_t* output = reinterpret_cast<uint8_t*>(buffer.getAddr(0, 0));
        int outputSize = size().height() * rowStride;
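        // WebPINewRGB() is given the frame's own pixel storage, so incremental
        // decoding writes directly into the ImageFrame as more data arrives.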
        m_decoder = WebPINewRGB(outputMode(m_hasAlpha), output, outputSize, rowStride);
        if (!m_decoder)
            return setFailed();
    }

    switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) {
    case VP8_STATUS_OK:
        buffer.setStatus(ImageFrame::FrameComplete);
        WebPIDelete(m_decoder);
        m_decoder = 0;
        return true;
    case VP8_STATUS_SUSPENDED:
        return false;
    default:
        WebPIDelete(m_decoder);
        m_decoder = 0;
        return setFailed();
    }
}
Example #26
0
    void RPIImageDecoder::decode(bool onlySize)
    {
        unsigned int width, height;

        if (failed())
            return;

        // make sure we have all the data before doing anything
        if (!isAllDataReceived())
            return;

        if (onlySize)
        {
            if (readSize(width, height))
            {
                setSize(width, height);
            }
            return;
        }
        else
        {
            readSize(width, height);

            clock_t start = clock();

            if (m_frameBufferCache.isEmpty())
            {
                log("decode : frameBufferCache is empty");
                setFailed();
                return;
            }

            ImageFrame& buffer = m_frameBufferCache[0];

            if (buffer.status() == ImageFrame::FrameEmpty)
            {
                if (!buffer.setSize(width, height))
                {
                    log("decode : could not define buffer size");
                    setFailed();
                    return;
                }

                // The buffer is transparent outside the decoded area while the image is
                // loading. The completed image will be marked fully opaque in jpegComplete().
                buffer.setHasAlpha(false);
            }

            // lock the mutex so that we only process once at a time
            pthread_mutex_lock(&decode_mutex);

            // setup decoder request information
            BRCMIMAGE_REQUEST_T* dec_request = getDecoderRequest();
            BRCMIMAGE_T *decoder = getDecoder();

            memset(dec_request, 0, sizeof(BRCMIMAGE_REQUEST_T));
            dec_request->input = (unsigned char*)m_data->data();
            dec_request->input_size = m_data->size();
            dec_request->output = (unsigned char*)buffer.getAddr(0, 0);
            dec_request->output_alloc_size = width * height * 4;
            dec_request->output_handle = 0;
            dec_request->pixel_format = PIXEL_FORMAT_RGBA;
            dec_request->buffer_width = 0;
            dec_request->buffer_height = 0;

            brcmimage_acquire(decoder);
            BRCMIMAGE_STATUS_T status = brcmimage_process(decoder, dec_request);

            if (status == BRCMIMAGE_SUCCESS)
            {
                clock_t copy = clock();

                unsigned char *ptr = (unsigned char *)buffer.getAddr(0, 0);
                for (unsigned int i = 0; i < dec_request->height * dec_request->width; i++)
                {
                    // we swap RGBA -> BGRA
                    unsigned char tmp = *ptr;
                    *ptr = ptr[2];
                    ptr[2] = tmp;
                    ptr += 4;
                }

                brcmimage_release(decoder);

                buffer.setPixelsChanged(true);
                buffer.setStatus(ImageFrame::FrameComplete);
                buffer.setHasAlpha(m_hasAlpha);

                clock_t end = clock();
                unsigned long millis = (end - start) * 1000 / CLOCKS_PER_SEC;
                unsigned long copymillis = (end - copy) * 1000 / CLOCKS_PER_SEC;

                log("decode : image (%d x %d)(Alpha=%d) decoded in %d ms (copy in %d ms), source size = %d bytes", width, height, m_hasAlpha, millis, copymillis, m_data->size());

            }
            else
            {
                log("decode : Decoding failed with status %d", status);
            }

            pthread_mutex_unlock(&decode_mutex);
        }
    }
Example #27
0
void GIFImageDecoder::decode(unsigned haltAtFrame, GIFQuery query)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader.set(new GIFImageReader(this));

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!m_reader->read((const unsigned char*)m_data->data() + m_readOffset, m_data->size() - m_readOffset, query, haltAtFrame) && isAllDataReceived())
        setFailed();

    if (failed())
        m_reader.clear();
}
Example #28
0
bool WEBPImageDecoder::decodeSingleFrame(const uint8_t* dataBytes,
                                         size_t dataSize,
                                         size_t frameIndex) {
  if (failed())
    return false;

  ASSERT(isDecodedSizeAvailable());

  ASSERT(m_frameBufferCache.size() > frameIndex);
  ImageFrame& buffer = m_frameBufferCache[frameIndex];
  ASSERT(buffer.getStatus() != ImageFrame::FrameComplete);

  if (buffer.getStatus() == ImageFrame::FrameEmpty) {
    if (!buffer.setSizeAndColorSpace(size().width(), size().height(),
                                     colorSpace()))
      return setFailed();
    buffer.setStatus(ImageFrame::FramePartial);
    // The buffer is transparent outside the decoded area while the image is
    // loading. The correct alpha value for the frame will be set when it is
    // fully decoded.
    buffer.setHasAlpha(true);
    buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
  }

  const IntRect& frameRect = buffer.originalFrameRect();
  if (!m_decoder) {
    WEBP_CSP_MODE mode = outputMode(m_formatFlags & ALPHA_FLAG);
    if (!m_premultiplyAlpha)
      mode = outputMode(false);
    if (colorTransform()) {
      // Swizzling between RGBA and BGRA is zero cost in a color transform.
      // So when we have a color transform, we should decode to whatever is
      // easiest for libwebp, and then let the color transform swizzle if
      // necessary.
      // Lossy webp is encoded as YUV (so RGBA and BGRA are the same cost).
      // Lossless webp is encoded as BGRA. This means decoding to BGRA is
      // either faster or the same cost as RGBA.
      mode = MODE_BGRA;
    }
    WebPInitDecBuffer(&m_decoderBuffer);
    m_decoderBuffer.colorspace = mode;
    m_decoderBuffer.u.RGBA.stride =
        size().width() * sizeof(ImageFrame::PixelData);
    m_decoderBuffer.u.RGBA.size =
        m_decoderBuffer.u.RGBA.stride * frameRect.height();
    m_decoderBuffer.is_external_memory = 1;
    m_decoder = WebPINewDecoder(&m_decoderBuffer);
    if (!m_decoder)
      return setFailed();
  }

  m_decoderBuffer.u.RGBA.rgba =
      reinterpret_cast<uint8_t*>(buffer.getAddr(frameRect.x(), frameRect.y()));

  switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) {
    case VP8_STATUS_OK:
      applyPostProcessing(frameIndex);
      buffer.setHasAlpha((m_formatFlags & ALPHA_FLAG) ||
                         m_frameBackgroundHasAlpha);
      buffer.setStatus(ImageFrame::FrameComplete);
      clearDecoder();
      return true;
    case VP8_STATUS_SUSPENDED:
      if (!isAllDataReceived() && !frameIsCompleteAtIndex(frameIndex)) {
        applyPostProcessing(frameIndex);
        return false;
      }
    // FALLTHROUGH
    default:
      clear();
      return setFailed();
  }
}
Example #29
0
bool WEBPImageDecoder::updateDemuxer() {
  if (failed())
    return false;

  if (m_haveAlreadyParsedThisData)
    return true;

  m_haveAlreadyParsedThisData = true;

  const unsigned webpHeaderSize = 30;
  if (m_data->size() < webpHeaderSize)
    return false;  // Await VP8X header so WebPDemuxPartial succeeds.

  WebPDemuxDelete(m_demux);
  m_consolidatedData = m_data->getAsSkData();
  WebPData inputData = {
      reinterpret_cast<const uint8_t*>(m_consolidatedData->data()),
      m_consolidatedData->size()};
  m_demux = WebPDemuxPartial(&inputData, &m_demuxState);
  if (!m_demux || (isAllDataReceived() && m_demuxState != WEBP_DEMUX_DONE)) {
    if (!m_demux)
      m_consolidatedData.reset();
    return setFailed();
  }

  ASSERT(m_demuxState > WEBP_DEMUX_PARSING_HEADER);
  if (!WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT))
    return false;  // Wait until the encoded image frame data arrives.

  if (!isDecodedSizeAvailable()) {
    int width = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_WIDTH);
    int height = WebPDemuxGetI(m_demux, WEBP_FF_CANVAS_HEIGHT);
    if (!setSize(width, height))
      return setFailed();

    m_formatFlags = WebPDemuxGetI(m_demux, WEBP_FF_FORMAT_FLAGS);
    if (!(m_formatFlags & ANIMATION_FLAG)) {
      m_repetitionCount = cAnimationNone;
    } else {
      // Since we have parsed at least one frame, even if partially,
      // the global animation (ANIM) properties have been read, because
      // an ANIM chunk must precede the ANMF frame chunks.
      m_repetitionCount = WebPDemuxGetI(m_demux, WEBP_FF_LOOP_COUNT);
      // Repetition count is always <= 16 bits.
      ASSERT(m_repetitionCount == (m_repetitionCount & 0xffff));
      if (!m_repetitionCount)
        m_repetitionCount = cAnimationLoopInfinite;
      // FIXME: Implement ICC profile support for animated images.
      m_formatFlags &= ~ICCP_FLAG;
    }

    if ((m_formatFlags & ICCP_FLAG) && !ignoresColorSpace())
      readColorProfile();
  }

  ASSERT(isDecodedSizeAvailable());

  size_t frameCount = WebPDemuxGetI(m_demux, WEBP_FF_FRAME_COUNT);
  updateAggressivePurging(frameCount);

  return true;
}