Code Example #1
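A Blink unit test asserting that, for a static PNG, frameIsCompleteAtIndex(0) returns true only once the frame has actually been decoded, not merely once all of its data has been received.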
// For static images, frameIsCompleteAtIndex(0) should return true if and only
// if the frame is successfully decoded, not when it is fully received.
TEST(StaticPNGTests, VerifyFrameCompleteBehavior) {
  auto decoder =
      createDecoderWithPngData("/LayoutTests/images/resources/png-simple.png");
  EXPECT_EQ(1u, decoder->frameCount());
  EXPECT_FALSE(decoder->frameIsCompleteAtIndex(0));
  EXPECT_EQ(ImageFrame::FrameComplete,
            decoder->frameBufferAtIndex(0)->getStatus());
  EXPECT_TRUE(decoder->frameIsCompleteAtIndex(0));
}
Code Example #2
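An older WebKit version of WEBPImageDecoder::decodeSingleFrame(). When the incremental decode returns VP8_STATUS_SUSPENDED, frameIsCompleteAtIndex() helps decide whether the decoder should keep waiting for data or treat the suspension as a failure.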
bool WEBPImageDecoder::decodeSingleFrame(const uint8_t* dataBytes, size_t dataSize, size_t frameIndex)
{
    if (failed())
        return false;

    ASSERT(isDecodedSizeAvailable());

    ASSERT(m_frameBufferCache.size() > frameIndex);
    ImageFrame& buffer = m_frameBufferCache[frameIndex];
    ASSERT(buffer.status() != ImageFrame::FrameComplete);

    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(size().width(), size().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        // The buffer is transparent outside the decoded area while the image is loading.
        // The correct value of 'hasAlpha' for the frame will be set when it is fully decoded.
        buffer.setHasAlpha(true);
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    const IntRect& frameRect = buffer.originalFrameRect();
    if (!m_decoder) {
        WEBP_CSP_MODE mode = outputMode(m_formatFlags & ALPHA_FLAG);
        if (!m_premultiplyAlpha)
            mode = outputMode(false);
#if USE(QCMSLIB)
        if (colorTransform())
            mode = MODE_RGBA; // Decode to RGBA for input to libqcms.
#endif
        WebPInitDecBuffer(&m_decoderBuffer);
        m_decoderBuffer.colorspace = mode;
        m_decoderBuffer.u.RGBA.stride = size().width() * sizeof(ImageFrame::PixelData);
        m_decoderBuffer.u.RGBA.size = m_decoderBuffer.u.RGBA.stride * frameRect.height();
        m_decoderBuffer.is_external_memory = 1;
        m_decoder = WebPINewDecoder(&m_decoderBuffer);
        if (!m_decoder)
            return setFailed();
    }

    m_decoderBuffer.u.RGBA.rgba = reinterpret_cast<uint8_t*>(buffer.getAddr(frameRect.x(), frameRect.y()));

    switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) {
    case VP8_STATUS_OK:
        applyPostProcessing(frameIndex);
        buffer.setHasAlpha((m_formatFlags & ALPHA_FLAG) || m_frameBackgroundHasAlpha);
        buffer.setStatus(ImageFrame::FrameComplete);
        clearDecoder();
        return true;
    case VP8_STATUS_SUSPENDED:
        if (!isAllDataReceived() && !frameIsCompleteAtIndex(frameIndex)) {
            applyPostProcessing(frameIndex);
            return false;
        }
        // FALLTHROUGH
    default:
        clear();
        return setFailed();
    }
}
Code Example #3
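ImageSource::frameHasAlphaAtIndex() treats any frame that has not finished decoding as having alpha, so callers that pick a compositing operation based on this result do not paint the undecoded portion black.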
bool ImageSource::frameHasAlphaAtIndex(size_t index)
{
    // When a frame has not finished decoding, always mark it as having alpha.
    // Ports that check the result of this function to determine their
    // compositing op need this in order to not draw the undecoded portion as
    // black.
    // TODO: Perhaps we should ensure that each individual decoder returns true
    // in this case.
    return !frameIsCompleteAtIndex(index)
        || m_decoder->frameBufferAtIndex(index)->hasAlpha();
}
Code Example #4
File: ImageSourceCairo.cpp Project: acss/owb-mirror
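A Cairo-port variant of the same check: an incomplete frame is unconditionally reported as having alpha.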
bool ImageSource::frameHasAlphaAtIndex(size_t index)
{
    // When a frame has not finished decoding, always mark it as having alpha,
    // so we don't get a black background for the undecoded sections.
    // TODO: A better solution is probably to have the underlying buffer's
    // hasAlpha() return true in these cases, since it is, in fact, technically
    // true.
    if (!frameIsCompleteAtIndex(index))
        return true;

    return m_decoder->frameBufferAtIndex(index)->hasAlpha();
}
Code Example #5
File: ImageDecoderCG.cpp Project: ollie314/webkit
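The CoreGraphics-based decoder makes the same conservative assumption for incomplete frames, then special-cases JPEG, which cannot carry alpha transparency.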
bool ImageDecoder::frameHasAlphaAtIndex(size_t index) const
{
    if (!frameIsCompleteAtIndex(index))
        return true;
    
    CFStringRef imageType = CGImageSourceGetType(m_nativeDecoder.get());
    
    // Return false if there is no image type or the image type is JPEG, because
    // JPEG does not support alpha transparency.
    if (!imageType || CFEqual(imageType, CFSTR("public.jpeg")))
        return false;
    
    // FIXME: Could return false for other non-transparent image formats.
    // FIXME: Could maybe return false for a GIF Frame if we have enough info in the GIF properties dictionary
    // to determine whether or not a transparent color was defined.
    return true;
}
Code Example #6
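BitmapImage::startAnimation() from an older WebKit. It refuses to advance the animation to an incomplete frame and consults frameIsCompleteAtIndex() again in its catch-up loop when the animation has fallen behind schedule.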
void BitmapImage::startAnimation(bool catchUpIfNecessary)
{
    if (m_frameTimer || !shouldAnimate() || frameCount() <= 1)
        return;

    // If we aren't already animating, set now as the animation start time.
    const double time = monotonicallyIncreasingTime();
    if (!m_desiredFrameStartTime)
        m_desiredFrameStartTime = time;

    // Don't advance the animation to an incomplete frame.
    size_t nextFrame = (m_currentFrame + 1) % frameCount();
    if (!m_allDataReceived && !frameIsCompleteAtIndex(nextFrame))
        return;

    // Don't advance past the last frame if we haven't decoded the whole image
    // yet and our repetition count is potentially unset.  The repetition count
    // in a GIF can potentially come after all the rest of the image data, so
    // wait on it.
    if (!m_allDataReceived && repetitionCount(false) == cAnimationLoopOnce && m_currentFrame >= (frameCount() - 1))
        return;

    // Determine time for next frame to start.  By ignoring paint and timer lag
    // in this calculation, we make the animation appear to run at its desired
    // rate regardless of how fast it's being repainted.
    const double currentDuration = frameDurationAtIndex(m_currentFrame);
    m_desiredFrameStartTime += currentDuration;

    // When an animated image is more than five minutes out of date, the
    // user probably doesn't care about resyncing and we could burn a lot of
    // time looping through frames below.  Just reset the timings.
    const double cAnimationResyncCutoff = 5 * 60;
    if ((time - m_desiredFrameStartTime) > cAnimationResyncCutoff)
        m_desiredFrameStartTime = time + currentDuration;

    // The image may load more slowly than it's supposed to animate, so that by
    // the time we reach the end of the first repetition, we're well behind.
    // Clamp the desired frame start time in this case, so that we don't skip
    // frames (or whole iterations) trying to "catch up".  This is a tradeoff:
    // It guarantees users see the whole animation the second time through and
    // don't miss any repetitions, and is closer to what other browsers do; on
    // the other hand, it makes animations "less accurate" for pages that try to
    // sync an image and some other resource (e.g. audio), especially if users
    // switch tabs (and thus stop drawing the animation, which will pause it)
    // during that initial loop, then switch back later.
    if (nextFrame == 0 && m_repetitionsComplete == 0 && m_desiredFrameStartTime < time)
        m_desiredFrameStartTime = time;

    if (!catchUpIfNecessary || time < m_desiredFrameStartTime) {
        // Haven't yet reached time for next frame to start; delay until then.
        m_frameTimer = new Timer<BitmapImage>(this, &BitmapImage::advanceAnimation);
        m_frameTimer->startOneShot(std::max(m_desiredFrameStartTime - time, 0.));
    } else {
        // We've already reached or passed the time for the next frame to start.
        // See if we've also passed the time for frames after that to start, in
        // case we need to skip some frames entirely.  Remember not to advance
        // to an incomplete frame.
        for (size_t frameAfterNext = (nextFrame + 1) % frameCount(); frameIsCompleteAtIndex(frameAfterNext); frameAfterNext = (nextFrame + 1) % frameCount()) {
            // Should we skip the next frame?
            double frameAfterNextStartTime = m_desiredFrameStartTime + frameDurationAtIndex(nextFrame);
            if (time < frameAfterNextStartTime)
                break;

            // Yes; skip over it without notifying our observers.
            if (!internalAdvanceAnimation(true))
                return;
            m_desiredFrameStartTime = frameAfterNextStartTime;
            nextFrame = frameAfterNext;
        }

        // Draw the next frame immediately.  Note that m_desiredFrameStartTime
        // may be in the past, meaning the next time through this function we'll
        // kick off the next advancement sooner than this frame's duration would
        // suggest.
        if (internalAdvanceAnimation(false)) {
            // The image region has been marked dirty, but once we return to our
            // caller, draw() will clear it, and nothing will cause the
            // animation to advance again.  We need to start the timer for the
            // next frame running, or the animation can hang.  (Compare this
            // with when advanceAnimation() is called, and the region is dirtied
            // while draw() is not in the callstack, meaning draw() gets called
            // to update the region and thus startAnimation() is reached again.)
            // NOTE: For large images with slow or heavily-loaded systems,
            // throwing away data as we go (see destroyDecodedData()) means we
            // can spend so much time re-decoding data above that by the time we
            // reach here we're behind again.  If we let startAnimation() run
            // the catch-up code again, we can get long delays without painting
            // as we race the timer, or even infinite recursion.  In this
            // situation the best we can do is to simply change frames as fast
            // as possible, so force startAnimation() to set a zero-delay timer
            // and bail out if we're not caught up.
            startAnimation(false);
        }
    }
}
Code Example #7
File: ImageDecoder.cpp Project: kjthegod/WebKit
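A minimal ImageDecoder::frameHasAlphaAtIndex(): incomplete frames are assumed to have alpha, complete frames defer to the frame buffer.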
bool ImageDecoder::frameHasAlphaAtIndex(size_t index) const
{
    return !frameIsCompleteAtIndex(index) || m_frameBufferCache[index].hasAlpha();
}
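Examples #1 and #7 hint at what frameIsCompleteAtIndex() itself checks. The snippet below is only a sketch and is not taken from any of the projects above; it assumes a Blink-style ImageDecoder whose m_frameBufferCache holds ImageFrame objects reporting FrameComplete via getStatus() once decoding has finished.

// Hypothetical sketch: a frame counts as complete only once its buffer has
// been fully decoded, matching the behavior asserted by the test in example #1.
bool ImageDecoder::frameIsCompleteAtIndex(size_t index) const {
  return index < m_frameBufferCache.size() &&
         m_frameBufferCache[index].getStatus() == ImageFrame::FrameComplete;
}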
Code Example #8
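A thin convenience wrapper on BitmapImage that queries whether the frame currently being displayed is complete.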
bool BitmapImage::currentFrameIsComplete()
{
    return frameIsCompleteAtIndex(currentFrame());
}
Code Example #9
File: BitmapImage.cpp Project: lokifist/webkit
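A newer WebKit BitmapImage::startAnimation(). The structure mirrors example #6, but iOS keeps per-frame delays instead of resyncing absolute timings, and catch-up ends with a zero-delay timer rather than a recursive call.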
void BitmapImage::startAnimation(CatchUpAnimation catchUpIfNecessary)
{
    if (m_frameTimer || !shouldAnimate() || frameCount() <= 1)
        return;

    // If we aren't already animating, set now as the animation start time.
    const double time = monotonicallyIncreasingTime();
    if (!m_desiredFrameStartTime)
        m_desiredFrameStartTime = time;

    // Don't advance the animation to an incomplete frame.
    size_t nextFrame = (m_currentFrame + 1) % frameCount();
    if (!m_allDataReceived && !frameIsCompleteAtIndex(nextFrame))
        return;

    // Don't advance past the last frame if we haven't decoded the whole image
    // yet and our repetition count is potentially unset. The repetition count
    // in a GIF can potentially come after all the rest of the image data, so
    // wait on it.
    if (!m_allDataReceived && repetitionCount(false) == cAnimationLoopOnce && m_currentFrame >= (frameCount() - 1))
        return;

    // Determine time for next frame to start. By ignoring paint and timer lag
    // in this calculation, we make the animation appear to run at its desired
    // rate regardless of how fast it's being repainted.
    const double currentDuration = frameDurationAtIndex(m_currentFrame);
    m_desiredFrameStartTime += currentDuration;

#if !PLATFORM(IOS)
    // When an animated image is more than five minutes out of date, the
    // user probably doesn't care about resyncing and we could burn a lot of
    // time looping through frames below. Just reset the timings.
    const double cAnimationResyncCutoff = 5 * 60;
    if ((time - m_desiredFrameStartTime) > cAnimationResyncCutoff)
        m_desiredFrameStartTime = time + currentDuration;
#else
    // Maintaining frame-to-frame delays is more important than
    // maintaining absolute animation timing, so reset the timings each frame.
    m_desiredFrameStartTime = time + currentDuration;
#endif

    // The image may load more slowly than it's supposed to animate, so that by
    // the time we reach the end of the first repetition, we're well behind.
    // Clamp the desired frame start time in this case, so that we don't skip
    // frames (or whole iterations) trying to "catch up". This is a tradeoff:
    // It guarantees users see the whole animation the second time through and
    // don't miss any repetitions, and is closer to what other browsers do; on
    // the other hand, it makes animations "less accurate" for pages that try to
    // sync an image and some other resource (e.g. audio), especially if users
    // switch tabs (and thus stop drawing the animation, which will pause it)
    // during that initial loop, then switch back later.
    if (nextFrame == 0 && m_repetitionsComplete == 0 && m_desiredFrameStartTime < time)
        m_desiredFrameStartTime = time;

    if (catchUpIfNecessary == DoNotCatchUp || time < m_desiredFrameStartTime) {
        // Haven't yet reached time for next frame to start; delay until then.
        startTimer(std::max<double>(m_desiredFrameStartTime - time, 0));
        return;
    }

    ASSERT(!m_frameTimer);

    // We've already reached or passed the time for the next frame to start.
    // See if we've also passed the time for frames after that to start, in
    // case we need to skip some frames entirely. Remember not to advance
    // to an incomplete frame.

#if !LOG_DISABLED
    size_t startCatchupFrameIndex = nextFrame;
#endif
    
    for (size_t frameAfterNext = (nextFrame + 1) % frameCount(); frameIsCompleteAtIndex(frameAfterNext); frameAfterNext = (nextFrame + 1) % frameCount()) {
        // Should we skip the next frame?
        double frameAfterNextStartTime = m_desiredFrameStartTime + frameDurationAtIndex(nextFrame);
        if (time < frameAfterNextStartTime)
            break;

        // Yes; skip over it without notifying our observers. If we hit the end while catching up,
        // tell the observer asynchronously.
        if (!internalAdvanceAnimation(SkippingFramesToCatchUp)) {
            m_animationFinishedWhenCatchingUp = true;
            startTimer(0);
            LOG(Images, "BitmapImage %p startAnimation catching up from frame %lu, ended", this, startCatchupFrameIndex);
            return;
        }
        m_desiredFrameStartTime = frameAfterNextStartTime;
        nextFrame = frameAfterNext;
    }

    LOG(Images, "BitmapImage %p startAnimation catching up jumped from from frame %lu to %d", this, startCatchupFrameIndex, (int)nextFrame - 1);

    // Draw the next frame as soon as possible. Note that m_desiredFrameStartTime
    // may be in the past, meaning the next time through this function we'll
    // kick off the next advancement sooner than this frame's duration would suggest.
    startTimer(0);
}
Code Example #10
File: WEBPImageDecoder.cpp Project: mirror/chromium
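The Chromium counterpart of example #2. The notable difference is the color-transform path: it decodes to BGRA and lets the transform do any swizzling, instead of forcing RGBA for libqcms.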
bool WEBPImageDecoder::decodeSingleFrame(const uint8_t* dataBytes,
                                         size_t dataSize,
                                         size_t frameIndex) {
  if (failed())
    return false;

  ASSERT(isDecodedSizeAvailable());

  ASSERT(m_frameBufferCache.size() > frameIndex);
  ImageFrame& buffer = m_frameBufferCache[frameIndex];
  ASSERT(buffer.getStatus() != ImageFrame::FrameComplete);

  if (buffer.getStatus() == ImageFrame::FrameEmpty) {
    if (!buffer.setSizeAndColorSpace(size().width(), size().height(),
                                     colorSpace()))
      return setFailed();
    buffer.setStatus(ImageFrame::FramePartial);
    // The buffer is transparent outside the decoded area while the image is
    // loading. The correct alpha value for the frame will be set when it is
    // fully decoded.
    buffer.setHasAlpha(true);
    buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
  }

  const IntRect& frameRect = buffer.originalFrameRect();
  if (!m_decoder) {
    WEBP_CSP_MODE mode = outputMode(m_formatFlags & ALPHA_FLAG);
    if (!m_premultiplyAlpha)
      mode = outputMode(false);
    if (colorTransform()) {
      // Swizzling between RGBA and BGRA is zero cost in a color transform.
      // So when we have a color transform, we should decode to whatever is
      // easiest for libwebp, and then let the color transform swizzle if
      // necessary.
      // Lossy webp is encoded as YUV (so RGBA and BGRA are the same cost).
      // Lossless webp is encoded as BGRA. This means decoding to BGRA is
      // either faster or the same cost as RGBA.
      mode = MODE_BGRA;
    }
    WebPInitDecBuffer(&m_decoderBuffer);
    m_decoderBuffer.colorspace = mode;
    m_decoderBuffer.u.RGBA.stride =
        size().width() * sizeof(ImageFrame::PixelData);
    m_decoderBuffer.u.RGBA.size =
        m_decoderBuffer.u.RGBA.stride * frameRect.height();
    m_decoderBuffer.is_external_memory = 1;
    m_decoder = WebPINewDecoder(&m_decoderBuffer);
    if (!m_decoder)
      return setFailed();
  }

  m_decoderBuffer.u.RGBA.rgba =
      reinterpret_cast<uint8_t*>(buffer.getAddr(frameRect.x(), frameRect.y()));

  switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) {
    case VP8_STATUS_OK:
      applyPostProcessing(frameIndex);
      buffer.setHasAlpha((m_formatFlags & ALPHA_FLAG) ||
                         m_frameBackgroundHasAlpha);
      buffer.setStatus(ImageFrame::FrameComplete);
      clearDecoder();
      return true;
    case VP8_STATUS_SUSPENDED:
      if (!isAllDataReceived() && !frameIsCompleteAtIndex(frameIndex)) {
        applyPostProcessing(frameIndex);
        return false;
      }
    // FALLTHROUGH
    default:
      clear();
      return setFailed();
  }
}