void WEBPImageDecoder::applyPostProcessing(size_t frameIndex)
{
    ImageFrame& buffer = m_frameBufferCache[frameIndex];
    int width;
    int decodedHeight;
    if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
        return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
    if (decodedHeight <= 0)
        return;

    const IntRect& frameRect = buffer.originalFrameRect();
    ASSERT_WITH_SECURITY_IMPLICATION(width == frameRect.width());
    ASSERT_WITH_SECURITY_IMPLICATION(decodedHeight <= frameRect.height());
    const int left = frameRect.x();
    const int top = frameRect.y();

#if USE(QCMSLIB)
    if (qcms_transform* transform = colorTransform()) {
        for (int y = m_decodedHeight; y < decodedHeight; ++y) {
            const int canvasY = top + y;
            uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(left, canvasY));
            qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
            uint8_t* pixel = row;
            for (int x = 0; x < width; ++x, pixel += 4) {
                const int canvasX = left + x;
                buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2], pixel[3]);
            }
        }
    }
#endif // USE(QCMSLIB)

    // During the decoding of the current frame, we may have set some pixels to be
    // transparent (i.e. alpha < 255). However, if the alpha blend source was
    // 'BlendAtopPreviousFrame', the value of each of these pixels should have been
    // determined by blending it against the value of that pixel in the previous frame.
    // So, we correct these pixels based on the disposal method of the previous frame
    // and the previous frame buffer.
    // FIXME: This could be avoided if the libwebp decoder had an API that used the
    // previous required frame to do the alpha-blending by itself.
    if ((m_formatFlags & ANIMATION_FLAG) && frameIndex && buffer.alphaBlendSource() == ImageFrame::BlendAtopPreviousFrame && buffer.requiredPreviousFrameIndex() != kNotFound) {
        ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1];
        ASSERT(prevBuffer.status() == ImageFrame::FrameComplete);
        ImageFrame::DisposalMethod prevDisposalMethod = prevBuffer.disposalMethod();
        if (prevDisposalMethod == ImageFrame::DisposeKeep) {
            // Blend transparent pixels with pixels in the previous canvas.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                m_blendFunction(buffer, prevBuffer, top + y, left, width);
            }
        } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) {
            const IntRect& prevRect = prevBuffer.originalFrameRect();
            // We need to blend a transparent pixel with its value just after the initFrame() call. That is:
            //   * Blend with a fully transparent pixel if it belongs to prevRect <-- This is a no-op.
            //   * Blend with the pixel in the previous canvas otherwise <-- Needs alpha-blending.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                int canvasY = top + y;
                int left1, width1, left2, width2;
                findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2, width2);
                if (width1 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left1, width1);
                if (width2 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left2, width2);
            }
        }
    }

    m_decodedHeight = decodedHeight;
    buffer.setPixelsChanged(true);
}
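
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the decoder: the per-pixel operation that
// m_blendFunction is expected to perform for 'BlendAtopPreviousFrame' when the
// frame buffer holds non-premultiplied RGBA. The function name
// blendSrcOverNonPremul, the 4-byte pixel layout, and the standard
// "source over destination" compositing math are assumptions; the real blend
// functions live elsewhere in WEBPImageDecoder and may differ in detail.
#include <cstdint>

static void blendSrcOverNonPremul(uint8_t src[4], const uint8_t dst[4])
{
    const unsigned srcA = src[3];
    if (srcA == 255)
        return; // Fully opaque source pixel: nothing to blend.

    const unsigned dstA = dst[3];
    // Resulting alpha of "src over dst" with 8-bit, non-premultiplied channels.
    const unsigned outA = srcA + dstA * (255 - srcA) / 255;
    if (!outA) {
        src[0] = src[1] = src[2] = src[3] = 0;
        return;
    }
    for (int i = 0; i < 3; ++i) {
        // Weighted combination of the color channels, divided back out by the
        // resulting alpha to stay non-premultiplied.
        src[i] = static_cast<uint8_t>((src[i] * srcA + dst[i] * dstA * (255 - srcA) / 255) / outA);
    }
    src[3] = static_cast<uint8_t>(outA);
}
// ---------------------------------------------------------------------------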
void WEBPImageDecoder::applyPostProcessing(size_t frameIndex) {
  ImageFrame& buffer = m_frameBufferCache[frameIndex];
  int width;
  int decodedHeight;
  if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
    return;  // See also https://bugs.webkit.org/show_bug.cgi?id=74062
  if (decodedHeight <= 0)
    return;

  const IntRect& frameRect = buffer.originalFrameRect();
  SECURITY_DCHECK(width == frameRect.width());
  SECURITY_DCHECK(decodedHeight <= frameRect.height());
  const int left = frameRect.x();
  const int top = frameRect.y();

  // TODO(msarett): Here we apply the color space transformation to the dst
  // space. It does not really make sense to transform to a gamma-encoded space
  // and then, immediately after, perform a linear premultiply and linear
  // blending. Can we find a way to perform the premultiplication and blending
  // in a linear space?
  SkColorSpaceXform* xform = colorTransform();
  if (xform) {
    const SkColorSpaceXform::ColorFormat srcFormat =
        SkColorSpaceXform::kBGRA_8888_ColorFormat;
    const SkColorSpaceXform::ColorFormat dstFormat =
        SkColorSpaceXform::kRGBA_8888_ColorFormat;
    for (int y = m_decodedHeight; y < decodedHeight; ++y) {
      const int canvasY = top + y;
      uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(left, canvasY));
      xform->apply(dstFormat, row, srcFormat, row, width,
                   kUnpremul_SkAlphaType);

      uint8_t* pixel = row;
      for (int x = 0; x < width; ++x, pixel += 4) {
        const int canvasX = left + x;
        buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2],
                       pixel[3]);
      }
    }
  }

  // During the decoding of the current frame, we may have set some pixels to
  // be transparent (i.e. alpha < 255). If the alpha blend source was
  // 'BlendAtopPreviousFrame', the values of these pixels should be determined
  // by blending them against the pixels of the corresponding previous frame.
  // Compute the correct opaque values now.
  // FIXME: This could be avoided if the libwebp decoder had an API that used
  // the previous required frame to do the alpha-blending by itself.
  if ((m_formatFlags & ANIMATION_FLAG) && frameIndex &&
      buffer.getAlphaBlendSource() == ImageFrame::BlendAtopPreviousFrame &&
      buffer.requiredPreviousFrameIndex() != kNotFound) {
    ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1];
    ASSERT(prevBuffer.getStatus() == ImageFrame::FrameComplete);
    ImageFrame::DisposalMethod prevDisposalMethod =
        prevBuffer.getDisposalMethod();

    if (prevDisposalMethod == ImageFrame::DisposeKeep) {
      // Blend transparent pixels with pixels in the previous canvas.
      for (int y = m_decodedHeight; y < decodedHeight; ++y) {
        m_blendFunction(buffer, prevBuffer, top + y, left, width);
      }
    } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) {
      const IntRect& prevRect = prevBuffer.originalFrameRect();
      // We need to blend a transparent pixel with the starting value (from
      // just after the initFrame() call). If the pixel belongs to prevRect,
      // the starting value was fully transparent, so this is a no-op.
      // Otherwise, we need to blend against the pixel from the previous
      // canvas.
      for (int y = m_decodedHeight; y < decodedHeight; ++y) {
        int canvasY = top + y;
        int left1, width1, left2, width2;
        findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2,
                            width2);
        if (width1 > 0)
          m_blendFunction(buffer, prevBuffer, canvasY, left1, width1);
        if (width2 > 0)
          m_blendFunction(buffer, prevBuffer, canvasY, left2, width2);
      }
    }
  }

  m_decodedHeight = decodedHeight;
  buffer.setPixelsChanged(true);
}
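
// ---------------------------------------------------------------------------
// Illustrative sketch, not the shipped implementation: what
// findBlendRangeAtRow is expected to compute for the DisposeOverwriteBgcolor
// path above. For a given canvas row it returns up to two horizontal spans of
// the current frame rect that lie outside the previous frame rect; only those
// spans need alpha-blending, because pixels inside prevRect started out fully
// transparent after initFrame(). The minimal Rect type, the "Sketch" suffix,
// and the exact boundary handling are assumptions reconstructed from the
// comments in applyPostProcessing.
struct Rect {
  int x, y, width, height;
  int maxX() const { return x + width; }
  int maxY() const { return y + height; }
};

static void findBlendRangeAtRowSketch(const Rect& frameRect,
                                      const Rect& prevRect,
                                      int canvasY,
                                      int& left1,
                                      int& width1,
                                      int& left2,
                                      int& width2) {
  left1 = left2 = -1;
  width1 = width2 = 0;

  // The row does not intersect prevRect at all (vertically or horizontally):
  // the entire frame row needs blending against the previous canvas.
  if (canvasY < prevRect.y || canvasY >= prevRect.maxY() ||
      frameRect.x >= prevRect.maxX() || frameRect.maxX() <= prevRect.x) {
    left1 = frameRect.x;
    width1 = frameRect.width;
    return;
  }

  // Part of the frame row to the left of prevRect.
  if (frameRect.x < prevRect.x) {
    left1 = frameRect.x;
    width1 = prevRect.x - frameRect.x;
  }

  // Part of the frame row to the right of prevRect.
  if (frameRect.maxX() > prevRect.maxX()) {
    left2 = prevRect.maxX();
    width2 = frameRect.maxX() - prevRect.maxX();
  }
}
// ---------------------------------------------------------------------------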