Example #1
bool JPEGImageDecoder::outputScanlines()
{
    if (m_frameBufferCache.isEmpty())
        return false;

    // Initialize the framebuffer if needed.
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(scaledSize().width(), scaledSize().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        // The buffer is transparent outside the decoded area while the image is
        // loading. The completed image will be marked fully opaque in jpegComplete().
        buffer.setHasAlpha(true);
        buffer.setColorProfile(m_colorProfile);

        // For JPEGs, the frame always fills the entire image.
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    jpeg_decompress_struct* info = m_reader->info();

#if defined(TURBO_JPEG_RGB_SWIZZLE)
    if (!m_scaled && turboSwizzled(info->out_color_space)) {
        while (info->output_scanline < info->output_height) {
            unsigned char* row = reinterpret_cast<unsigned char*>(buffer.getAddr(0, info->output_scanline));
            if (jpeg_read_scanlines(info, &row, 1) != 1)
                return false;
#if USE(QCMSLIB)
            if (qcms_transform* transform = m_reader->colorTransform())
                qcms_transform_data_type(transform, row, row, info->output_width, rgbOutputColorSpace() == JCS_EXT_BGRA ? QCMS_OUTPUT_BGRX : QCMS_OUTPUT_RGBX);
#endif
        }
        return true;
    }
#endif

    switch (info->out_color_space) {
    // The code inside the templated outputScanlines() will be executed for
    // each pixel, so we want to avoid any extra comparisons there. That is why
    // we use a template and template specializations here, so the proper code
    // is generated at compile time (see the sketch after this example).
    case JCS_RGB:
        return outputScanlines<JCS_RGB>(buffer);
    case JCS_CMYK:
        return outputScanlines<JCS_CMYK>(buffer);
    default:
        ASSERT_NOT_REACHED();
    }

    return setFailed();
}
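
The comment above the switch describes a pattern these decoders rely on: the color-space check is hoisted out of the per-pixel work by making the row loop a template, so the compiler generates a separate loop per color space with no per-pixel branch. A minimal, self-contained sketch of that idea, assuming nothing from the decoder itself (writeRow, writeRowFor and Pixel are illustrative names, not part of its API):

#include <cstdint>
#include <vector>

enum ColorSpace { RGB, InvertedCMYK };

struct Pixel { uint8_t r, g, b, a; };

// The color space is a template parameter, so the branch below is resolved at
// compile time and each instantiation's per-pixel loop contains no comparison.
template <ColorSpace colorSpace>
void writeRow(const uint8_t* src, std::vector<Pixel>& dst)
{
    for (size_t x = 0; x < dst.size(); ++x) {
        if (colorSpace == RGB) {
            const uint8_t* p = src + x * 3;
            dst[x] = { p[0], p[1], p[2], 0xFF };
        } else { // Inverted CMYK: multiply each channel by the inverted K.
            const uint8_t* p = src + x * 4;
            unsigned k = p[3];
            dst[x] = { uint8_t(p[0] * k / 255), uint8_t(p[1] * k / 255),
                       uint8_t(p[2] * k / 255), 0xFF };
        }
    }
}

// The runtime switch happens once per image, mirroring the switch on
// info->out_color_space above, instead of once per pixel.
void writeRowFor(ColorSpace cs, const uint8_t* src, std::vector<Pixel>& dst)
{
    switch (cs) {
    case RGB:          writeRow<RGB>(src, dst);          break;
    case InvertedCMYK: writeRow<InvertedCMYK>(src, dst); break;
    }
}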
Example #2
bool JPEGImageDecoder::outputScanlines()
{
    if (m_frameBufferCache.isEmpty())
        return false;

    jpeg_decompress_struct* info = m_reader->info();

    // Initialize the framebuffer if needed.
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        ASSERT(info->output_width == static_cast<JDIMENSION>(m_decodedSize.width()));
        ASSERT(info->output_height == static_cast<JDIMENSION>(m_decodedSize.height()));

        if (!buffer.setSize(info->output_width, info->output_height))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        // The buffer is transparent outside the decoded area while the image is
        // loading. The completed image will be marked fully opaque in jpegComplete().
        buffer.setHasAlpha(true);

        // For JPEGs, the frame always fills the entire image.
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

#if defined(TURBO_JPEG_RGB_SWIZZLE)
    if (turboSwizzled(info->out_color_space)) {
        while (info->output_scanline < info->output_height) {
            unsigned char* row = reinterpret_cast<unsigned char*>(buffer.getAddr(0, info->output_scanline));
            if (jpeg_read_scanlines(info, &row, 1) != 1)
                return false;
#if USE(QCMSLIB)
            if (qcms_transform* transform = m_reader->colorTransform())
                qcms_transform_data_type(transform, row, row, info->output_width, rgbOutputColorSpace() == JCS_EXT_BGRA ? QCMS_OUTPUT_BGRX : QCMS_OUTPUT_RGBX);
#endif
        }
        buffer.setPixelsChanged(true);
        return true;
    }
#endif

    switch (info->out_color_space) {
    case JCS_RGB:
        return outputRows<JCS_RGB>(m_reader.get(), buffer);
    case JCS_CMYK:
        return outputRows<JCS_CMYK>(m_reader.get(), buffer);
    default:
        ASSERT_NOT_REACHED();
    }

    return setFailed();
}
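
Both JPEG examples only show the per-row call to qcms_transform_data_type(); the transform itself (m_reader->colorTransform()) is built elsewhere from the image's embedded ICC profile. A sketch of how such a transform is typically created with the public qcms API, assuming an sRGB output and 8-bit RGBA data; the helper name createColorTransform and the error handling are illustrative, not code from these examples:

#include <cstddef>
#include "qcms.h"

// Build a transform from the image's embedded ICC profile to sRGB, operating
// on 8-bit RGBA samples, suitable for the in-place per-row calls above.
qcms_transform* createColorTransform(const void* iccData, size_t iccLength)
{
    qcms_profile* inputProfile = qcms_profile_from_memory(iccData, iccLength);
    if (!inputProfile)
        return 0;
    qcms_profile* outputProfile = qcms_profile_sRGB();
    qcms_transform* transform = qcms_transform_create(
        inputProfile, QCMS_DATA_RGBA_8,
        outputProfile, QCMS_DATA_RGBA_8,
        QCMS_INTENT_PERCEPTUAL);
    qcms_profile_release(inputProfile);
    qcms_profile_release(outputProfile);
    return transform; // Caller releases it with qcms_transform_release().
}

The returned transform is then applied row by row, in place, as the qcms_transform_data_type() calls in the loops above do, with QCMS_OUTPUT_RGBX or QCMS_OUTPUT_BGRX selecting the byte order of the destination pixels.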
Example #3
void WEBPImageDecoder::applyPostProcessing(size_t frameIndex)
{
    ImageFrame& buffer = m_frameBufferCache[frameIndex];
    int width;
    int decodedHeight;
    if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
        return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
    if (decodedHeight <= 0)
        return;

    const IntRect& frameRect = buffer.originalFrameRect();
    ASSERT_WITH_SECURITY_IMPLICATION(width == frameRect.width());
    ASSERT_WITH_SECURITY_IMPLICATION(decodedHeight <= frameRect.height());
    const int left = frameRect.x();
    const int top = frameRect.y();

#if USE(QCMSLIB)
    if (qcms_transform* transform = colorTransform()) {
        for (int y = m_decodedHeight; y < decodedHeight; ++y) {
            const int canvasY = top + y;
            uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(left, canvasY));
            qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
            uint8_t* pixel = row;
            for (int x = 0; x < width; ++x, pixel += 4) {
                const int canvasX = left + x;
                buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2], pixel[3]);
            }
        }
    }
#endif // USE(QCMSLIB)

    // While decoding the current frame, we may have set some pixels to be transparent (i.e. alpha < 255).
    // However, if the frame's alpha blend source is 'BlendAtopPreviousFrame', the value of each of these
    // pixels should have been determined by blending it against the value of that pixel in the previous
    // frame. So we correct these pixels based on the disposal method of the previous frame and the
    // previous frame buffer.
    // FIXME: This could be avoided if the libwebp decoder had an API that used the previous required frame
    // to do the alpha-blending by itself.
    if ((m_formatFlags & ANIMATION_FLAG) && frameIndex && buffer.alphaBlendSource() == ImageFrame::BlendAtopPreviousFrame && buffer.requiredPreviousFrameIndex() != kNotFound) {
        ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1];
        ASSERT(prevBuffer.status() == ImageFrame::FrameComplete);
        ImageFrame::DisposalMethod prevDisposalMethod = prevBuffer.disposalMethod();
        if (prevDisposalMethod == ImageFrame::DisposeKeep) { // Blend transparent pixels with pixels in previous canvas.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                m_blendFunction(buffer, prevBuffer, top + y, left, width);
            }
        } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) {
            const IntRect& prevRect = prevBuffer.originalFrameRect();
            // We need to blend a transparent pixel with its value just after initFrame() call. That is:
            //   * Blend with fully transparent pixel if it belongs to prevRect <-- This is a no-op.
            //   * Blend with the pixel in the previous canvas otherwise <-- Needs alpha-blending.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                int canvasY = top + y;
                int left1, width1, left2, width2;
                findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2, width2);
                if (width1 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left1, width1);
                if (width2 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left2, width2);
            }
        }
    }

    m_decodedHeight = decodedHeight;
    buffer.setPixelsChanged(true);
}
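
Example #3 leans on findBlendRangeAtRow() to split a canvas row into the parts of frameRect that lie outside prevRect: pixels inside prevRect were cleared to fully transparent by DisposeOverwriteBgcolor, so blending against them is a no-op, while the rest of the row still needs blending against the previous canvas. A sketch of the contract such a helper must satisfy, reusing the decoder's IntRect type; it illustrates the expected outputs and is not necessarily the decoder's actual implementation:

static void findBlendRangeAtRow(const IntRect& frameRect, const IntRect& prevRect, int canvasY,
                                int& left1, int& width1, int& left2, int& width2)
{
    left1 = -1;
    width1 = 0;
    left2 = -1;
    width2 = 0;

    // Row does not intersect prevRect at all: the whole frame row needs blending.
    if (canvasY < prevRect.y() || canvasY >= prevRect.maxY()
        || frameRect.x() >= prevRect.maxX() || frameRect.maxX() <= prevRect.x()) {
        left1 = frameRect.x();
        width1 = frameRect.width();
        return;
    }

    // Span of the frame row to the left of prevRect.
    if (frameRect.x() < prevRect.x()) {
        left1 = frameRect.x();
        width1 = prevRect.x() - frameRect.x();
    }
    // Span of the frame row to the right of prevRect.
    if (frameRect.maxX() > prevRect.maxX()) {
        left2 = prevRect.maxX();
        width2 = frameRect.maxX() - prevRect.maxX();
    }
}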
Example #4
bool JPEGImageDecoder::outputScanlines()
{
    if (m_frameBufferCache.isEmpty())
        return false;

    // Initialize the framebuffer if needed.
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(scaledSize().width(), scaledSize().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        // The buffer is transparent outside the decoded area while the image is
        // loading. The completed image will be marked fully opaque in jpegComplete().
        buffer.setHasAlpha(true);
        buffer.setColorProfile(m_colorProfile);

        // For JPEGs, the frame always fills the entire image.
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    jpeg_decompress_struct* info = m_reader->info();

#if !ENABLE(IMAGE_DECODER_DOWN_SAMPLING) && defined(TURBO_JPEG_RGB_SWIZZLE)
    if (turboSwizzled(info->out_color_space)) {
        ASSERT(!m_scaled);
        while (info->output_scanline < info->output_height) {
            unsigned char* row = reinterpret_cast<unsigned char*>(buffer.getAddr(0, info->output_scanline));
            if (jpeg_read_scanlines(info, &row, 1) != 1)
                return false;
#if USE(QCMSLIB)
            if (qcms_transform* transform = m_reader->colorTransform())
                qcms_transform_data_type(transform, row, row, info->output_width, rgbOutputColorSpace() == JCS_EXT_BGRA ? QCMS_OUTPUT_BGRX : QCMS_OUTPUT_RGBX);
#endif
        }
        return true;
    }
#endif

    JSAMPARRAY samples = m_reader->samples();

    while (info->output_scanline < info->output_height) {
        // jpeg_read_scanlines will increase the scanline counter, so we
        // save the scanline before calling it.
        int sourceY = info->output_scanline;
        // Request one scanline. Returns 0 or 1 scanlines.
        if (jpeg_read_scanlines(info, samples, 1) != 1)
            return false;

        int destY = scaledY(sourceY);
        if (destY < 0)
            continue;
#if USE(QCMSLIB)
        if (m_reader->colorTransform() && info->out_color_space == JCS_RGB)
            qcms_transform_data(m_reader->colorTransform(), *samples, *samples, info->output_width);
#endif
        int width = m_scaled ? m_scaledColumns.size() : info->output_width;
        for (int x = 0; x < width; ++x) {
            JSAMPLE* jsample = *samples + (m_scaled ? m_scaledColumns[x] : x) * ((info->out_color_space == JCS_RGB) ? 3 : 4);
            if (info->out_color_space == JCS_RGB)
                buffer.setRGBA(x, destY, jsample[0], jsample[1], jsample[2], 0xFF);
#if defined(TURBO_JPEG_RGB_SWIZZLE)
            else if (info->out_color_space == JCS_EXT_RGBA)
                buffer.setRGBA(x, destY, jsample[0], jsample[1], jsample[2], 0xFF);
            else if (info->out_color_space == JCS_EXT_BGRA)
                buffer.setRGBA(x, destY, jsample[2], jsample[1], jsample[0], 0xFF);
#endif
            else if (info->out_color_space == JCS_CMYK) {
                // Source is 'Inverted CMYK', output is RGB.
                // See: http://www.easyrgb.com/math.php?MATH=M12#text12
                // Or:  http://www.ilkeratalay.com/colorspacesfaq.php#rgb
                // From CMYK to CMY:
                // X =   X    * (1 -   K   ) +   K  [for X = C, M, or Y]
                // Thus, from Inverted CMYK to CMY is:
                // X = (1-iX) * (1 - (1-iK)) + (1-iK) => 1 - iX*iK
                // From CMY (0..1) to RGB (0..1):
                // R = 1 - C => 1 - (1 - iC*iK) => iC*iK  [G and B similar]
                unsigned k = jsample[3];
                buffer.setRGBA(x, destY, jsample[0] * k / 255, jsample[1] * k / 255, jsample[2] * k / 255, 0xFF);
            } else {
                ASSERT_NOT_REACHED();
                return setFailed();
            }
        }
    }

    return true;
}
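
The inverted-CMYK comment in Example #4 reduces to multiplying each of the first three (inverted) channels by the inverted K channel and dividing by 255, which is what the setRGBA() call does. A standalone sketch with a worked value; the helper name invertedCmykToRgb is illustrative:

#include <cstdint>

// Convert one libjpeg "inverted CMYK" sample (as Adobe JPEGs store it) to RGB.
// With iC = 255 - C and so on, R = iC * iK / 255, and likewise for G and B.
static void invertedCmykToRgb(const uint8_t cmyk[4], uint8_t rgb[3])
{
    unsigned k = cmyk[3];        // inverted K
    rgb[0] = cmyk[0] * k / 255;  // R = iC * iK / 255
    rgb[1] = cmyk[1] * k / 255;  // G = iM * iK / 255
    rgb[2] = cmyk[2] * k / 255;  // B = iY * iK / 255
}

// Example: a sample {255, 0, 0, 204} (no cyan, full magenta and yellow in the
// inverted encoding, about 20% black) yields R = 255 * 204 / 255 = 204, G = 0, B = 0.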