Example 1
void WEBPImageDecoder::applyColorProfile(const uint8_t* data, size_t size, ImageFrame& buffer)
{
    int width;
    int decodedHeight;
    if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
        return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
    if (decodedHeight <= 0)
        return;

    if (!m_haveReadProfile) {
        readColorProfile(data, size);
        m_haveReadProfile = true;
    }

    ASSERT(width == scaledSize().width());
    ASSERT(decodedHeight <= scaledSize().height());

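    // Color-correct each newly decoded row in place (when a transform is available),
    // then repack the row into the frame buffer pixel by pixel.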
    for (int y = m_decodedHeight; y < decodedHeight; ++y) {
        uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(0, y));
        if (qcms_transform* transform = colorTransform())
            qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
        uint8_t* pixel = row;
        for (int x = 0; x < width; ++x, pixel += 4)
            buffer.setRGBA(x, y, pixel[0], pixel[1], pixel[2], pixel[3]);
    }

    m_decodedHeight = decodedHeight;
}
Example 2
bool JPEGImageDecoder::outputScanlines()
{
    if (m_frameBufferCache.isEmpty())
        return false;

    // Initialize the framebuffer if needed.
    RGBA32Buffer& buffer = m_frameBufferCache[0];
    if (buffer.status() == RGBA32Buffer::FrameEmpty) {
        if (!buffer.setSize(scaledSize().width(), scaledSize().height()))
            return setFailed();
        buffer.setStatus(RGBA32Buffer::FramePartial);
        buffer.setHasAlpha(false);
        buffer.setColorProfile(m_colorProfile);

        // For JPEGs, the frame always fills the entire image.
        buffer.setRect(IntRect(IntPoint(), size()));
    }

    jpeg_decompress_struct* info = m_reader->info();
    JSAMPARRAY samples = m_reader->samples();

    while (info->output_scanline < info->output_height) {
        // jpeg_read_scanlines will increase the scanline counter, so we
        // save the scanline before calling it.
        int sourceY = info->output_scanline;
        /* Request one scanline.  Returns 0 or 1 scanlines. */
        if (jpeg_read_scanlines(info, samples, 1) != 1)
            return false;

        int destY = scaledY(sourceY);
        if (destY < 0)
            continue;
        int width = m_scaled ? m_scaledColumns.size() : info->output_width;
        for (int x = 0; x < width; ++x) {
            JSAMPLE* jsample = *samples + (m_scaled ? m_scaledColumns[x] : x) * ((info->out_color_space == JCS_RGB) ? 3 : 4);
            if (info->out_color_space == JCS_RGB)
                buffer.setRGBA(x, destY, jsample[0], jsample[1], jsample[2], 0xFF);
            else if (info->out_color_space == JCS_CMYK) {
                // Source is 'Inverted CMYK', output is RGB.
                // See: http://www.easyrgb.com/math.php?MATH=M12#text12
                // Or:  http://www.ilkeratalay.com/colorspacesfaq.php#rgb
                // From CMYK to CMY:
                // X =   X    * (1 -   K   ) +   K  [for X = C, M, or Y]
                // Thus, from Inverted CMYK to CMY is:
                // X = (1-iX) * (1 - (1-iK)) + (1-iK) => 1 - iX*iK
                // From CMY (0..1) to RGB (0..1):
                // R = 1 - C => 1 - (1 - iC*iK) => iC*iK  [G and B similar]
                unsigned k = jsample[3];
                buffer.setRGBA(x, destY, jsample[0] * k / 255, jsample[1] * k / 255, jsample[2] * k / 255, 0xFF);
            } else {
                ASSERT_NOT_REACHED();
                return setFailed();
            }
        }
    }

    return true;
}
Example 3
QImage NemoThumbnailProvider::generateThumbnail(const QString &id, const QByteArray &hashData, const QSize &requestedSize, bool crop)
{
    QImage img;
    QSize originalSize;
    QByteArray format;

    // The image was not in the cache, so read it.
    QImageReader ir(id);
    if (!ir.canRead())
        return img;

    originalSize = ir.size();
    format = ir.format();

    if (originalSize != requestedSize && originalSize.isValid()) {
        if (crop) {
            // Scales an arbitrarily sized source image to the requested size, scaling up or down,
            // keeping the aspect ratio of the original image intact by maximizing either width or
            // height and cropping the rest of the image away.
            QSize scaledSize(originalSize);

            // now scale it filling the original rectangle by keeping aspect ratio, but expand if needed.
            scaledSize.scale(requestedSize, Qt::KeepAspectRatioByExpanding);

            // set the adjusted clipping rectangle in the center of the scaled image
            QPoint center((scaledSize.width() - 1) / 2, (scaledSize.height() - 1) / 2);
            QRect cr(0,0,requestedSize.width(), requestedSize.height());
            cr.moveCenter(center);
            ir.setScaledClipRect(cr);

            // set requested target size of a thumbnail
            ir.setScaledSize(scaledSize);
        } else {
            // Maintains correct aspect ratio without cropping, as such the final image may
            // be smaller than requested in one dimension.
            QSize scaledSize(originalSize);
            scaledSize.scale(requestedSize, Qt::KeepAspectRatio);
            ir.setScaledSize(scaledSize);
        }
    }
    img = ir.read();

    NemoImageMetadata meta(id, format);
    if (meta.orientation() != NemoImageMetadata::TopLeft)
        img = rotate(img, meta.orientation());

    // write the scaled image to cache
    if (meta.orientation() != NemoImageMetadata::TopLeft ||
        (originalSize != requestedSize && originalSize.isValid())) {
        writeCacheFile(hashData, img);
    }

    TDEBUG() << Q_FUNC_INFO << "Wrote " << id << " to cache";
    return img;
}
Example 4
bool JPEGImageDecoder::outputScanlines()
{
    if (m_frameBufferCache.isEmpty())
        return false;

    // Initialize the framebuffer if needed.
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(scaledSize().width(), scaledSize().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        // The buffer is transparent outside the decoded area while the image is
        // loading. The completed image will be marked fully opaque in jpegComplete().
        buffer.setHasAlpha(true);
        buffer.setColorProfile(m_colorProfile);

        // For JPEGs, the frame always fills the entire image.
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    jpeg_decompress_struct* info = m_reader->info();

#if defined(TURBO_JPEG_RGB_SWIZZLE)
    if (!m_scaled && turboSwizzled(info->out_color_space)) {
        while (info->output_scanline < info->output_height) {
            unsigned char* row = reinterpret_cast<unsigned char*>(buffer.getAddr(0, info->output_scanline));
            if (jpeg_read_scanlines(info, &row, 1) != 1)
                return false;
#if USE(QCMSLIB)
            if (qcms_transform* transform = m_reader->colorTransform())
                qcms_transform_data_type(transform, row, row, info->output_width, rgbOutputColorSpace() == JCS_EXT_BGRA ? QCMS_OUTPUT_BGRX : QCMS_OUTPUT_RGBX);
#endif
        }
        return true;
    }
#endif

    switch (info->out_color_space) {
    // The code inside outputScanlines<colorSpace>() will be executed
    // for each pixel, so we want to avoid any extra comparisons there.
    // That is why we use templates and template specializations here so
    // the proper code will be generated at compile time.
    case JCS_RGB:
        return outputScanlines<JCS_RGB>(buffer);
    case JCS_CMYK:
        return outputScanlines<JCS_CMYK>(buffer);
    default:
        ASSERT_NOT_REACHED();
    }

    return setFailed();
}
Example 5
QSize mutableSquareImageContainer::setScaledHeight(int heightHint) {

  if (scaledSize().isEmpty()) {
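    // Derive a per-square pixel size from the height hint (at least 2 pixels),
    // then rebuild both dimensions as exact multiples of that square size.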
    const int newSquareSize = qMax(heightHint/heightSquareCount_, 2);
    const int newWidth = newSquareSize * widthSquareCount_;
    const int newHeight = newSquareSize * heightSquareCount_;
    const QSize newSize(newWidth, newHeight);
    imageContainer::setScaledSize(newSize);
    return newSize;
  }
  else {
    return scaledSize();
  }
}
Example 6
void QGLTextureCubePrivate::bindImages(QGLTexture2DTextureInfo *info)
{
    QSize scaledSize(size);
#if defined(QT_OPENGL_ES_2)
    if ((bindOptions & QGLTexture2D::MipmapBindOption) ||
            horizontalWrap != QGL::ClampToEdge ||
            verticalWrap != QGL::ClampToEdge) {
        // ES 2.0 does not support NPOT textures when mipmaps are in use,
        // or if the wrap mode isn't ClampToEdge.
        scaledSize = QGL::nextPowerOfTwo(scaledSize);
    }
#endif

    // Handle the first face.
    if (!image.isNull())
        info->tex.uploadFace(GL_TEXTURE_CUBE_MAP_POSITIVE_X, image, scaledSize);
    else if (size.isValid())
        info->tex.createFace(GL_TEXTURE_CUBE_MAP_POSITIVE_X, scaledSize);

    // Handle the other faces.
    for (int face = 1; face < 6; ++face) {
        if (!otherImages[face - 1].isNull()) {
            info->tex.uploadFace(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face,
                                 otherImages[face - 1], scaledSize);
        } else {
            info->tex.createFace(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, scaledSize);
        }
    }
}
Example 7
FloatSize VisualViewport::visibleSize() const
{
    FloatSize scaledSize(m_size);
    scaledSize.expand(0, m_topControlsAdjustment);
    scaledSize.scale(1 / m_scale);
    return scaledSize;
}
Example 8
QImage mutableSquareImageContainer::scaledImage() const {

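  // Pre-render one solid curDimension x curDimension square for each color used in the image.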
  QHash<triC, QImage> colorSquares;
  const QVector<triC>& squareColors = colors();
  const int curDimension = scaledDimension();
  const grid baseImage(image(), originalDimension_);
  QImage squareImage(curDimension, curDimension, QImage::Format_RGB32);
  for (int i = 0, size = squareColors.size(); i < size; ++i) {
    const triC& thisImageColor = squareColors[i];
    squareImage.fill(thisImageColor.qrgb());
    colorSquares[thisImageColor] = squareImage;
  }

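  // Stamp the pre-rendered square for the color of each original-image grid cell into the scaled image.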
  QImage returnImage(scaledSize(), QImage::Format_RGB32);
  QPainter painter(&returnImage);
  for (int yBox = 0; yBox < heightSquareCount_; ++yBox) {
    for (int xBox = 0; xBox < widthSquareCount_; ++xBox) {
      const triC& thisSquareColor =
        baseImage(xBox*originalDimension_, yBox*originalDimension_);
      painter.drawImage(QPoint(xBox*curDimension, yBox*curDimension),
                        colorSquares[thisSquareColor]);

    }
  }
  return returnImage;
}
Example 9
QSize mutableSquareImageContainer::setScaledSize(const QSize& sizeHint) {

  if (sizeHint.isEmpty()) {
    imageContainer::setScaledSize(sizeHint);
    return sizeHint;
  }

  // adjust sizeHint (smaller) to make the dimensions multiples of a
  // square size
  // yup - we ignore the sizeHint height
  if (scaledSize().isEmpty()) {
    return setScaledWidth(sizeHint.width());
  }
  else {
    return scaledSize();
  }
}
Example 10
	Size GeomUtils::outsideSize(Node* node) {
		const Size& size = scaledSize(node);
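		// The axis-aligned bounding box of a w x h rectangle rotated by r has
		// width |cos r| * w + |sin r| * h and height |sin r| * w + |cos r| * h.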
		float r = node->getRotation() * M_PI / 180.0f;
		float c = fabsf(cosf(r));
		float s = fabsf(sinf(r));
		return Size(c * size.width + s * size.height,
								s * size.width + c * size.height);
	}
Example 11
void BitmapImage::draw(GraphicsContext* context, const FloatRect& dst, const FloatRect& src, ColorSpace styleColorSpace, CompositeOperator op,
                       BlendMode, RespectImageOrientationEnum shouldRespectImageOrientation)
{
    if (!dst.width() || !dst.height() || !src.width() || !src.height())
        return;

    startAnimation();

    NativeImageCairo* nativeImage = frameAtIndex(m_currentFrame);
    if (!nativeImage) // If it's too early we won't have an image yet.
        return;

    if (mayFillWithSolidColor()) {
        fillWithSolidColor(context, dst, solidColor(), styleColorSpace, op);
        return;
    }

    context->save();

    // Set the compositing operation.
    if (op == CompositeSourceOver && !frameHasAlphaAtIndex(m_currentFrame))
        context->setCompositeOperation(CompositeCopy);
    else
        context->setCompositeOperation(op);

#if ENABLE(IMAGE_DECODER_DOWN_SAMPLING)
    cairo_surface_t* surface = nativeImage->surface();
    IntSize scaledSize(cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface));
    FloatRect adjustedSrcRect = adjustSourceRectForDownSampling(src, scaledSize);
#else
    FloatRect adjustedSrcRect(src);
#endif

    ImageOrientation orientation = DefaultImageOrientation;
    if (shouldRespectImageOrientation == RespectImageOrientation)
        orientation = frameOrientationAtIndex(m_currentFrame);

    FloatRect dstRect = dst;

    if (orientation != DefaultImageOrientation) {
        // ImageOrientation expects the origin to be at (0, 0).
        context->translate(dstRect.x(), dstRect.y());
        dstRect.setLocation(FloatPoint());
        context->concatCTM(orientation.transformFromDefault(dstRect.size()));
        if (orientation.usesWidthAsHeight()) {
            // The destination rectangle will have its width and height already reversed for the orientation of
            // the image, as it was needed for page layout, so we need to reverse it back here.
            dstRect = FloatRect(dstRect.x(), dstRect.y(), dstRect.height(), dstRect.width());
        }
    }

    context->platformContext()->drawSurfaceToContext(nativeImage->surface(), dstRect, adjustedSrcRect, context);

    context->restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
Example 12
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& tileRect, const AffineTransform& patternTransform,
                        const FloatPoint& phase, ColorSpace, CompositeOperator op, const FloatRect& destRect)
{
    QPixmap* framePixmap = nativeImageForCurrentFrame();
    if (!framePixmap) // If it's too early we won't have an image yet.
        return;

    // Qt interprets 0 width/height as full width/height so just short circuit.
    QRectF dr = QRectF(destRect).normalized();
    QRect tr = QRectF(tileRect).toRect().normalized();
    if (!dr.width() || !dr.height() || !tr.width() || !tr.height())
        return;

    QPixmap pixmap = *framePixmap;
    if (tr.x() || tr.y() || tr.width() != pixmap.width() || tr.height() != pixmap.height())
        pixmap = pixmap.copy(tr);

    CompositeOperator previousOperator = ctxt->compositeOperation();

    ctxt->setCompositeOperation(!pixmap.hasAlpha() && op == CompositeSourceOver ? CompositeCopy : op);

    QPainter* p = ctxt->platformContext();
    QTransform transform(patternTransform);

    // If this would draw more than one scaled tile, we scale the pixmap first and then use the result to draw.
    if (transform.type() == QTransform::TxScale) {
        QRectF tileRectInTargetCoords = (transform * QTransform().translate(phase.x(), phase.y())).mapRect(tr);

        bool tileWillBePaintedOnlyOnce = tileRectInTargetCoords.contains(dr);
        if (!tileWillBePaintedOnlyOnce) {
            QSizeF scaledSize(float(pixmap.width()) * transform.m11(), float(pixmap.height()) * transform.m22());
            QPixmap scaledPixmap(scaledSize.toSize());
            if (pixmap.hasAlpha())
                scaledPixmap.fill(Qt::transparent);
            {
                QPainter painter(&scaledPixmap);
                painter.setCompositionMode(QPainter::CompositionMode_Source);
                painter.setRenderHints(p->renderHints());
                painter.drawPixmap(QRect(0, 0, scaledPixmap.width(), scaledPixmap.height()), pixmap);
            }
            pixmap = scaledPixmap;
            transform = QTransform::fromTranslate(transform.dx(), transform.dy());
        }
    }

    /* Translate the coordinates as phase is not in world matrix coordinate space but the tile rect origin is. */
    transform *= QTransform().translate(phase.x(), phase.y());
    transform.translate(tr.x(), tr.y());

    QBrush b(pixmap);
    b.setTransform(transform);
    p->fillRect(dr, b);

    ctxt->setCompositeOperation(previousOperator);

    if (imageObserver())
        imageObserver()->didDraw(this);
}
Example 13
PassRefPtr<Uint8ClampedArray> FilterEffect::asPremultipliedImage(const IntRect& rect)
{
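    // Allocate an uninitialized RGBA buffer sized for the rect scaled by the filter scale
    // (4 bytes per pixel) and fill it with the effect's premultiplied pixels.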
    IntSize scaledSize(rect.size());
    ASSERT(!ImageBuffer::sizeNeedsClamping(scaledSize));
    scaledSize.scale(m_filter.filterScale());
    RefPtr<Uint8ClampedArray> imageData = Uint8ClampedArray::createUninitialized(scaledSize.width() * scaledSize.height() * 4);
    copyPremultipliedImage(imageData.get(), rect);
    return imageData.release();
}
Example 14
PassRefPtr<Uint8ClampedArray> FilterEffect::asPremultipliedImage(const IntRect& rect)
{
    ASSERT(isFilterSizeValid(rect));
    IntSize scaledSize(rect.size());
    scaledSize.scale(m_filter->filterScale());
    RefPtr<Uint8ClampedArray> imageData = Uint8ClampedArray::createUninitialized(scaledSize.width() * scaledSize.height() * 4);
    copyPremultipliedImage(imageData.get(), rect);
    return imageData.release();
}
Example 15
bool AcceleratedSurface::resize(const IntSize& size)
{
    IntSize scaledSize(size);
    scaledSize.scale(m_webPage.deviceScaleFactor());
    if (scaledSize == m_size)
        return false;

    m_size = scaledSize;
    return true;
}
Example 16
Image* SVGImageCache::lookupOrCreateBitmapImageForRenderer(const RenderObject* renderer)
{
    if (!renderer)
        return Image::nullImage();

    const CachedImageClient* client = renderer;

    // The cache needs to know the size of the renderer before querying an image for it.
    SizeAndScalesMap::iterator sizeIt = m_sizeAndScalesMap.find(renderer);
    if (sizeIt == m_sizeAndScalesMap.end())
        return Image::nullImage();

    IntSize size = sizeIt->second.size;
    float zoom = sizeIt->second.zoom;
    float scale = sizeIt->second.scale;

    // FIXME (85335): This needs to take CSS transform scale into account as well.
    Page* page = renderer->document()->page();
    if (!scale)
        scale = page->deviceScaleFactor() * page->pageScaleFactor();

    ASSERT(!size.isEmpty());

    // Look up the image for the client in the cache and update it if necessary.
    ImageDataMap::iterator it = m_imageDataMap.find(client);
    if (it != m_imageDataMap.end()) {
        ImageData& data = it->second;

        // Common case: image size & zoom remained the same.
        if (data.sizeAndScales.size == size && data.sizeAndScales.zoom == zoom && data.sizeAndScales.scale == scale)
            return data.image.get();

        // If the image size for the client changed, we have to delete the buffer, remove the item from the cache and recreate it.
        delete data.buffer;
        m_imageDataMap.remove(it);
    }

    FloatSize scaledSize(size);
    scaledSize.scale(scale);

    // Create and cache new image and image buffer at requested size.
    OwnPtr<ImageBuffer> newBuffer = ImageBuffer::create(expandedIntSize(scaledSize), 1);
    if (!newBuffer)
        return Image::nullImage();

    m_svgImage->drawSVGToImageBuffer(newBuffer.get(), size, zoom, scale, SVGImage::DontClearImageBuffer);

    RefPtr<Image> newImage = newBuffer->copyImage(CopyBackingStore);
    Image* newImagePtr = newImage.get();
    ASSERT(newImagePtr);

    m_imageDataMap.add(client, ImageData(newBuffer.leakPtr(), newImage.release(), SizeAndScales(size, zoom, scale)));
    return newImagePtr;
}
Example 17
void nsMediaDecoder::Invalidate()
{
  if (!mElement)
    return;

  nsIFrame* frame = mElement->GetPrimaryFrame();
  PRBool invalidateFrame = PR_FALSE;

  {
    nsAutoLock lock(mVideoUpdateLock);

    // Get mImageContainerSizeChanged while holding the lock.
    invalidateFrame = mImageContainerSizeChanged;
    mImageContainerSizeChanged = PR_FALSE;

    if (mSizeChanged) {
      nsIntSize scaledSize(mRGBWidth, mRGBHeight);
      // Apply the aspect ratio to produce the intrinsic size we report
      // to the element.
      if (mPixelAspectRatio > 1.0) {
        // Increase the intrinsic width
        scaledSize.width =
          ConditionDimension(mPixelAspectRatio*scaledSize.width, scaledSize.width);
      } else {
        // Increase the intrinsic height
        scaledSize.height =
          ConditionDimension(scaledSize.height/mPixelAspectRatio, scaledSize.height);
      }
      mElement->UpdateMediaSize(scaledSize);

      mSizeChanged = PR_FALSE;
      if (frame) {
        nsPresContext* presContext = frame->PresContext();
        nsIPresShell *presShell = presContext->PresShell();
        presShell->FrameNeedsReflow(frame,
                                    nsIPresShell::eStyleChange,
                                    NS_FRAME_IS_DIRTY);
      }
    }
  }

  if (frame) {
    nsRect contentRect = frame->GetContentRect() - frame->GetPosition();
    if (invalidateFrame) {
      frame->Invalidate(contentRect);
    } else {
      frame->InvalidateLayer(contentRect, nsDisplayItem::TYPE_VIDEO);
    }
  }

#ifdef MOZ_SVG
  nsSVGEffects::InvalidateDirectRenderingObservers(mElement);
#endif
}
Example 18
bool ImageBuffer::sizeNeedsClamping(const FloatSize& size, FloatSize& scale)
{
    FloatSize scaledSize(size);
    scaledSize.scale(scale.width(), scale.height());

    if (!sizeNeedsClamping(scaledSize))
        return false;

    // The area of the scaled size is bigger than the upper limit; adjust the scale to fit.
    scale.scale(sqrtf(MaxClampedArea / (scaledSize.width() * scaledSize.height())));
    ASSERT(!sizeNeedsClamping(size, scale));
    return true;
}
Example 19
bool RenderSVGResourceFilter::fitsInMaximumImageSize(const FloatSize& size, FloatSize& scale)
{
    FloatSize scaledSize(size);
    scaledSize.scale(scale.width(), scale.height());
    float scaledArea = scaledSize.width() * scaledSize.height();

    if (scaledArea <= FilterEffect::maxFilterArea())
        return true;

    // If the area of the scaled size is bigger than the upper limit, adjust the
    // scale to fit.
    scale.scale(sqrt(FilterEffect::maxFilterArea() / scaledArea));
    return false;
}
Example 20
bool GIFImageDecoder::frameComplete(unsigned frameIndex, unsigned frameDuration, RGBA32Buffer::FrameDisposalMethod disposalMethod)
{
    // Initialize the frame if necessary.  Some GIFs insert do-nothing frames,
    // in which case we never reach haveDecodedRow() before getting here.
    RGBA32Buffer& buffer = m_frameBufferCache[frameIndex];
    if ((buffer.status() == RGBA32Buffer::FrameEmpty) && !initFrameBuffer(frameIndex))
        return false; // initFrameBuffer() has already called setFailed().

    buffer.setStatus(RGBA32Buffer::FrameComplete);
    buffer.setDuration(frameDuration);
    buffer.setDisposalMethod(disposalMethod);

    // apollo integrate
    // RHU: need to test bug: 1796134

    if (!m_currentBufferSawAlpha) {
        // The whole frame was non-transparent, so it's possible that the entire
        // resulting buffer was non-transparent, and we can setHasAlpha(false).
        if (buffer.rect().contains(IntRect(IntPoint(), scaledSize())))
            buffer.setHasAlpha(false);
        else if (frameIndex) {
            // Tricky case.  This frame does not have alpha only if everywhere
            // outside its rect doesn't have alpha.  To know whether this is
            // true, we check the start state of the frame -- if it doesn't have
            // alpha, we're safe.
            //
            // First skip over prior DisposeOverwritePrevious frames (since they
            // don't affect the start state of this frame) the same way we do in
            // initFrameBuffer().
            const RGBA32Buffer* prevBuffer = &m_frameBufferCache[--frameIndex];
            while (frameIndex && (prevBuffer->disposalMethod() == RGBA32Buffer::DisposeOverwritePrevious))
                prevBuffer = &m_frameBufferCache[--frameIndex];

            // Now, if we're at a DisposeNotSpecified or DisposeKeep frame, then
            // we can say we have no alpha if that frame had no alpha.  But
            // since in initFrameBuffer() we already copied that frame's alpha
            // state into the current frame's, we need do nothing at all here.
            //
            // The only remaining case is a DisposeOverwriteBgcolor frame.  If
            // it had no alpha, and its rect is contained in the current frame's
            // rect, we know the current frame has no alpha.
            if ((prevBuffer->disposalMethod() == RGBA32Buffer::DisposeOverwriteBgcolor) && !prevBuffer->hasAlpha() && buffer.rect().contains(prevBuffer->rect()))
                buffer.setHasAlpha(false);
        }
    }

    return true;
}
Example 21
void QGLTexture2DPrivate::bindImages(QGLTexture2DTextureInfo *info)
{
    QSize scaledSize(size);
#if defined(QT_OPENGL_ES_2)
    if ((bindOptions & QGLContext::MipmapBindOption) ||
            horizontalWrap != QGL::ClampToEdge ||
            verticalWrap != QGL::ClampToEdge) {
        // ES 2.0 does not support NPOT textures when mipmaps are in use,
        // or if the wrap mode isn't ClampToEdge.
        scaledSize = QGL::nextPowerOfTwo(scaledSize);
    }
#endif
    if (!image.isNull())
        info->tex.uploadFace(GL_TEXTURE_2D, image, scaledSize);
    else if (size.isValid())
        info->tex.createFace(GL_TEXTURE_2D, scaledSize);
}
Example 22
void RedirectedXCompositeWindow::resize(const IntSize& size)
{
    IntSize scaledSize(size);
    scaledSize.scale(m_webPage.deviceScaleFactor());
    if (scaledSize == m_size)
        return;

    // Resize the window to at least 1x1 since X doesn't allow creating empty windows.
    XResizeWindow(m_display, m_window.get(), std::max(1, scaledSize.width()), std::max(1, scaledSize.height()));
    XFlush(m_display);

    m_size = scaledSize;
    if (m_size.isEmpty())
        cleanupPixmapAndPixmapSurface();
    else
        createNewPixampAndPixampSurface();
}
Example 23
std::unique_ptr<ImageBuffer> GraphicsContext::createCompatibleBuffer(const FloatSize& size, bool hasAlpha) const
{
    // Make the buffer larger if the context's transform is scaling it, so that we get a higher
    // resolution than one pixel per unit. Also set up a corresponding scale factor on the
    // graphics context.

    AffineTransform transform = getCTM(DefinitelyIncludeDeviceScale);
    FloatSize scaledSize(static_cast<int>(ceil(size.width() * transform.xScale())), static_cast<int>(ceil(size.height() * transform.yScale())));

    std::unique_ptr<ImageBuffer> buffer = ImageBuffer::createCompatibleBuffer(scaledSize, 1, ColorSpaceSRGB, *this, hasAlpha);
    if (!buffer)
        return nullptr;

    buffer->context().scale(FloatSize(scaledSize.width() / size.width(), scaledSize.height() / size.height()));

    return buffer;
}
Example 24
PassOwnPtr<ImageBuffer> GraphicsContext::createCompatibleBuffer(const IntSize& size) const
{
    // Make the buffer larger if the context's transform is scaling it, so that we get a higher
    // resolution than one pixel per unit. Also set up a corresponding scale factor on the
    // graphics context.

    AffineTransform transform = getCTM(DefinitelyIncludeDeviceScale);
    IntSize scaledSize(static_cast<int>(ceil(size.width() * transform.xScale())), static_cast<int>(ceil(size.height() * transform.yScale())));

    OwnPtr<ImageBuffer> buffer = ImageBuffer::create(scaledSize, 1, ColorSpaceDeviceRGB, isAcceleratedContext() ? Accelerated : Unaccelerated);
    if (!buffer)
        return nullptr;

    buffer->context()->scale(FloatSize(static_cast<float>(scaledSize.width()) / size.width(),
        static_cast<float>(scaledSize.height()) / size.height()));

    return buffer.release();
}
Example 25
void ImageDecoderQt::internalDecodeSize()
{
    ASSERT(m_reader);

    // If the reader reports an empty QSize(), something failed.
    QSize size = m_reader->size();
    if (size.isEmpty()) {
        setFailed();
        return clearPointers();
    }

    setSize(size.width(), size.height());

    // We don't need the tables set by prepareScaleDataIfNecessary,
    // but their dimensions are used by ImageDecoder::scaledSize().
    prepareScaleDataIfNecessary();
    if (m_scaled)
        m_reader->setScaledSize(scaledSize());
}
Example 26
IntPoint VisualViewport::clampDocumentOffsetAtScale(const IntPoint& offset, float scale)
{
    if (!mainFrame() || !mainFrame()->view())
        return IntPoint();

    FrameView* view = mainFrame()->view();

    FloatSize scaledSize(m_size);
    scaledSize.scale(1 / scale);

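    // The scrollable range is the frame view's own range extended by how far the
    // visual viewport can pan within the contents at this scale.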
    IntPoint visualViewportMax = flooredIntPoint(FloatSize(contentsSize()) - scaledSize);
    IntPoint max = view->maximumScrollPosition() + visualViewportMax;
    IntPoint min = view->minimumScrollPosition(); // VisualViewportMin should be (0, 0)

    IntPoint clamped = offset;
    clamped = clamped.shrunkTo(max);
    clamped = clamped.expandedTo(min);
    return clamped;
}
Example 27
void testScaled( const std::string & filename, const Vector3<uint_t> & size, const std::string & datatype, const bool vtkOut )
{
   WALBERLA_LOG_INFO( "Testing scaled" );
   BinaryRawFile brf( filename, size, datatype );

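   // Downscale the raw-file dimensions by 2, 3 and 5 along x, y and z (clamped to at
   // least one cell per axis) and build a matching single-block grid.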
   Vector3<uint_t> scaledSize( std::max( uint_t( 1 ), size[0] / uint_t( 2 ) ),
                               std::max( uint_t( 1 ), size[1] / uint_t( 3 ) ),
                               std::max( uint_t( 1 ), size[2] / uint_t( 5 ) ) );

   auto blocks = blockforest::createUniformBlockGrid( uint_t( 1 ), uint_t( 1 ), uint_t( 1 ),
      scaledSize[0], scaledSize[1], scaledSize[2],
      real_t( 1 ),
      uint_t( 1 ), uint_t( 1 ), uint_t( 1 ) );

   BinaryRawFileInterpolator brfi( blocks->getDomain(), brf, BinaryRawFileInterpolator::NEAREST_NEIGHBOR );

   typedef GhostLayerField< uint8_t, uint_t( 1 ) > ScalarField;

   BlockDataID scalarFieldID = field::addToStorage<ScalarField>( blocks, "BinaryRawFile" );

   for (auto & block : *blocks)
   {
      auto field = block.getData<ScalarField>( scalarFieldID );

      CellInterval ci( 0, 0, 0, cell_idx_c( scaledSize[0] ) - 1, cell_idx_c( scaledSize[1] ) - 1, cell_idx_c( scaledSize[2] ) - 1 );

      for (const Cell & c : ci)
      {
         auto pos = blocks->getBlockLocalCellCenter( block, c );
         field->get( c ) = brfi.get( pos );
      }
   }

   if (vtkOut)
   {
      WALBERLA_LOG_INFO( "Writing scaled" );
      auto vtkOutput = vtk::createVTKOutput_BlockData( blocks, "BinaryRawFileScaled" );
      vtkOutput->addCellDataWriter( make_shared< field::VTKWriter< ScalarField, uint8_t > >( scalarFieldID, "BinaryRawFile" ) );
      writeFiles( vtkOutput, true )();
   }
}
Example 28
FloatRect PinchViewport::visibleRect() const
{
    FloatSize scaledSize(m_size);
    scaledSize.scale(1 / m_scale);
    return FloatRect(m_offset, scaledSize);
}
Example 29
void PNGImageDecoder::rowAvailable(unsigned char* rowBuffer, unsigned rowIndex, int interlacePass)
{
    if (m_frameBufferCache.isEmpty())
        return;

    // Initialize the framebuffer if needed.
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(scaledSize().width(), scaledSize().height())) {
            longjmp(JMPBUF(m_reader->pngPtr()), 1);
            return;
        }
        buffer.setStatus(ImageFrame::FramePartial);
        buffer.setHasAlpha(false);
        buffer.setColorProfile(m_colorProfile);

        // For PNGs, the frame always fills the entire image.
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));

        if (png_get_interlace_type(m_reader->pngPtr(), m_reader->infoPtr()) != PNG_INTERLACE_NONE)
            m_reader->createInterlaceBuffer((m_reader->hasAlpha() ? 4 : 3) * size().width() * size().height());
    }

    if (!rowBuffer)
        return;

    // libpng comments (pasted in here to explain what follows)
    /*
     * this function is called for every row in the image.  If the
     * image is interlacing, and you turned on the interlace handler,
     * this function will be called for every row in every pass.
     * Some of these rows will not be changed from the previous pass.
     * When the row is not changed, the new_row variable will be NULL.
     * The rows and passes are called in order, so you don't really
     * need the row_num and pass, but I'm supplying them because it
     * may make your life easier.
     *
     * For the non-NULL rows of interlaced images, you must call
     * png_progressive_combine_row() passing in the row and the
     * old row.  You can call this function for NULL rows (it will
     * just return) and for non-interlaced images (it just does the
     * memcpy for you) if it will make the code easier.  Thus, you
     * can just do this for all cases:
     *
     *    png_progressive_combine_row(png_ptr, old_row, new_row);
     *
     * where old_row is what was displayed for previous rows.  Note
     * that the first pass (pass == 0 really) will completely cover
     * the old row, so the rows do not have to be initialized.  After
     * the first pass (and only for interlaced images), you will have
     * to pass the current row, and the function will combine the
     * old row and the new row.
     */

    png_structp png = m_reader->pngPtr();
    bool hasAlpha = m_reader->hasAlpha();
    unsigned colorChannels = hasAlpha ? 4 : 3;
    png_bytep row;
    png_bytep interlaceBuffer = m_reader->interlaceBuffer();
    if (interlaceBuffer) {
        row = interlaceBuffer + (rowIndex * colorChannels * size().width());
        png_progressive_combine_row(png, row, rowBuffer);
    } else
        row = rowBuffer;

    // Copy the data into our buffer.
    int width = scaledSize().width();
    int destY = scaledY(rowIndex);

    // Check that the row is within the image bounds. LibPNG may supply an extra row.
    if (destY < 0 || destY >= scaledSize().height())
        return;
    bool nonTrivialAlpha = false;
    for (int x = 0; x < width; ++x) {
        png_bytep pixel = row + (m_scaled ? m_scaledColumns[x] : x) * colorChannels;
        unsigned alpha = hasAlpha ? pixel[3] : 255;
        buffer.setRGBA(x, destY, pixel[0], pixel[1], pixel[2], alpha);
        nonTrivialAlpha |= alpha < 255;
    }
    if (nonTrivialAlpha && !buffer.hasAlpha())
        buffer.setHasAlpha(nonTrivialAlpha);
}
Example 30
void PNGImageDecoder::rowAvailable(unsigned char* rowBuffer, unsigned rowIndex, int)
{
    if (m_frameBufferCache.isEmpty())
        return;

    // Initialize the framebuffer if needed.
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        png_structp png = m_reader->pngPtr();
        if (!buffer.setSize(scaledSize().width(), scaledSize().height())) {
            longjmp(JMPBUF(png), 1);
            return;
        }

        unsigned colorChannels = m_reader->hasAlpha() ? 4 : 3;
        if (PNG_INTERLACE_ADAM7 == png_get_interlace_type(png, m_reader->infoPtr())) {
            m_reader->createInterlaceBuffer(colorChannels * size().width() * size().height());
            if (!m_reader->interlaceBuffer()) {
                longjmp(JMPBUF(png), 1);
                return;
            }
        }

#if USE(QCMSLIB)
        if (m_reader->colorTransform()) {
            m_reader->createRowBuffer(colorChannels * size().width());
            if (!m_reader->rowBuffer()) {
                longjmp(JMPBUF(png), 1);
                return;
            }
        }
#endif
        buffer.setStatus(ImageFrame::FramePartial);
        buffer.setHasAlpha(false);
        buffer.setColorProfile(m_colorProfile);

        // For PNGs, the frame always fills the entire image.
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    /* libpng comments (here to explain what follows).
     *
     * this function is called for every row in the image.  If the
     * image is interlacing, and you turned on the interlace handler,
     * this function will be called for every row in every pass.
     * Some of these rows will not be changed from the previous pass.
     * When the row is not changed, the new_row variable will be NULL.
     * The rows and passes are called in order, so you don't really
     * need the row_num and pass, but I'm supplying them because it
     * may make your life easier.
     */

    // Nothing to do if the row is unchanged, or the row is outside
    // the image bounds: libpng may send extra rows, ignore them to
    // make our lives easier.
    if (!rowBuffer)
        return;
    int y = !m_scaled ? rowIndex : scaledY(rowIndex);
    if (y < 0 || y >= scaledSize().height())
        return;

    /* libpng comments (continued).
     *
     * For the non-NULL rows of interlaced images, you must call
     * png_progressive_combine_row() passing in the row and the
     * old row.  You can call this function for NULL rows (it will
     * just return) and for non-interlaced images (it just does the
     * memcpy for you) if it will make the code easier.  Thus, you
     * can just do this for all cases:
     *
     *    png_progressive_combine_row(png_ptr, old_row, new_row);
     *
     * where old_row is what was displayed for previous rows.  Note
     * that the first pass (pass == 0 really) will completely cover
     * the old row, so the rows do not have to be initialized.  After
     * the first pass (and only for interlaced images), you will have
     * to pass the current row, and the function will combine the
     * old row and the new row.
     */

    bool hasAlpha = m_reader->hasAlpha();
    unsigned colorChannels = hasAlpha ? 4 : 3;
    png_bytep row = rowBuffer;

    if (png_bytep interlaceBuffer = m_reader->interlaceBuffer()) {
        row = interlaceBuffer + (rowIndex * colorChannels * size().width());
        png_progressive_combine_row(m_reader->pngPtr(), row, rowBuffer);
    }

#if USE(QCMSLIB)
    if (qcms_transform* transform = m_reader->colorTransform()) {
        qcms_transform_data(transform, row, m_reader->rowBuffer(), size().width());
        row = m_reader->rowBuffer();
    }
#endif

    // Write the decoded row pixels to the frame buffer.
    ImageFrame::PixelData* address = buffer.getAddr(0, y);
    int width = scaledSize().width();
    unsigned char nonTrivialAlphaMask = 0;

#if ENABLE(IMAGE_DECODER_DOWN_SAMPLING)
    if (m_scaled) {
        for (int x = 0; x < width; ++x) {
            png_bytep pixel = row + m_scaledColumns[x] * colorChannels;
            unsigned alpha = hasAlpha ? pixel[3] : 255;
            buffer.setRGBA(address++, pixel[0], pixel[1], pixel[2], alpha);
            nonTrivialAlphaMask |= (255 - alpha);
        }
    } else
#endif
    {
        png_bytep pixel = row;
        if (hasAlpha) {
            if (buffer.premultiplyAlpha()) {
                for (int x = 0; x < width; ++x, pixel += 4)
                    setPixelPremultipliedRGBA(address++, pixel, nonTrivialAlphaMask);
            } else {
                for (int x = 0; x < width; ++x, pixel += 4)
                    setPixelRGBA(address++, pixel, nonTrivialAlphaMask);
            }
        } else {
            for (int x = 0; x < width; ++x, pixel += 3)
                setPixelRGB(address++, pixel);
        }
    }


    if (nonTrivialAlphaMask && !buffer.hasAlpha())
        buffer.setHasAlpha(true);
}