static inline bool createMaskAndSwapContextForTextGradient(GraphicsContext*& context,
                                                           GraphicsContext*& savedContext,
                                                           OwnPtr<ImageBuffer>& imageBuffer,
                                                           RenderObject* object)
{
    RenderObject* textRootBlock = RenderSVGText::locateRenderSVGTextAncestor(object);
    ASSERT(textRootBlock);

    AffineTransform absoluteTransform;
    SVGImageBufferTools::calculateTransformationToOutermostSVGCoordinateSystem(textRootBlock, absoluteTransform);

    FloatRect absoluteTargetRect = absoluteTransform.mapRect(textRootBlock->repaintRectInLocalCoordinates());
    FloatRect clampedAbsoluteTargetRect = SVGImageBufferTools::clampedAbsoluteTargetRectForRenderer(textRootBlock, absoluteTargetRect);
    if (clampedAbsoluteTargetRect.isEmpty())
        return false;

    OwnPtr<ImageBuffer> maskImage;
    if (!SVGImageBufferTools::createImageBuffer(absoluteTargetRect, clampedAbsoluteTargetRect, maskImage, ColorSpaceDeviceRGB))
        return false;

    GraphicsContext* maskImageContext = maskImage->context();
    ASSERT(maskImageContext);

    maskImageContext->translate(-clampedAbsoluteTargetRect.x(), -clampedAbsoluteTargetRect.y());
    maskImageContext->concatCTM(absoluteTransform);

    ASSERT(maskImage);
    savedContext = context;
    context = maskImageContext;
    imageBuffer = maskImage.release();
    return true;
}
void BeginTransform3DDisplayItem::replay(GraphicsContext& context) const
{
    TransformationMatrix transform(m_transform);
    transform.applyTransformOrigin(m_transformOrigin);
    context.save();
    context.concatCTM(transform.toAffineTransform());
}
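A note on applyTransformOrigin above: applying a transform about an origin is the standard conjugation translate(origin) * M * translate(-origin), so the pivot point stays fixed. A minimal standalone sketch of that idea (illustrative only, not the Blink TransformationMatrix implementation):

#include <cassert>

// Point mapped by the conjugated transform: translate(-origin), apply M, translate(+origin).
struct Point { double x, y; };

// M is a 2x2 linear part {a, b, c, d} with x' = a*x + c*y, y' = b*x + d*y.
Point applyAboutOrigin(double a, double b, double c, double d, Point p, Point origin)
{
    double x = p.x - origin.x; // translate so the origin becomes (0, 0)
    double y = p.y - origin.y;
    double rx = a * x + c * y; // apply the linear transform
    double ry = b * x + d * y;
    return { rx + origin.x, ry + origin.y }; // translate back
}

int main()
{
    // A 90 degree rotation applied about (50, 50) leaves the pivot in place
    // and sends (60, 50) to (50, 60).
    Point pivot = applyAboutOrigin(0, 1, -1, 0, { 50, 50 }, { 50, 50 });
    Point moved = applyAboutOrigin(0, 1, -1, 0, { 60, 50 }, { 50, 50 });
    assert(pivot.x == 50 && pivot.y == 50);
    assert(moved.x == 50 && moved.y == 60);
    return 0;
}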
Example #3
PassRefPtr<SkImageFilter> FEImage::createImageFilterForRenderer(RenderObject* renderer, SkiaImageFilterBuilder* builder)
{
    FloatRect dstRect = filterPrimitiveSubregion();

    AffineTransform transform;
    SVGElement* contextNode = toSVGElement(renderer->node());

    if (contextNode->hasRelativeLengths()) {
        SVGLengthContext lengthContext(contextNode);
        FloatSize viewportSize;

        // If we're referencing an element with percentage units, e.g. <rect width="30%">, those values were resolved against the viewport.
        // Build up a transformation that maps from the viewport space to the filter primitive subregion.
        if (lengthContext.determineViewport(viewportSize))
            transform = makeMapBetweenRects(FloatRect(FloatPoint(), viewportSize), dstRect);
    } else {
        transform.translate(dstRect.x(), dstRect.y());
    }

    GraphicsContext* context = builder->context();
    if (!context)
        return adoptRef(SkBitmapSource::Create(SkBitmap()));
    AffineTransform contentTransformation;
    context->save();
    context->beginRecording(FloatRect(FloatPoint(), dstRect.size()));
    context->concatCTM(transform);
    SVGRenderingContext::renderSubtree(context, renderer, contentTransformation);
    RefPtr<DisplayList> displayList = context->endRecording();
    context->restore();
    RefPtr<SkImageFilter> result = adoptRef(SkPictureImageFilter::Create(displayList->picture(), dstRect));
    return result.release();
}
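In the relative-lengths branch above, makeMapBetweenRects is assumed to build the usual rect-to-rect affine map: scale by the size ratio, then line the origins up. A standalone sketch of that mapping, using hypothetical Rect and Affine types rather than the Blink ones:

#include <cstdio>

struct Rect { float x, y, w, h; };
// x' = a*x + c*y + e, y' = b*x + d*y + f.
struct Affine { float a, b, c, d, e, f; };

// Affine map taking srcRect onto dstRect: scale by the size ratio,
// then translate the source origin onto the destination origin.
Affine mapBetweenRects(const Rect& src, const Rect& dst)
{
    float sx = dst.w / src.w;
    float sy = dst.h / src.h;
    return { sx, 0.0f, 0.0f, sy, dst.x - sx * src.x, dst.y - sy * src.y };
}

int main()
{
    Rect viewport { 0, 0, 800, 600 };    // viewport that percentage lengths resolved against
    Rect subregion { 10, 20, 400, 300 }; // filter primitive subregion
    Affine m = mapBetweenRects(viewport, subregion);
    // Maps (0, 0) -> (10, 20) and (800, 600) -> (410, 320).
    std::printf("scale (%.2f, %.2f), translate (%.2f, %.2f)\n", m.a, m.d, m.e, m.f);
    return 0;
}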
static inline bool createMaskAndSwapContextForTextGradient(
    GraphicsContext*& context, GraphicsContext*& savedContext,
    OwnPtr<ImageBuffer>& imageBuffer, const RenderObject* object)
{
    FloatRect maskBBox = const_cast<RenderObject*>(findTextRootObject(object))->relativeBBox(false);
    IntRect maskRect = enclosingIntRect(object->absoluteTransform().mapRect(maskBBox));

    IntSize maskSize(maskRect.width(), maskRect.height());
    clampImageBufferSizeToViewport(object->document()->renderer(), maskSize);

    auto_ptr<ImageBuffer> maskImage = ImageBuffer::create(maskSize, false);

    if (!maskImage.get())
        return false;

    GraphicsContext* maskImageContext = maskImage->context();

    maskImageContext->save();
    maskImageContext->translate(-maskRect.x(), -maskRect.y());
    maskImageContext->concatCTM(object->absoluteTransform());

    imageBuffer.set(maskImage.release());
    savedContext = context;

    context = maskImageContext;

    return true;
}
void TextureMapperImageBuffer::drawTexture(const BitmapTexture& texture, const FloatRect& targetRect, const TransformationMatrix& matrix, float opacity, const BitmapTexture* maskTexture, unsigned /* exposedEdges */)
{
    GraphicsContext* context = currentContext();
    if (!context)
        return;

    const BitmapTextureImageBuffer& textureImageBuffer = static_cast<const BitmapTextureImageBuffer&>(texture);
    ImageBuffer* image = textureImageBuffer.m_image.get();
    OwnPtr<ImageBuffer> maskedImage;

    if (maskTexture && maskTexture->isValid()) {
        const BitmapTextureImageBuffer* mask = static_cast<const BitmapTextureImageBuffer*>(maskTexture);
        maskedImage = ImageBuffer::create(maskTexture->contentSize());
        GraphicsContext* maskContext = maskedImage->context();
        maskContext->drawImageBuffer(image, ColorSpaceDeviceRGB, IntPoint::zero(), CompositeCopy);
        if (opacity < 1) {
            maskContext->setAlpha(opacity);
            opacity = 1;
        }
        maskContext->drawImageBuffer(mask->m_image.get(), ColorSpaceDeviceRGB, IntPoint::zero(), CompositeDestinationIn);
        image = maskedImage.get();
    }

    context->save();
    context->setAlpha(opacity);
#if ENABLE(3D_RENDERING)
    context->concat3DTransform(matrix);
#else
    context->concatCTM(matrix.toAffineTransform());
#endif
    context->drawImageBuffer(image, ColorSpaceDeviceRGB, targetRect);
    context->restore();
}
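The masking branch above relies on CompositeDestinationIn: the image drawn just before survives only where the mask has alpha. A minimal sketch of that Porter-Duff operator on premultiplied pixels (illustrative only, not the GraphicsContext implementation):

#include <cassert>

// Premultiplied-alpha pixel.
struct Pixel { float r, g, b, a; };

// Porter-Duff "destination-in": D' = D * As. Only the destination survives,
// scaled by the source (mask) alpha.
Pixel destinationIn(const Pixel& source, const Pixel& destination)
{
    return { destination.r * source.a, destination.g * source.a,
             destination.b * source.a, destination.a * source.a };
}

int main()
{
    Pixel image { 0.5f, 0.25f, 0.0f, 1.0f }; // opaque destination pixel
    Pixel mask { 0.0f, 0.0f, 0.0f, 0.5f };   // half-transparent mask pixel
    Pixel out = destinationIn(mask, image);
    assert(out.a == 0.5f && out.r == 0.25f); // image kept at half coverage
    return 0;
}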
Example #6
void EllipsisBox::paint(PaintInfo& paintInfo, const LayoutPoint& paintOffset, LayoutUnit lineTop, LayoutUnit lineBottom)
{
    GraphicsContext* context = paintInfo.context;
    RenderStyle* style = renderer().style(isFirstLineStyle());
    const Font& font = style->font();
    FloatPoint boxOrigin = locationIncludingFlipping();
    boxOrigin.moveBy(FloatPoint(paintOffset));
    if (!isHorizontal())
        boxOrigin.move(0, -virtualLogicalHeight());
    FloatRect boxRect(boxOrigin, LayoutSize(logicalWidth(), virtualLogicalHeight()));
    GraphicsContextStateSaver stateSaver(*context);
    if (!isHorizontal())
        context->concatCTM(InlineTextBox::rotation(boxRect, InlineTextBox::Clockwise));
    FloatPoint textOrigin(boxOrigin.x(), boxOrigin.y() + font.fontMetrics().ascent());

    bool isPrinting = renderer().document().printing();
    bool haveSelection = !isPrinting && paintInfo.phase != PaintPhaseTextClip && selectionState() != RenderObject::SelectionNone;

    if (haveSelection)
        paintSelection(context, boxOrigin, style, font);
    else if (paintInfo.phase == PaintPhaseSelection)
        return;

    TextPainter::Style textStyle = TextPainter::textPaintingStyle(renderer(), style, paintInfo.forceBlackText(), isPrinting);
    if (haveSelection)
        textStyle = TextPainter::selectionPaintingStyle(renderer(), true, paintInfo.forceBlackText(), isPrinting, textStyle);

    TextRun textRun = constructTextRun(&renderer(), font, m_str, style, TextRun::AllowTrailingExpansion);
    TextPainter textPainter(context, font, textRun, textOrigin, boxRect, isHorizontal());
    textPainter.paint(0, m_str.length(), m_str.length(), textStyle);

    paintMarkupBox(paintInfo, paintOffset, lineTop, lineBottom, style);
}
static inline bool createMaskAndSwapContextForTextGradient(GraphicsContext*& context,
                                                           GraphicsContext*& savedContext,
                                                           OwnPtr<ImageBuffer>& imageBuffer,
                                                           const RenderObject* object)
{
    const RenderObject* textRootBlock = findTextRootObject(object);

    AffineTransform transform = absoluteTransformForRenderer(textRootBlock);
    FloatRect maskAbsoluteBoundingBox = transform.mapRect(textRootBlock->repaintRectInLocalCoordinates());

    IntRect maskImageRect = enclosingIntRect(maskAbsoluteBoundingBox);
    if (maskImageRect.isEmpty())
        return false;

    // Allocate an image buffer as big as the absolute unclipped size of the object
    OwnPtr<ImageBuffer> maskImage = ImageBuffer::create(maskImageRect.size());
    if (!maskImage)
        return false;

    GraphicsContext* maskImageContext = maskImage->context();

    // Transform the mask image coordinate system to absolute screen coordinates
    maskImageContext->translate(-maskAbsoluteBoundingBox.x(), -maskAbsoluteBoundingBox.y());
    maskImageContext->concatCTM(transform);

    imageBuffer.set(maskImage.release());
    savedContext = context;
    context = maskImageContext;

    return true;
}
Example #8
void drawNativeImage(const NativeImagePtr& image, GraphicsContext& context, const FloatRect& destRect, const FloatRect& srcRect, const IntSize&, CompositeOperator op, BlendMode mode, const ImageOrientation& orientation)
{
    context.save();
    
    // Set the compositing operation.
    if (op == CompositeSourceOver && mode == BlendModeNormal && !nativeImageHasAlpha(image))
        context.setCompositeOperation(CompositeCopy);
    else
        context.setCompositeOperation(op, mode);
        
#if ENABLE(IMAGE_DECODER_DOWN_SAMPLING)
    IntSize scaledSize = nativeImageSize(image);
    FloatRect adjustedSrcRect = adjustSourceRectForDownSampling(srcRect, scaledSize);
#else
    FloatRect adjustedSrcRect(srcRect);
#endif
        
    FloatRect adjustedDestRect = destRect;
        
    if (orientation != DefaultImageOrientation) {
        // ImageOrientation expects the origin to be at (0, 0).
        context.translate(destRect.x(), destRect.y());
        adjustedDestRect.setLocation(FloatPoint());
        context.concatCTM(orientation.transformFromDefault(adjustedDestRect.size()));
        if (orientation.usesWidthAsHeight()) {
            // The destination rectangle will have its width and height already reversed for the orientation of
            // the image, as it was needed for page layout, so we need to reverse it back here.
            adjustedDestRect.setSize(adjustedDestRect.size().transposedSize());
        }
    }

    context.platformContext()->drawSurfaceToContext(image.get(), adjustedDestRect, adjustedSrcRect, context);
    context.restore();
}
Example #9
void EllipsisBox::paint(PaintInfo& paintInfo, const LayoutPoint& paintOffset, LayoutUnit lineTop, LayoutUnit lineBottom)
{
    GraphicsContext* context = paintInfo.context;
    RenderStyle* style = renderer().style(isFirstLineStyle());

    const Font& font = style->font();
    FloatPoint boxOrigin = locationIncludingFlipping();
    boxOrigin.moveBy(FloatPoint(paintOffset));
    if (!isHorizontal())
        boxOrigin.move(0, -virtualLogicalHeight());
    FloatRect boxRect(boxOrigin, LayoutSize(logicalWidth(), virtualLogicalHeight()));
    GraphicsContextStateSaver stateSaver(*context);
    if (!isHorizontal())
        context->concatCTM(InlineTextBox::rotation(boxRect, InlineTextBox::Clockwise));
    FloatPoint textOrigin = FloatPoint(boxOrigin.x(), boxOrigin.y() + font.fontMetrics().ascent());

    Color styleTextColor = renderer().resolveColor(style, CSSPropertyWebkitTextFillColor);
    if (styleTextColor != context->fillColor())
        context->setFillColor(styleTextColor);

    if (selectionState() != RenderObject::SelectionNone) {
        paintSelection(context, boxOrigin, style, font);

        // Select the correct color for painting the text.
        Color foreground = paintInfo.forceBlackText() ? Color::black : renderer().selectionForegroundColor();
        if (foreground != styleTextColor)
            context->setFillColor(foreground);
    }

    // Text shadows are disabled when printing. http://crbug.com/258321
    const ShadowList* shadowList = context->printing() ? 0 : style->textShadow();
    bool hasShadow = shadowList;
    if (hasShadow) {
        OwnPtr<DrawLooperBuilder> drawLooperBuilder = DrawLooperBuilder::create();
        for (size_t i = shadowList->shadows().size(); i--; ) {
            const ShadowData& shadow = shadowList->shadows()[i];
            float shadowX = isHorizontal() ? shadow.x() : shadow.y();
            float shadowY = isHorizontal() ? shadow.y() : -shadow.x();
            FloatSize offset(shadowX, shadowY);
            drawLooperBuilder->addShadow(offset, shadow.blur(), shadow.color(),
                DrawLooperBuilder::ShadowRespectsTransforms, DrawLooperBuilder::ShadowIgnoresAlpha);
        }
        drawLooperBuilder->addUnmodifiedContent();
        context->setDrawLooper(drawLooperBuilder.release());
    }

    TextRun textRun = RenderBlockFlow::constructTextRun(&renderer(), font, m_str, style, TextRun::AllowTrailingExpansion);
    TextRunPaintInfo textRunPaintInfo(textRun);
    textRunPaintInfo.bounds = boxRect;
    context->drawText(font, textRunPaintInfo, textOrigin);

    // Restore the regular fill color.
    if (styleTextColor != context->fillColor())
        context->setFillColor(styleTextColor);

    if (hasShadow)
        context->clearDrawLooper();

    paintMarkupBox(paintInfo, paintOffset, lineTop, lineBottom, style);
}
void TextureMapperImageBuffer::beginClip(const TransformationMatrix& matrix, const FloatRect& rect)
{
    GraphicsContext* context = currentContext();
    if (!context)
        return;
#if ENABLE(3D_RENDERING)
    TransformationMatrix previousTransform = context->get3DTransform();
#else
    AffineTransform previousTransform = context->getCTM();
#endif
    context->save();

#if ENABLE(3D_RENDERING)
    context->concat3DTransform(matrix);
#else
    context->concatCTM(matrix.toAffineTransform());
#endif

    context->clip(rect);

#if ENABLE(3D_RENDERING)
    context->set3DTransform(previousTransform);
#else
    context->setCTM(previousTransform);
#endif
}
void BitmapImage::draw(GraphicsContext& context, const FloatRect& dst, const FloatRect& src, CompositeOperator op,
    BlendMode blendMode, ImageOrientationDescription description)
{
    if (!dst.width() || !dst.height() || !src.width() || !src.height())
        return;

    startAnimation();

    auto surface = frameImageAtIndex(m_currentFrame);
    if (!surface) // If it's too early we won't have an image yet.
        return;

    Color color = singlePixelSolidColor();
    if (color.isValid()) {
        fillWithSolidColor(context, dst, color, op);
        return;
    }

    context.save();

    // Set the compositing operation.
    if (op == CompositeSourceOver && blendMode == BlendModeNormal && !frameHasAlphaAtIndex(m_currentFrame))
        context.setCompositeOperation(CompositeCopy);
    else
        context.setCompositeOperation(op, blendMode);

#if ENABLE(IMAGE_DECODER_DOWN_SAMPLING)
    IntSize scaledSize = cairoSurfaceSize(surface.get());
    FloatRect adjustedSrcRect = adjustSourceRectForDownSampling(src, scaledSize);
#else
    FloatRect adjustedSrcRect(src);
#endif

    ImageOrientation frameOrientation(description.imageOrientation());
    if (description.respectImageOrientation() == RespectImageOrientation)
        frameOrientation = frameOrientationAtIndex(m_currentFrame);

    FloatRect dstRect = dst;

    if (frameOrientation != DefaultImageOrientation) {
        // ImageOrientation expects the origin to be at (0, 0).
        context.translate(dstRect.x(), dstRect.y());
        dstRect.setLocation(FloatPoint());
        context.concatCTM(frameOrientation.transformFromDefault(dstRect.size()));
        if (frameOrientation.usesWidthAsHeight()) {
            // The destination rectangle will have its width and height already reversed for the orientation of
            // the image, as it was needed for page layout, so we need to reverse it back here.
            dstRect = FloatRect(dstRect.x(), dstRect.y(), dstRect.height(), dstRect.width());
        }
    }

    context.platformContext()->drawSurfaceToContext(surface.get(), dstRect, adjustedSrcRect, context);

    context.restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
Example #12
WebBitmap* Frame::imageFromRect(const FloatRect& rect) const
{
    // We should not try to create a bitmap of zero height or width as it is not supported.
    IntRect irect(rect);
    if (irect.size().width() == 0 || irect.size().height() == 0)
        return NULL;

    FrameLoaderClientApollo* const clientApollo = FrameLoaderClientApollo::clientApollo(this);
    WebHost* webHost = clientApollo->webHost();
    ASSERT(webHost);

    WebBitmap* resultBitmap = webHost->m_pVTable->createBitmap(webHost, irect.size().width(), irect.size().height());

    // Clear the pixels with transparent black (0).
    void* pixelData = resultBitmap->m_pVTable->getPixelData(resultBitmap);
    unsigned long stride = resultBitmap->m_pVTable->getStride(resultBitmap);
    memset(pixelData, 0, stride * irect.height());

    // Now, paint the selection into a graphics context.
    WebIntRect sourceRect;
    sourceRect.m_left = 0;
    sourceRect.m_top = 0;
    sourceRect.m_right = irect.width();
    sourceRect.m_bottom = irect.height();

    GraphicsContext gc(resultBitmap, &sourceRect);

    IntSize offset = view()->scrollOffset();
    irect.move(-offset.width(), -offset.height());
    irect = view()->convertToContainingWindow(irect);

#if OS(WINDOWS) || OS(DARWIN)
    // Change the origin from top-left to bottom-left.
    gc.concatCTM(TransformationMatrix(1, 0, 0, -1, 0, irect.height()).toAffineTransform());
#endif

    // This transformation moves the clip into the origin of the user space.
    gc.concatCTM(TransformationMatrix().translate(-irect.x(), -irect.y()).toAffineTransform());

    view()->paint(&gc, irect);

    return resultBitmap;
}
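The matrix (1, 0, 0, -1, 0, irect.height()) concatenated above maps y to height - y, which is exactly the top-left to bottom-left origin change the comment describes. A quick standalone check (illustrative only):

#include <cassert>

struct Point { float x, y; };

// Apply the matrix (a, b, c, d, e, f) = (1, 0, 0, -1, 0, height):
// x' = x, y' = -y + height.
Point flipOrigin(const Point& p, float height)
{
    return { p.x, -p.y + height };
}

int main()
{
    assert(flipOrigin({ 0, 0 }, 100).y == 100); // old top edge becomes the bottom edge
    assert(flipOrigin({ 0, 100 }, 100).y == 0); // old bottom edge becomes the top edge
    assert(flipOrigin({ 0, 40 }, 100).y == 60);
    return 0;
}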
Example #13
void SVGRenderingContext::clipToImageBuffer(GraphicsContext& context, const AffineTransform& absoluteTransform, const FloatRect& targetRect, std::unique_ptr<ImageBuffer>& imageBuffer, bool safeToClear)
{
    if (!imageBuffer)
        return;

    FloatRect absoluteTargetRect = calculateImageBufferRect(targetRect, absoluteTransform);

    // The mask image has been created in the absolute coordinate space, as the image should not be scaled.
    // So the actual masking process has to be done in the absolute coordinate space as well.
    context.concatCTM(absoluteTransform.inverse().valueOr(AffineTransform()));
    context.clipToImageBuffer(*imageBuffer, absoluteTargetRect);
    context.concatCTM(absoluteTransform);

    // When nesting resources, with objectBoundingBox as content unit types, there's no use in caching the
    // resulting image buffer as the parent resource already caches the result.
    if (safeToClear && !currentContentTransformation().isIdentity())
        imageBuffer.reset();
}
Example #14
bool RenderSVGResourceClipper::applyClippingToContext(RenderObject* object, const FloatRect& objectBoundingBox,
        const FloatRect& repaintRect, GraphicsContext* context)
{
    bool missingClipperData = !m_clipper.contains(object);
    if (missingClipperData)
        m_clipper.set(object, new ClipperData);

    bool shouldCreateClipData = false;
    AffineTransform animatedLocalTransform = static_cast<SVGClipPathElement*>(node())->animatedLocalTransform();
    ClipperData* clipperData = m_clipper.get(object);
    if (!clipperData->clipMaskImage) {
        if (pathOnlyClipping(context, animatedLocalTransform, objectBoundingBox))
            return true;
        shouldCreateClipData = true;
    }

    AffineTransform absoluteTransform;
    SVGRenderingContext::calculateTransformationToOutermostCoordinateSystem(object, absoluteTransform);

    if (shouldCreateClipData && !repaintRect.isEmpty()) {
        if (!SVGRenderingContext::createImageBuffer(repaintRect, absoluteTransform, clipperData->clipMaskImage, ColorSpaceDeviceRGB, Unaccelerated))
            return false;

        GraphicsContext* maskContext = clipperData->clipMaskImage->context();
        ASSERT(maskContext);

        maskContext->concatCTM(animatedLocalTransform);

        // clipPath can also be clipped by another clipPath.
        SVGResources* resources = SVGResourcesCache::cachedResourcesForRenderObject(this);
        RenderSVGResourceClipper* clipper;
        bool succeeded;
        if (resources && (clipper = resources->clipper())) {
            GraphicsContextStateSaver stateSaver(*maskContext);

            if (!clipper->applyClippingToContext(this, objectBoundingBox, repaintRect, maskContext))
                return false;

            succeeded = drawContentIntoMaskImage(clipperData, objectBoundingBox);
            // The context restore applies the clipping on non-CG platforms.
        } else
            succeeded = drawContentIntoMaskImage(clipperData, objectBoundingBox);

        if (!succeeded)
            clipperData->clipMaskImage.clear();
    }

    if (!clipperData->clipMaskImage)
        return false;

    SVGRenderingContext::clipToImageBuffer(context, absoluteTransform, repaintRect, clipperData->clipMaskImage, missingClipperData);
    return true;
}
void CanvasRenderingContext2D::setTransform(float m11, float m12, float m21, float m22, float dx, float dy)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return;
    
    if (!isfinite(m11) | !isfinite(m21) | !isfinite(dx) | 
        !isfinite(m12) | !isfinite(m22) | !isfinite(dy))
        return;

    AffineTransform ctm = state().m_transform;
    if (!ctm.isInvertible())
        return;
    c->concatCTM(c->getCTM().inverse());
    c->concatCTM(canvas()->baseTransform());
    state().m_transform.multiply(ctm.inverse());
    m_path.transform(ctm);

    state().m_invertibleCTM = true;
    transform(m11, m12, m21, m22, dx, dy);
}
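A note on the reset sequence in setTransform above: the concatenated matrix takes effect before the existing CTM, so the two concatCTM calls compose as CTM * CTM^-1 * baseTransform = baseTransform. The context is therefore back at the canvas base transform before transform() applies the requested matrix on top of it.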
Example #16
bool RenderSVGResourceClipper::applyClippingToContext(RenderObject* object, const FloatRect& objectBoundingBox,
                                                      const FloatRect& repaintRect, GraphicsContext* context)
{
    if (!m_clipper.contains(object))
        m_clipper.set(object, new ClipperData);

    bool shouldCreateClipData = false;
    ClipperData* clipperData = m_clipper.get(object);
    if (!clipperData->clipMaskImage) {
        if (pathOnlyClipping(context, objectBoundingBox))
            return true;
        shouldCreateClipData = true;
    }

    AffineTransform absoluteTransform;
    SVGImageBufferTools::calculateTransformationToOutermostSVGCoordinateSystem(object, absoluteTransform);

    FloatRect absoluteTargetRect = absoluteTransform.mapRect(repaintRect);
    FloatRect clampedAbsoluteTargetRect = SVGImageBufferTools::clampedAbsoluteTargetRectForRenderer(object, absoluteTargetRect);

    if (shouldCreateClipData && !clampedAbsoluteTargetRect.isEmpty()) {
        if (!SVGImageBufferTools::createImageBuffer(absoluteTargetRect, clampedAbsoluteTargetRect, clipperData->clipMaskImage, ColorSpaceDeviceRGB))
            return false;

        GraphicsContext* maskContext = clipperData->clipMaskImage->context();
        ASSERT(maskContext);

        // The save/restore pair is needed for clipToImageBuffer - it doesn't work without it on non-Cg platforms.
        maskContext->save();
        maskContext->translate(-clampedAbsoluteTargetRect.x(), -clampedAbsoluteTargetRect.y());
        maskContext->concatCTM(absoluteTransform);

        // clipPath can also be clipped by another clipPath.
        if (SVGResources* resources = SVGResourcesCache::cachedResourcesForRenderObject(this)) {
            if (RenderSVGResourceClipper* clipper = resources->clipper()) {
                if (!clipper->applyClippingToContext(this, objectBoundingBox, repaintRect, maskContext)) {
                    maskContext->restore();
                    return false;
                }
            }
        }

        drawContentIntoMaskImage(clipperData, objectBoundingBox);
        maskContext->restore();
    }

    if (!clipperData->clipMaskImage)
        return false;

    SVGImageBufferTools::clipToImageBuffer(context, absoluteTransform, clampedAbsoluteTargetRect, clipperData->clipMaskImage);
    return true;
}
void CanvasRenderingContext2D::setTransform(float m11, float m12, float m21, float m22, float dx, float dy)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return;
    
    // HTML5 3.14.11.1 -- ignore any calls that pass non-finite numbers
    if (!isfinite(m11) | !isfinite(m21) | !isfinite(dx) | 
        !isfinite(m12) | !isfinite(m22) | !isfinite(dy))
        return;

    TransformationMatrix ctm = state().m_transform;
    if (!ctm.isInvertible())
        return;
    c->concatCTM(c->getCTM().inverse());
    c->concatCTM(m_canvas->baseTransform());
    state().m_transform.multiply(ctm.inverse());
    m_path.transform(ctm);

    state().m_invertibleCTM = true;
    transform(m11, m12, m21, m22, dx, dy);
}
Example #18
void RenderSVGResourceMasker::drawContentIntoMaskImage(MaskerData* maskerData, const SVGMaskElement* maskElement, RenderObject* object)
{
    GraphicsContext* maskImageContext = maskerData->maskImage->context();
    ASSERT(maskImageContext);

    // If needed, adjust the mask image context according to the target objectBoundingBox.
    AffineTransform maskContentTransformation;
    if (maskElement->maskContentUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
        FloatRect objectBoundingBox = object->objectBoundingBox();
        maskContentTransformation.translate(objectBoundingBox.x(), objectBoundingBox.y());
        maskContentTransformation.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
        maskImageContext->concatCTM(maskContentTransformation);
    }

    // Draw the content into the ImageBuffer.
    for (Node* node = maskElement->firstChild(); node; node = node->nextSibling()) {
        RenderObject* renderer = node->renderer();
        if (!node->isSVGElement() || !static_cast<SVGElement*>(node)->isStyled() || !renderer)
            continue;
        RenderStyle* style = renderer->style();
        if (!style || style->display() == NONE || style->visibility() != VISIBLE)
            continue;
        SVGImageBufferTools::renderSubtreeToImageBuffer(maskerData->maskImage.get(), renderer, maskContentTransformation);
    }

    maskImageContext->restore();

#if !PLATFORM(CG)
    maskerData->maskImage->transformColorSpace(ColorSpaceDeviceRGB, ColorSpaceLinearRGB);
#endif

    // Create the luminance mask.
    IntRect maskImageRect(IntPoint(), maskerData->maskImage->size());
    RefPtr<ImageData> imageData = maskerData->maskImage->getUnmultipliedImageData(maskImageRect);
    ByteArray* srcPixelArray = imageData->data()->data();

    unsigned pixelArrayLength = srcPixelArray->length();
    for (unsigned pixelOffset = 0; pixelOffset < pixelArrayLength; pixelOffset += 4) {
        unsigned char a = srcPixelArray->get(pixelOffset + 3);
        if (!a)
            continue;
        unsigned char r = srcPixelArray->get(pixelOffset);
        unsigned char g = srcPixelArray->get(pixelOffset + 1);
        unsigned char b = srcPixelArray->get(pixelOffset + 2);

        double luma = (r * 0.2125 + g * 0.7154 + b * 0.0721) * ((double)a / 255.0);
        srcPixelArray->set(pixelOffset + 3, luma);
    }

    maskerData->maskImage->putUnmultipliedImageData(imageData.get(), maskImageRect, IntPoint());
}
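The loop above turns the RGBA mask into a luminance mask: each pixel's alpha is replaced by its luma (0.2125 R + 0.7154 G + 0.0721 B) scaled by the original alpha. A self-contained version of the same conversion (illustrative sketch, not the WebCore helper):

#include <cstdint>
#include <vector>

// Replace each pixel's alpha with its luminance, scaled by the original alpha,
// using the same coefficients as the loop above.
void convertToLuminanceMask(std::vector<uint8_t>& rgba)
{
    for (size_t offset = 0; offset + 3 < rgba.size(); offset += 4) {
        uint8_t a = rgba[offset + 3];
        if (!a)
            continue; // fully transparent pixels contribute nothing
        double luma = (rgba[offset] * 0.2125 + rgba[offset + 1] * 0.7154 + rgba[offset + 2] * 0.0721)
            * (a / 255.0);
        rgba[offset + 3] = static_cast<uint8_t>(luma);
    }
}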
bool RenderSVGResourceMasker::applyResource(RenderObject* object, RenderStyle*, GraphicsContext*& context, unsigned short resourceMode)
{
    ASSERT(object);
    ASSERT(context);
#ifndef NDEBUG
    ASSERT(resourceMode == ApplyToDefaultMode);
#else
    UNUSED_PARAM(resourceMode);
#endif

    if (!m_masker.contains(object))
        m_masker.set(object, new MaskerData);

    MaskerData* maskerData = m_masker.get(object);

    AffineTransform absoluteTransform;
    SVGImageBufferTools::calculateTransformationToOutermostSVGCoordinateSystem(object, absoluteTransform);

    FloatRect absoluteTargetRect = absoluteTransform.mapRect(object->repaintRectInLocalCoordinates());
    FloatRect clampedAbsoluteTargetRect = SVGImageBufferTools::clampedAbsoluteTargetRect(absoluteTargetRect);

    if (!maskerData->maskImage && !clampedAbsoluteTargetRect.isEmpty()) {
        SVGMaskElement* maskElement = static_cast<SVGMaskElement*>(node());
        if (!maskElement)
            return false;

        ASSERT(style());
        const SVGRenderStyle* svgStyle = style()->svgStyle();
        ASSERT(svgStyle);
        ColorSpace colorSpace = svgStyle->colorInterpolation() == CI_LINEARRGB ? ColorSpaceLinearRGB : ColorSpaceDeviceRGB;
        if (!SVGImageBufferTools::createImageBuffer(absoluteTargetRect, clampedAbsoluteTargetRect, maskerData->maskImage, colorSpace))
            return false;

        GraphicsContext* maskImageContext = maskerData->maskImage->context();
        ASSERT(maskImageContext);

        // The save/restore pair is needed for clipToImageBuffer - it doesn't work without it on non-Cg platforms.
        maskImageContext->save();
        maskImageContext->translate(-clampedAbsoluteTargetRect.x(), -clampedAbsoluteTargetRect.y());
        maskImageContext->concatCTM(absoluteTransform);

        drawContentIntoMaskImage(maskerData, colorSpace, maskElement, object);
    }

    if (!maskerData->maskImage)
        return false;

    SVGImageBufferTools::clipToImageBuffer(context, absoluteTransform, clampedAbsoluteTargetRect, maskerData->maskImage);
    return true;
}
void CanvasRenderingContext2D::transform(float m11, float m12, float m21, float m22, float dx, float dy)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return;
    
    // HTML5 3.14.11.1 -- ignore any calls that pass non-finite numbers
    if (!isfinite(m11) | !isfinite(m21) | !isfinite(dx) | 
        !isfinite(m12) | !isfinite(m22) | !isfinite(dy))
        return;
    AffineTransform transform(m11, m12, m21, m22, dx, dy);
    c->concatCTM(transform);
    state().m_transform.multiply(transform);
    m_path.transform(transform.inverse());
}
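transform() above remaps the recorded path by the inverse of the matrix it concatenates. That keeps the path at the same device-space position, since device = newCTM * (M^-1 * p) = oldCTM * M * M^-1 * p = oldCTM * p. A standalone numeric check of that identity, using a minimal affine type rather than WebCore's AffineTransform (illustrative only):

#include <cassert>
#include <cmath>

// x' = a*x + c*y + e, y' = b*x + d*y + f.
struct Affine { double a, b, c, d, e, f; };
struct Point { double x, y; };

Point apply(const Affine& m, const Point& p)
{
    return { m.a * p.x + m.c * p.y + m.e, m.b * p.x + m.d * p.y + m.f };
}

Affine multiply(const Affine& m, const Affine& n) // m * n, with n applied first
{
    return { m.a * n.a + m.c * n.b, m.b * n.a + m.d * n.b,
             m.a * n.c + m.c * n.d, m.b * n.c + m.d * n.d,
             m.a * n.e + m.c * n.f + m.e, m.b * n.e + m.d * n.f + m.f };
}

Affine inverse(const Affine& m)
{
    double det = m.a * m.d - m.b * m.c;
    return { m.d / det, -m.b / det, -m.c / det, m.a / det,
             (m.c * m.f - m.d * m.e) / det, (m.b * m.e - m.a * m.f) / det };
}

int main()
{
    Affine ctm { 2, 0, 0, 2, 10, 10 };  // current transform: scale 2, translate (10, 10)
    Affine m { 1, 0, 0, 1, 5, 0 };      // matrix passed to transform(): translate (5, 0)
    Point pathPoint { 3, 4 };           // a point already recorded on the path

    Point deviceBefore = apply(ctm, pathPoint);
    Affine newCtm = multiply(ctm, m);              // c->concatCTM(transform)
    Point remapped = apply(inverse(m), pathPoint); // m_path.transform(transform.inverse())
    Point deviceAfter = apply(newCtm, remapped);

    assert(std::fabs(deviceBefore.x - deviceAfter.x) < 1e-9);
    assert(std::fabs(deviceBefore.y - deviceAfter.y) < 1e-9);
    return 0;
}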
void TextureMapperImageBuffer::drawSolidColor(const FloatRect& rect, const TransformationMatrix& matrix, const Color& color)
{
    GraphicsContext* context = currentContext();
    if (!context)
        return;

    context->save();
#if ENABLE(3D_RENDERING)
    context->concat3DTransform(matrix);
#else
    context->concatCTM(matrix.toAffineTransform());
#endif

    context->fillRect(rect, color, ColorSpaceDeviceRGB);
    context->restore();
}
PassOwnPtr<ImageBuffer> RenderSVGResourcePattern::createTileImage(RenderObject* object,
                                                                  const PatternAttributes& attributes,
                                                                  const FloatRect& tileBoundaries,
                                                                  const FloatRect& absoluteTileBoundaries,
                                                                  const AffineTransform& tileImageTransform) const
{
    ASSERT(object);

    // Clamp tile image size against SVG viewport size, as a last resort, to avoid allocating huge image buffers.
    FloatRect contentBoxRect = SVGRenderSupport::findTreeRootObject(object)->contentBoxRect();

    FloatRect clampedAbsoluteTileBoundaries = absoluteTileBoundaries;
    if (clampedAbsoluteTileBoundaries.width() > contentBoxRect.width())
        clampedAbsoluteTileBoundaries.setWidth(contentBoxRect.width());

    if (clampedAbsoluteTileBoundaries.height() > contentBoxRect.height())
        clampedAbsoluteTileBoundaries.setHeight(contentBoxRect.height());

    OwnPtr<ImageBuffer> tileImage;

    if (!SVGImageBufferTools::createImageBuffer(absoluteTileBoundaries, clampedAbsoluteTileBoundaries, tileImage, ColorSpaceDeviceRGB))
        return PassOwnPtr<ImageBuffer>();

    GraphicsContext* tileImageContext = tileImage->context();
    ASSERT(tileImageContext);

    // The image buffer represents the final rendered size, so the content has to be scaled (to avoid pixelation).
    tileImageContext->scale(FloatSize(absoluteTileBoundaries.width() / tileBoundaries.width(),
                                      absoluteTileBoundaries.height() / tileBoundaries.height()));

    // Apply tile image transformations.
    if (!tileImageTransform.isIdentity())
        tileImageContext->concatCTM(tileImageTransform);

    AffineTransform contentTransformation;
    if (attributes.boundingBoxModeContent())
        contentTransformation = tileImageTransform;

    // Draw the content into the ImageBuffer.
    for (Node* node = attributes.patternContentElement()->firstChild(); node; node = node->nextSibling()) {
        if (!node->isSVGElement() || !static_cast<SVGElement*>(node)->isStyled() || !node->renderer())
            continue;
        SVGImageBufferTools::renderSubtreeToImageBuffer(tileImage.get(), node->renderer(), contentTransformation);
    }

    return tileImage.release();
}
PassOwnPtr<ImageBuffer> RenderSVGResourcePattern::createTileImage(const PatternAttributes& attributes,
                                                                  const FloatRect& tileBoundaries,
                                                                  const FloatRect& absoluteTileBoundaries,
                                                                  const AffineTransform& tileImageTransform) const
{
    FloatRect clampedAbsoluteTileBoundaries = SVGRenderingContext::clampedAbsoluteTargetRect(absoluteTileBoundaries);

    IntSize imageSize(roundedIntSize(clampedAbsoluteTileBoundaries.size()));
    if (imageSize.isEmpty())
        return nullptr;
    OwnPtr<ImageBuffer> tileImage = ImageBuffer::create(imageSize);
    if (!tileImage)
        return nullptr;

    GraphicsContext* tileImageContext = tileImage->context();
    ASSERT(tileImageContext);
    IntSize unclampedImageSize(roundedIntSize(absoluteTileBoundaries.size()));
    tileImageContext->scale(unclampedImageSize.width() / absoluteTileBoundaries.width(), unclampedImageSize.height() / absoluteTileBoundaries.height());

    // The image buffer represents the final rendered size, so the content has to be scaled (to avoid pixelation).
    tileImageContext->scale(
        clampedAbsoluteTileBoundaries.width() / tileBoundaries.width(),
        clampedAbsoluteTileBoundaries.height() / tileBoundaries.height());

    // Apply tile image transformations.
    if (!tileImageTransform.isIdentity())
        tileImageContext->concatCTM(tileImageTransform);

    AffineTransform contentTransformation;
    if (attributes.patternContentUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX)
        contentTransformation = tileImageTransform;

    // Draw the content into the ImageBuffer.
    for (Element* element = ElementTraversal::firstWithin(*attributes.patternContentElement()); element; element = ElementTraversal::nextSibling(*element)) {
        if (!element->isSVGElement() || !element->renderer())
            continue;
        if (element->renderer()->needsLayout())
            return nullptr;
        SVGRenderingContext::renderSubtree(tileImage->context(), element->renderer(), contentTransformation);
    }

    return tileImage.release();
}
void TextureMapperImageBuffer::drawTexture(const BitmapTexture& texture, const FloatRect& targetRect, const TransformationMatrix& matrix, float opacity, unsigned /* exposedEdges */)
{
    GraphicsContext* context = currentContext();
    if (!context)
        return;

    const BitmapTextureImageBuffer& textureImageBuffer = static_cast<const BitmapTextureImageBuffer&>(texture);
    ImageBuffer* image = textureImageBuffer.m_image.get();
    context->save();
    context->setCompositeOperation(isInMaskMode() ? CompositeDestinationIn : CompositeSourceOver);
    context->setAlpha(opacity);
#if ENABLE(3D_RENDERING)
    context->concat3DTransform(matrix);
#else
    context->concatCTM(matrix.toAffineTransform());
#endif
    context->drawImageBuffer(image, ColorSpaceDeviceRGB, targetRect);
    context->restore();
}
bool RenderSVGResourceMasker::drawContentIntoMaskImage(MaskerData* maskerData, ColorSpace colorSpace, RenderObject* object)
{
    GraphicsContext* maskImageContext = maskerData->maskImage->context();
    ASSERT(maskImageContext);

    // If needed, adjust the mask image context according to the target objectBoundingBox.
    AffineTransform maskContentTransformation;
    if (maskElement().maskContentUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
        FloatRect objectBoundingBox = object->objectBoundingBox();
        maskContentTransformation.translate(objectBoundingBox.x(), objectBoundingBox.y());
        maskContentTransformation.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
        maskImageContext->concatCTM(maskContentTransformation);
    }

    // Draw the content into the ImageBuffer.
    auto children = childrenOfType<SVGElement>(maskElement());
    for (auto it = children.begin(), end = children.end(); it != end; ++it) {
        SVGElement& child = *it;
        auto renderer = child.renderer();
        if (!renderer)
            continue;
        if (renderer->needsLayout())
            return false;
        const RenderStyle& style = renderer->style();
        if (style.display() == NONE || style.visibility() != VISIBLE)
            continue;
        SVGRenderingContext::renderSubtreeToImageBuffer(maskerData->maskImage.get(), *renderer, maskContentTransformation);
    }

#if !USE(CG)
    maskerData->maskImage->transformColorSpace(ColorSpaceDeviceRGB, colorSpace);
#else
    UNUSED_PARAM(colorSpace);
#endif

    ASSERT(style().svgStyle());
    // Create the luminance mask.
    if (style().svgStyle()->maskType() == MT_LUMINANCE)
        maskerData->maskImage->convertToLuminanceMask();

    return true;
}
static void paintFilteredContent(const LayoutObject& object, GraphicsContext& context, FilterData* filterData)
{
    ASSERT(filterData->m_state == FilterData::ReadyToPaint);
    ASSERT(filterData->filter->sourceGraphic());

    filterData->m_state = FilterData::PaintingFilter;

    SkiaImageFilterBuilder builder;
    RefPtr<SkImageFilter> imageFilter = builder.build(filterData->filter->lastEffect(), ColorSpaceDeviceRGB);
    FloatRect boundaries = filterData->filter->filterRegion();
    context.save();

    // Clip drawing of filtered image to the minimum required paint rect.
    FilterEffect* lastEffect = filterData->filter->lastEffect();
    context.clipRect(lastEffect->determineAbsolutePaintRect(lastEffect->maxEffectRect()));

#ifdef CHECK_CTM_FOR_TRANSFORMED_IMAGEFILTER
    // TODO: Remove this workaround once skew/rotation support is added in Skia
    // (https://code.google.com/p/skia/issues/detail?id=3288, crbug.com/446935).
    // If the CTM contains rotation or shearing, apply the filter to
    // the unsheared/unrotated matrix, and do the shearing/rotation
    // as a final pass.
    AffineTransform ctm = SVGLayoutSupport::deprecatedCalculateTransformToLayer(&object);
    if (ctm.b() || ctm.c()) {
        AffineTransform scaleAndTranslate;
        scaleAndTranslate.translate(ctm.e(), ctm.f());
        scaleAndTranslate.scale(ctm.xScale(), ctm.yScale());
        ASSERT(scaleAndTranslate.isInvertible());
        AffineTransform shearAndRotate = scaleAndTranslate.inverse();
        shearAndRotate.multiply(ctm);
        context.concatCTM(shearAndRotate.inverse());
        imageFilter = builder.buildTransform(shearAndRotate, imageFilter.get());
    }
#endif

    context.beginLayer(1, SkXfermode::kSrcOver_Mode, &boundaries, ColorFilterNone, imageFilter.get());
    context.endLayer();
    context.restore();

    filterData->m_state = FilterData::ReadyToPaint;
}
Example #27
PassOwnPtr<ImageBuffer> RenderSVGResourcePattern::createTileImage(const PatternAttributes& attributes,
                                                                  const FloatRect& tileBoundaries,
                                                                  const FloatRect& absoluteTileBoundaries,
                                                                  const AffineTransform& tileImageTransform,
                                                                  FloatRect& clampedAbsoluteTileBoundaries) const
{
    clampedAbsoluteTileBoundaries = SVGRenderingContext::clampedAbsoluteTargetRect(absoluteTileBoundaries);

    OwnPtr<ImageBuffer> tileImage;

    if (!SVGRenderingContext::createImageBufferForPattern(absoluteTileBoundaries, clampedAbsoluteTileBoundaries, tileImage, ColorSpaceDeviceRGB, Unaccelerated))
        return nullptr;

    GraphicsContext* tileImageContext = tileImage->context();
    ASSERT(tileImageContext);

    // The image buffer represents the final rendered size, so the content has to be scaled (to avoid pixelation).
    tileImageContext->scale(FloatSize(clampedAbsoluteTileBoundaries.width() / tileBoundaries.width(),
                                      clampedAbsoluteTileBoundaries.height() / tileBoundaries.height()));

    // Apply tile image transformations.
    if (!tileImageTransform.isIdentity())
        tileImageContext->concatCTM(tileImageTransform);

    AffineTransform contentTransformation;
    if (attributes.patternContentUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX)
        contentTransformation = tileImageTransform;

    // Draw the content into the ImageBuffer.
    for (Node* node = attributes.patternContentElement()->firstChild(); node; node = node->nextSibling()) {
        if (!node->isSVGElement() || !static_cast<SVGElement*>(node)->isStyled() || !node->renderer())
            continue;
        if (node->renderer()->needsLayout())
            return nullptr;
        SVGRenderingContext::renderSubtreeToImageBuffer(tileImage.get(), node->renderer(), contentTransformation);
    }

    return tileImage.release();
}
void CanvasRenderingContext2D::transform(float m11, float m12, float m21, float m22, float dx, float dy)
{
    GraphicsContext* c = drawingContext();
    if (!c)
        return;
    if (!state().m_invertibleCTM)
        return;

    if (!isfinite(m11) | !isfinite(m21) | !isfinite(dx) | 
        !isfinite(m12) | !isfinite(m22) | !isfinite(dy))
        return;

    AffineTransform transform(m11, m12, m21, m22, dx, dy);
    AffineTransform newTransform = transform * state().m_transform;
    if (!newTransform.isInvertible()) {
        state().m_invertibleCTM = false;
        return;
    }

    state().m_transform = newTransform;
    c->concatCTM(transform);
    m_path.transform(transform.inverse());
}
Example #29
std::unique_ptr<ImageBuffer> RenderSVGResourcePattern::createTileImage(const PatternAttributes& attributes, const FloatRect& tileBoundaries, const FloatRect& absoluteTileBoundaries, const AffineTransform& tileImageTransform, FloatRect& clampedAbsoluteTileBoundaries) const
{
    clampedAbsoluteTileBoundaries = SVGRenderingContext::clampedAbsoluteTargetRect(absoluteTileBoundaries);

    std::unique_ptr<ImageBuffer> tileImage;

    if (!SVGRenderingContext::createImageBufferForPattern(absoluteTileBoundaries, clampedAbsoluteTileBoundaries, tileImage, ColorSpaceDeviceRGB, Unaccelerated))
        return nullptr;

    GraphicsContext* tileImageContext = tileImage->context();
    ASSERT(tileImageContext);

    // The image buffer represents the final rendered size, so the content has to be scaled (to avoid pixelation).
    tileImageContext->scale(FloatSize(clampedAbsoluteTileBoundaries.width() / tileBoundaries.width(),
                                      clampedAbsoluteTileBoundaries.height() / tileBoundaries.height()));

    // Apply tile image transformations.
    if (!tileImageTransform.isIdentity())
        tileImageContext->concatCTM(tileImageTransform);

    AffineTransform contentTransformation;
    if (attributes.patternContentUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX)
        contentTransformation = tileImageTransform;

    // Draw the content into the ImageBuffer.
    auto children = childrenOfType<SVGElement>(*attributes.patternContentElement());
    for (auto it = children.begin(), end = children.end(); it != end; ++it) {
        const SVGElement& child = *it;
        if (!child.renderer())
            continue;
        if (child.renderer()->needsLayout())
            return nullptr;
        SVGRenderingContext::renderSubtreeToImageBuffer(tileImage.get(), *child.renderer(), contentTransformation);
    }

    return tileImage;
}
bool RenderSVGResourceMasker::drawContentIntoMaskImage(MaskerData* maskerData, ColorSpace colorSpace, const SVGMaskElement* maskElement, RenderObject* object)
{
    GraphicsContext* maskImageContext = maskerData->maskImage->context();
    ASSERT(maskImageContext);

    // If needed, adjust the mask image context according to the target objectBoundingBox.
    AffineTransform maskContentTransformation;
    if (maskElement->maskContentUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
        FloatRect objectBoundingBox = object->objectBoundingBox();
        maskContentTransformation.translate(objectBoundingBox.x(), objectBoundingBox.y());
        maskContentTransformation.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
        maskImageContext->concatCTM(maskContentTransformation);
    }

    // Draw the content into the ImageBuffer.
    for (Node* node = maskElement->firstChild(); node; node = node->nextSibling()) {
        RenderObject* renderer = node->renderer();
        if (!node->isSVGElement() || !static_cast<SVGElement*>(node)->isStyled() || !renderer)
            continue;
        if (renderer->needsLayout())
            return false;
        RenderStyle* style = renderer->style();
        if (!style || style->display() == NONE || style->visibility() != VISIBLE)
            continue;
        SVGImageBufferTools::renderSubtreeToImageBuffer(maskerData->maskImage.get(), renderer, maskContentTransformation);
    }

#if !USE(CG)
    maskerData->maskImage->transformColorSpace(ColorSpaceDeviceRGB, colorSpace);
#else
    UNUSED_PARAM(colorSpace);
#endif

    // Create the luminance mask.
    maskerData->maskImage->convertToLuminanceMask();
    return true;
}