// Draws the result of |filterData|'s effect chain through |context| as a
// deferred Skia image filter (recorded, not immediately rasterized).
static void drawDeferredFilter(GraphicsContext* context, FilterData* filterData, SVGFilterElement* filterElement)
{
    SkiaImageFilterBuilder builder(context);
    // The SourceGraphic effect must already be present in the effect graph.
    SourceGraphic* sourceGraphic = static_cast<SourceGraphic*>(filterData->builder->getEffectById(SourceGraphic::effectName()));
    ASSERT(sourceGraphic);
    builder.setSourceGraphic(sourceGraphic);
    RefPtr<ImageFilter> imageFilter = builder.build(filterData->builder->lastEffect(), ColorSpaceDeviceRGB);
    FloatRect boundaries = filterData->boundaries;
    context->save();

    // Estimate the device-space area the filter's backing store would need.
    FloatSize deviceSize = context->getCTM().mapSize(boundaries.size());
    float scaledArea = deviceSize.width() * deviceSize.height();

    // If area of scaled size is bigger than the upper limit, adjust the scale
    // to fit. Note that this only really matters in the non-impl-side painting
    // case, since the impl-side case never allocates a full-sized backing
    // store, only tile-sized.
    // FIXME: remove this once all platforms are using impl-side painting.
    // crbug.com/169282.
    if (scaledArea > FilterEffect::maxFilterArea()) {
        float scale = sqrtf(FilterEffect::maxFilterArea() / scaledArea);
        context->scale(scale, scale);
    }

    // Clip drawing of filtered image to the minimum required paint rect.
    FilterEffect* lastEffect = filterData->builder->lastEffect();
    context->clipRect(lastEffect->determineAbsolutePaintRect(lastEffect->maxEffectRect()));

    if (filterElement->hasAttribute(SVGNames::filterResAttr)) {
        // Get boundaries in device coords.
        // FIXME: See crbug.com/382491. Is the use of getCTM OK here, given it does not include device
        // zoom or High DPI adjustments?
        FloatSize size = context->getCTM().mapSize(boundaries.size());
        // Compute the scale amount required so that the resulting offscreen is exactly filterResX by filterResY pixels.
        float filterResScaleX = filterElement->filterResX()->currentValue()->value() / size.width();
        float filterResScaleY = filterElement->filterResY()->currentValue()->value() / size.height();
        // Scale the CTM so the primitive is drawn to filterRes.
        context->scale(filterResScaleX, filterResScaleY);
        // Create a resize filter with the inverse scale.
        AffineTransform resizeMatrix;
        resizeMatrix.scale(1 / filterResScaleX, 1 / filterResScaleY);
        imageFilter = builder.buildTransform(resizeMatrix, imageFilter.get());
    }

    // If the CTM contains rotation or shearing, apply the filter to
    // the unsheared/unrotated matrix, and do the shearing/rotation
    // as a final pass.
    AffineTransform ctm = context->getCTM();
    if (ctm.b() || ctm.c()) {
        // Split the CTM into (translate * scale) and a residual
        // (shear/rotate) part; the residual is folded into the image filter.
        AffineTransform scaleAndTranslate;
        scaleAndTranslate.translate(ctm.e(), ctm.f());
        scaleAndTranslate.scale(ctm.xScale(), ctm.yScale());
        ASSERT(scaleAndTranslate.isInvertible());
        AffineTransform shearAndRotate = scaleAndTranslate.inverse();
        shearAndRotate.multiply(ctm);
        context->setCTM(scaleAndTranslate);
        imageFilter = builder.buildTransform(shearAndRotate, imageFilter.get());
    }

    // An empty begin/end layer pair applies |imageFilter| to the content
    // within |boundaries|.
    context->beginLayer(1, CompositeSourceOver, &boundaries, ColorFilterNone, imageFilter.get());
    context->endLayer();
    context->restore();
}
// Maps scene (Evas) coordinates to web content coordinates: undoes the page
// and device scale factors, adds the scroll/page position, then removes the
// smart-object view offset.
AffineTransform EwkView::transformFromScene() const
{
    AffineTransform sceneToContent;
    // Note that we undo both the page and the device scale factors.
    sceneToContent.scale(1 / pageScaleFactor());
    sceneToContent.scale(1 / deviceScaleFactor());
    sceneToContent.translate(pagePosition().x(), pagePosition().y());

    Ewk_View_Smart_Data* smartDataPtr = smartData();
    sceneToContent.translate(-smartDataPtr->view.x, -smartDataPtr->view.y);

    return sceneToContent;
}
// Builds the transform mapping |source| onto |dest|: a translation aligning
// the two origins, followed by a scale matching the two sizes.
AffineTransform makeMapBetweenRects(const FloatRect& source, const FloatRect& dest)
{
    const float offsetX = dest.x() - source.x();
    const float offsetY = dest.y() - source.y();

    AffineTransform mapping;
    mapping.translate(offsetX, offsetY);
    mapping.scale(dest.width() / source.width(), dest.height() / source.height());
    return mapping;
}
// Computes the transform from |renderer|'s local space to the outermost
// coordinate system: SVG transforms up to the <svg> root, then CSS layer
// transforms up to the nearest composited layer, finally scaled by the
// device scale factor so results match backing-store resolution.
AffineTransform SVGRenderingContext::calculateTransformationToOutermostCoordinateSystem(const RenderObject& renderer)
{
    AffineTransform absoluteTransform = currentContentTransformation();

    float deviceScaleFactor = renderer.document().deviceScaleFactor();
    // Walk up the render tree, accumulating SVG transforms.
    const RenderObject* ancestor = &renderer;
    while (ancestor) {
        absoluteTransform = ancestor->localToParentTransform() * absoluteTransform;
        if (ancestor->isSVGRoot())
            break;
        ancestor = ancestor->parent();
    }

    // Continue walking up the layer tree, accumulating CSS transforms.
    // (|ancestor| may be null if the loop above ran off the top of the tree.)
    RenderLayer* layer = ancestor ? ancestor->enclosingLayer() : nullptr;
    while (layer) {
        if (TransformationMatrix* layerTransform = layer->transform())
            absoluteTransform = layerTransform->toAffineTransform() * absoluteTransform;

        // We can stop at compositing layers, to match the backing resolution.
        if (layer->isComposited())
            break;

        layer = layer->parent();
    }

    absoluteTransform.scale(deviceScaleFactor);
    return absoluteTransform;
}
// Installs this clipper's accumulated clip paths on |context|. Paths marked
// as bounding-box-relative are first mapped into |boundingBox| user space.
void SVGResourceClipper::applyClip(GraphicsContext* context, const FloatRect& boundingBox) const
{
    if (m_clipData.clipData().isEmpty())
        return;

    bool heterogenousClipRules = false;
    WindRule clipRule = m_clipData.clipData()[0].windRule;

    context->beginPath();
    for (unsigned i = 0; i < m_clipData.clipData().size(); i++) {
        ClipData entry = m_clipData.clipData()[i];
        if (entry.windRule != clipRule)
            heterogenousClipRules = true;

        Path pathCopy = entry.path;
        if (entry.bboxUnits) {
            // Resolve objectBoundingBox units into user space.
            AffineTransform bboxTransform;
            bboxTransform.translate(boundingBox.x(), boundingBox.y());
            bboxTransform.scale(boundingBox.width(), boundingBox.height());
            pathCopy.transform(bboxTransform);
        }
        context->addPath(pathCopy);
    }

    // FIXME!
    // We don't currently allow for heterogenous clip rules.
    // we would have to detect such, draw to a mask, and then clip
    // to that mask
    context->clipPath(clipRule);
}
// Tiles this gradient-generated image into |destContext|, caching the
// rasterized tile across calls; the cache is keyed on the gradient hash,
// the adjusted tile size, and buffer compatibility with the destination.
void GeneratorGeneratedImage::drawPattern(GraphicsContext* destContext, const FloatRect& srcRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator compositeOp, const FloatRect& destRect, BlendMode)
{
    // Allow the generator to provide visually-equivalent tiling parameters for better performance.
    IntSize adjustedSize = m_size;
    FloatRect adjustedSrcRect = srcRect;
    m_gradient->adjustParametersForTiledDrawing(adjustedSize, adjustedSrcRect);

    // Factor in the destination context's scale to generate at the best resolution
    AffineTransform destContextCTM = destContext->getCTM(GraphicsContext::DefinitelyIncludeDeviceScale);
    double xScale = fabs(destContextCTM.xScale());
    double yScale = fabs(destContextCTM.yScale());
    AffineTransform adjustedPatternCTM = patternTransform;
    // The tile is generated at device resolution, so compensate the pattern
    // transform by the inverse scale.
    adjustedPatternCTM.scale(1.0 / xScale, 1.0 / yScale);
    adjustedSrcRect.scale(xScale, yScale);

    unsigned generatorHash = m_gradient->hash();

    // Regenerate the cached tile only when the gradient, tile size, or
    // destination-buffer compatibility has changed since the last draw.
    if (!m_cachedImageBuffer || m_cachedGeneratorHash != generatorHash || m_cachedAdjustedSize != adjustedSize || !destContext->isCompatibleWithBuffer(m_cachedImageBuffer.get())) {
        m_cachedImageBuffer = destContext->createCompatibleBuffer(adjustedSize, m_gradient->hasAlpha());
        if (!m_cachedImageBuffer)
            return;

        // Fill with the generated image.
        m_cachedImageBuffer->context()->fillRect(FloatRect(FloatPoint(), adjustedSize), *m_gradient);

        m_cachedGeneratorHash = generatorHash;
        m_cachedAdjustedSize = adjustedSize;
    }

    m_cachedImageBuffer->setSpaceSize(spaceSize());

    // Tile the image buffer into the context.
    m_cachedImageBuffer->drawPattern(destContext, adjustedSrcRect, adjustedPatternCTM, phase, styleColorSpace, compositeOp, destRect);
}
// Returns the transform for the current glyph's path: translate to the pen
// position offset by the glyph origin, then scale uniformly with Y negated.
AffineTransform SVGGlyphToPathTranslator::transform()
{
    const float originX = m_currentPoint.x() + m_glyphOrigin.x();
    const float originY = m_currentPoint.y() + m_glyphOrigin.y();

    AffineTransform pathTransform;
    pathTransform.translate(originX, originY);
    pathTransform.scale(m_scale, -m_scale);
    return pathTransform;
}
// Installs this clipper's clip paths on the cairo context, mapping
// bounding-box-relative paths into |boundingBox| user space first.
void SVGResourceClipper::applyClip(GraphicsContext* context, const FloatRect& boundingBox) const
{
    cairo_t* cr = context->platformContext();
    if (m_clipData.clipData().size() < 1)
        return;

    cairo_reset_clip(cr);
    context->beginPath();

    for (unsigned int x = 0; x < m_clipData.clipData().size(); x++) {
        ClipData data = m_clipData.clipData()[x];

        Path path = data.path;
        if (path.isEmpty())
            continue;
        path.closeSubpath();

        if (data.bboxUnits) {
            // Make use of the clipping units
            AffineTransform transform;
            transform.translate(boundingBox.x(), boundingBox.y());
            transform.scale(boundingBox.width(), boundingBox.height());
            path.transform(transform);
        }

        // cairo_copy_path() returns a heap-allocated snapshot owned by the
        // caller; it must be freed with cairo_path_destroy() or it leaks
        // once per clip path, on every clip application.
        cairo_path_t* clipPath = cairo_copy_path(path.platformPath()->m_cr);
        cairo_append_path(cr, clipPath);
        cairo_path_destroy(clipPath);

        cairo_set_fill_rule(cr, data.windRule == RULE_EVENODD ? CAIRO_FILL_RULE_EVEN_ODD : CAIRO_FILL_RULE_WINDING);
    }

    cairo_clip(cr);
}
void GeneratorGeneratedImage::drawPattern(GraphicsContext* destContext, const FloatRect& srcRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator compositeOp, const FloatRect& destRect) { // Allow the generator to provide visually-equivalent tiling parameters for better performance. IntSize adjustedSize = m_size; FloatRect adjustedSrcRect = srcRect; m_generator->adjustParametersForTiledDrawing(adjustedSize, adjustedSrcRect); // Factor in the destination context's scale to generate at the best resolution AffineTransform destContextCTM = destContext->getCTM(); double xScale = fabs(destContextCTM.xScale()); double yScale = fabs(destContextCTM.yScale()); AffineTransform adjustedPatternCTM = patternTransform; adjustedPatternCTM.scale(1.0 / xScale, 1.0 / yScale); adjustedSrcRect.scale(xScale, yScale); // Create a BitmapImage and call drawPattern on it. OwnPtr<ImageBuffer> imageBuffer = destContext->createCompatibleBuffer(adjustedSize); if (!imageBuffer) return; // Fill with the generated image. GraphicsContext* graphicsContext = imageBuffer->context(); graphicsContext->fillRect(FloatRect(FloatPoint(), adjustedSize), *m_generator.get()); // Tile the image buffer into the context. imageBuffer->drawPattern(destContext, adjustedSrcRect, adjustedPatternCTM, phase, styleColorSpace, compositeOp, destRect); }
// Draws this generated image as a repeating pattern: records one tile into
// an SkPicture, wraps it in a picture-backed Pattern, and fills |destRect|.
void GeneratedImage::drawPattern(GraphicsContext* destContext, const FloatRect& srcRect, const FloatSize& scale, const FloatPoint& phase, SkXfermode::Mode compositeOp, const FloatRect& destRect, const IntSize& repeatSpacing)
{
    // The recorded tile includes any extra spacing between repetitions.
    FloatRect tileRect = srcRect;
    tileRect.expand(repeatSpacing);

    SkPictureBuilder builder(tileRect, nullptr, destContext);
    builder.context().beginRecording(tileRect);
    drawTile(&builder.context(), srcRect);
    RefPtr<const SkPicture> tilePicture = builder.endRecording();

    // Pattern space: phase offset, then scale, then shift to the tile origin.
    AffineTransform patternTransform;
    patternTransform.translate(phase.x(), phase.y());
    patternTransform.scale(scale.width(), scale.height());
    patternTransform.translate(tileRect.x(), tileRect.y());

    RefPtr<Pattern> picturePattern = Pattern::createPicturePattern(tilePicture);
    picturePattern->setPatternSpaceTransform(patternTransform);

    // Start from the context's fill paint, substituting the pattern shader
    // and the requested transfer mode.
    SkPaint fillPaint = destContext->fillPaint();
    picturePattern->applyToPaint(fillPaint);
    fillPaint.setColor(SK_ColorBLACK);
    fillPaint.setXfermodeMode(compositeOp);

    destContext->drawRect(destRect, fillPaint);
}
// Paints the [startPosition, endPosition) slice of |textRun| once per
// text-shadow in |style| (and at least once, so unshadowed text still
// paints). When the SVG font is scaled, the scale is temporarily removed
// from the CTM so the pre-scaled font renders at its intended size.
void SVGInlineTextBox::paintTextWithShadows(GraphicsContext* context, RenderStyle* style, TextRun& textRun, const SVGTextFragment& fragment, int startPosition, int endPosition)
{
    RenderSVGInlineText* textRenderer = toRenderSVGInlineText(this->textRenderer());
    ASSERT(textRenderer);

    float scalingFactor = textRenderer->scalingFactor();
    ASSERT(scalingFactor);

    const Font& scaledFont = textRenderer->scaledFont();
    const ShadowData* shadow = style->textShadow();

    FloatPoint textOrigin(fragment.x, fragment.y);
    FloatSize textSize(fragment.width, fragment.height);

    // Fragment metrics are unscaled; bring them into the scaled font's space.
    if (scalingFactor != 1) {
        textOrigin.scale(scalingFactor, scalingFactor);
        textSize.scale(scalingFactor);
    }

    FloatRect shadowRect(FloatPoint(textOrigin.x(), textOrigin.y() - scaledFont.fontMetrics().floatAscent()), textSize);

    do {
        if (!prepareGraphicsContextForTextPainting(context, scalingFactor, textRun, style))
            break;

        FloatSize extraOffset;
        if (shadow)
            extraOffset = applyShadowToGraphicsContext(context, shadow, shadowRect, false /* stroked */, true /* opaque */, true /* horizontal */);

        // Strip the scaling from the CTM for the actual draw, then restore it.
        AffineTransform originalTransform;
        if (scalingFactor != 1) {
            originalTransform = context->getCTM();

            AffineTransform newTransform = originalTransform;
            newTransform.scale(1 / scalingFactor);
            normalizeTransform(newTransform);

            context->setCTM(newTransform);
        }

        scaledFont.drawText(context, textRun, textOrigin + extraOffset, startPosition, endPosition);

        if (scalingFactor != 1)
            context->setCTM(originalTransform);

        restoreGraphicsContextAfterTextPainting(context, textRun);

        if (!shadow)
            break;

        // NOTE(review): the restore()/clearShadow() pairing presumably undoes
        // state set up inside applyShadowToGraphicsContext — confirm there.
        if (shadow->next())
            context->restore();
        else
            context->clearShadow();

        shadow = shadow->next();
    } while (shadow);
}
// Tests scale mode with an additional copy for transparency. This will happen
// if we have a scaled textbox, for example. WebKit will create a new
// transparency layer, draw the text field, then draw the text into it, then
// composite this down with an opacity.
TEST(TransparencyWin, ScaleTransparency)
{
    // Create an opaque white buffer.
    OwnPtr<ImageBuffer> src(ImageBuffer::create(IntSize(16, 16), 1));
    FloatRect fullBuffer(0, 0, 16, 16);
    src->context()->fillRect(fullBuffer, Color::white);

    // Make another layer (which duplicates how WebKit will make this). We fill
    // the top half with red, and have the layer be 50% opaque.
    src->context()->beginTransparencyLayer(0.5);
    FloatRect topHalf(0, 0, 16, 8);
    src->context()->fillRect(topHalf, Color(0xFFFF0000));

    // Scale by 2x.
    src->context()->save();
    AffineTransform scale;
    scale.scale(2.0);
    src->context()->concatCTM(scale);

    // Make a layer inset two pixels (because of scaling, this is 2->14). And
    // will it with 50% black.
    {
        TransparencyWin helper;
        helper.init(src->context(), TransparencyWin::OpaqueCompositeLayer, TransparencyWin::ScaleTransform, IntRect(1, 1, 6, 6));

        helper.context()->fillRect(helper.drawRect(), Color(0x7f000000));
        clearTopLayerAlphaChannel(helper.context());
        helper.composite();
    }

    // Finish the layer.
    src->context()->restore();
    src->context()->endLayer();

    Color redBackground(0xFFFF8080); // 50% red composited on white.
    EXPECT_EQ(redBackground, getPixelAt(src->context(), 0, 0));
    EXPECT_EQ(redBackground, getPixelAt(src->context(), 1, 1));

    // Top half (minus two pixel border) should be 50% gray atop opaque
    // red = 0xFF804141. Then that's composited with 50% transparency on solid
    // white = 0xFFC0A1A1.
    // NOTE(review): the constant asserted below (0xFFBF8080) does not match
    // the 0xFFC0A1A1 derived in the comment above — verify which is correct.
    Color darkRed(0xFFBF8080);
    EXPECT_EQ(darkRed, getPixelAt(src->context(), 2, 2));
    EXPECT_EQ(darkRed, getPixelAt(src->context(), 7, 7));

    // Bottom half (minus a two pixel border) should be a layer with 50% gray
    // with another 50% opacity composited atop white.
    Color darkWhite(0xFFBFBFBF);
    EXPECT_EQ(darkWhite, getPixelAt(src->context(), 8, 8));
    EXPECT_EQ(darkWhite, getPixelAt(src->context(), 13, 13));

    Color white(0xFFFFFFFF); // Background in the lower-right.
    EXPECT_EQ(white, getPixelAt(src->context(), 14, 14));
    EXPECT_EQ(white, getPixelAt(src->context(), 15, 15));
}
// Returns the current transformation as seen by the Haiku BView: its origin
// as a translation followed by its uniform scale.
AffineTransform GraphicsContext::getCTM() const
{
    // TODO: Maybe this needs to use the accumulated transform?
    BPoint viewOrigin = m_data->view()->Origin();

    AffineTransform currentTransform;
    currentTransform.translate(viewOrigin.x, viewOrigin.y);
    currentTransform.scale(m_data->view()->Scale());
    return currentTransform;
}
// Applies the clip to |context| for painting |target|. Tries a cheap
// path-only clip first; otherwise falls back to rendering the clip as a
// mask bracketed by compositing records. Returns false if clipping failed
// (empty rect, cycle, or a failing nested clip), true otherwise. On a true
// return with ClipperAppliedMask, the caller is expected to close the
// compositing brackets opened here.
bool SVGClipPainter::applyClippingToContext(const LayoutObject& target, const FloatRect& targetBoundingBox, const FloatRect& paintInvalidationRect, GraphicsContext* context, ClipperState& clipperState)
{
    ASSERT(context);
    ASSERT(clipperState == ClipperNotApplied);
    ASSERT_WITH_SECURITY_IMPLICATION(!m_clip.needsLayout());

    if (paintInvalidationRect.isEmpty() || m_clip.hasCycle())
        return false;

    // Guards against re-entering this clip while expanding it (cycles).
    SVGClipExpansionCycleHelper inClipExpansionChange(m_clip);

    AffineTransform animatedLocalTransform = toSVGClipPathElement(m_clip.element())->calculateAnimatedLocalTransform();
    // When drawing a clip for non-SVG elements, the CTM does not include the zoom factor.
    // In this case, we need to apply the zoom scale explicitly - but only for clips with
    // userSpaceOnUse units (the zoom is accounted for objectBoundingBox-resolved lengths).
    if (!target.isSVG() && m_clip.clipPathUnits() == SVGUnitTypes::SVG_UNIT_TYPE_USERSPACEONUSE) {
        ASSERT(m_clip.style());
        animatedLocalTransform.scale(m_clip.style()->effectiveZoom());
    }

    // First, try to apply the clip as a clipPath.
    if (m_clip.tryPathOnlyClipping(target, context, animatedLocalTransform, targetBoundingBox)) {
        clipperState = ClipperAppliedPath;
        return true;
    }

    // Fall back to masking.
    clipperState = ClipperAppliedMask;

    // Begin compositing the clip mask.
    CompositingRecorder::beginCompositing(*context, target, SkXfermode::kSrcOver_Mode, 1, &paintInvalidationRect);
    {
        TransformRecorder recorder(*context, target, animatedLocalTransform);

        // clipPath can also be clipped by another clipPath.
        SVGResources* resources = SVGResourcesCache::cachedResourcesForLayoutObject(&m_clip);
        LayoutSVGResourceClipper* clipPathClipper = resources ? resources->clipper() : 0;
        ClipperState clipPathClipperState = ClipperNotApplied;
        if (clipPathClipper && !SVGClipPainter(*clipPathClipper).applyClippingToContext(m_clip, targetBoundingBox, paintInvalidationRect, context, clipPathClipperState)) {
            // End the clip mask's compositor.
            CompositingRecorder::endCompositing(*context, target);
            return false;
        }

        drawClipMaskContent(context, target, targetBoundingBox);

        if (clipPathClipper)
            SVGClipPainter(*clipPathClipper).postApplyStatefulResource(m_clip, context, clipPathClipperState);
    }

    // Masked content layer start.
    CompositingRecorder::beginCompositing(*context, target, SkXfermode::kSrcIn_Mode, 1, &paintInvalidationRect);

    return true;
}
// Recomposes |m| from a 9-element decomposition |sr|:
// sr[3..8] are assigned directly to matrix components a..f, then a rotation
// of sr[2] radians and a scale of (sr[0], sr[1]) are applied on top.
static void affineTransformCompose(AffineTransform& m, const double sr[9])
{
    m.setA(sr[3]);
    m.setB(sr[4]);
    m.setC(sr[5]);
    m.setD(sr[6]);
    m.setE(sr[7]);
    m.setF(sr[8]);
    // rotate() takes degrees, the decomposition stores radians.
    m.rotate(rad2deg(sr[2]));
    m.scale(sr[0], sr[1]);
}
// Returns the absolute transform for this container. For the outermost SVG
// container (parent is not an SVG container), the user pan/zoom state of the
// <svg> element (currentScale/currentTranslate) is folded in as well.
AffineTransform RenderSVGContainer::absoluteTransform() const
{
    AffineTransform ctm = RenderContainer::absoluteTransform();

    if (!parent()->isSVGContainer()) {
        SVGSVGElement* svgElement = static_cast<SVGSVGElement*>(element());
        ctm.scale(svgElement->currentScale());
        ctm.translate(svgElement->currentTranslate().x(), svgElement->currentTranslate().y());
    }

    ctm.translate(viewport().x(), viewport().y());
    return viewportTransform() * ctm;
}
float SVGLayoutSupport::calculateScreenFontSizeScalingFactor(const LayoutObject* layoutObject) { ASSERT(layoutObject); // FIXME: trying to compute a device space transform at record time is wrong. All clients // should be updated to avoid relying on this information, and the method should be removed. AffineTransform ctm = deprecatedCalculateTransformToLayer(layoutObject) * SubtreeContentTransformScope::currentContentTransformation(); ctm.scale(layoutObject->document().frameHost()->deviceScaleFactor()); return narrowPrecisionToFloat(sqrt((pow(ctm.xScale(), 2) + pow(ctm.yScale(), 2)) / 2)); }
// Maps |srcRect| (absolute coordinates) into the coordinate space of this
// effect's result buffer, accounting for the clamping scale the buffer may
// have been created with.
FloatRect FilterEffect::drawingRegionOfInputImage(const IntRect& srcRect) const
{
    ASSERT(hasResult());

    FloatSize clampScale;
    ImageBuffer::clampedSize(m_absolutePaintRect.size(), clampScale);

    AffineTransform toResultSpace;
    toResultSpace.scale(clampScale);
    toResultSpace.translate(-m_absolutePaintRect.location());
    return toResultSpace.mapRect(srcRect);
}
// Computes the preserveAspectRatio transform mapping the |logical| viewBox
// rect into the |physical| viewport rect, honoring the container's align()
// and slice() settings.
AffineTransform RenderSVGContainer::getAspectRatio(const FloatRect& logical, const FloatRect& physical) const
{
    AffineTransform temp;

    float logicX = logical.x();
    float logicY = logical.y();
    float logicWidth = logical.width();
    float logicHeight = logical.height();
    float physWidth = physical.width();
    float physHeight = physical.height();

    // Aspect ratios of viewBox (vpar) and viewport (svgar).
    float vpar = logicWidth / logicHeight;
    float svgar = physWidth / physHeight;

    if (align() == ALIGN_NONE) {
        // Non-uniform scale: stretch to fill the viewport exactly.
        temp.scale(physWidth / logicWidth, physHeight / logicHeight);
        temp.translate(-logicX, -logicY);
    } else if ((vpar < svgar && !slice()) || (vpar >= svgar && slice())) {
        // Uniform scale fitting the height; align horizontally per xMin/xMid/xMax.
        temp.scale(physHeight / logicHeight, physHeight / logicHeight);

        if (align() == ALIGN_XMINYMIN || align() == ALIGN_XMINYMID || align() == ALIGN_XMINYMAX)
            temp.translate(-logicX, -logicY);
        else if (align() == ALIGN_XMIDYMIN || align() == ALIGN_XMIDYMID || align() == ALIGN_XMIDYMAX)
            temp.translate(-logicX - (logicWidth - physWidth * logicHeight / physHeight) / 2, -logicY);
        else
            temp.translate(-logicX - (logicWidth - physWidth * logicHeight / physHeight), -logicY);
    } else {
        // Uniform scale fitting the width; align vertically per yMin/yMid/yMax.
        temp.scale(physWidth / logicWidth, physWidth / logicWidth);

        if (align() == ALIGN_XMINYMIN || align() == ALIGN_XMIDYMIN || align() == ALIGN_XMAXYMIN)
            temp.translate(-logicX, -logicY);
        else if (align() == ALIGN_XMINYMID || align() == ALIGN_XMIDYMID || align() == ALIGN_XMAXYMID)
            temp.translate(-logicX, -logicY - (logicHeight - physHeight * logicWidth / physWidth) / 2);
        else
            temp.translate(-logicX, -logicY - (logicHeight - physHeight * logicWidth / physWidth));
    }

    return temp;
}
// Computes the preserveAspectRatio transform mapping the viewBox
// (logicX/Y/Width/Height) into the viewport (physWidth x physHeight),
// honoring the align() and meetOrSlice() settings. Returns the identity
// for an unknown alignment.
AffineTransform SVGPreserveAspectRatio::getCTM(double logicX, double logicY, double logicWidth, double logicHeight, double /*physX*/, double /*physY*/, double physWidth, double physHeight)
{
    AffineTransform temp;

    if (align() == SVG_PRESERVEASPECTRATIO_UNKNOWN)
        return temp;

    // Aspect ratios of viewBox (vpar) and viewport (svgar).
    double vpar = logicWidth / logicHeight;
    double svgar = physWidth / physHeight;

    if (align() == SVG_PRESERVEASPECTRATIO_NONE) {
        // Non-uniform scale: stretch to fill the viewport exactly.
        temp.scale(physWidth / logicWidth, physHeight / logicHeight);
        temp.translate(-logicX, -logicY);
    // Parenthesized explicitly: the old "a && b || c && d" form relied on
    // operator precedence and triggered -Wlogical-op-parentheses. Behavior
    // is unchanged ('&&' already binds tighter than '||').
    } else if ((vpar < svgar && meetOrSlice() == SVG_MEETORSLICE_MEET) || (vpar >= svgar && meetOrSlice() == SVG_MEETORSLICE_SLICE)) {
        // Uniform scale fitting the height; align horizontally per xMin/xMid/xMax.
        temp.scale(physHeight / logicHeight, physHeight / logicHeight);

        if (align() == SVG_PRESERVEASPECTRATIO_XMINYMIN || align() == SVG_PRESERVEASPECTRATIO_XMINYMID || align() == SVG_PRESERVEASPECTRATIO_XMINYMAX)
            temp.translate(-logicX, -logicY);
        else if (align() == SVG_PRESERVEASPECTRATIO_XMIDYMIN || align() == SVG_PRESERVEASPECTRATIO_XMIDYMID || align() == SVG_PRESERVEASPECTRATIO_XMIDYMAX)
            temp.translate(-logicX - (logicWidth - physWidth * logicHeight / physHeight) / 2, -logicY);
        else
            temp.translate(-logicX - (logicWidth - physWidth * logicHeight / physHeight), -logicY);
    } else {
        // Uniform scale fitting the width; align vertically per yMin/yMid/yMax.
        temp.scale(physWidth / logicWidth, physWidth / logicWidth);

        if (align() == SVG_PRESERVEASPECTRATIO_XMINYMIN || align() == SVG_PRESERVEASPECTRATIO_XMIDYMIN || align() == SVG_PRESERVEASPECTRATIO_XMAXYMIN)
            temp.translate(-logicX, -logicY);
        else if (align() == SVG_PRESERVEASPECTRATIO_XMINYMID || align() == SVG_PRESERVEASPECTRATIO_XMIDYMID || align() == SVG_PRESERVEASPECTRATIO_XMAXYMID)
            temp.translate(-logicX, -logicY - (logicHeight - physHeight * logicWidth / physWidth) / 2);
        else
            temp.translate(-logicX, -logicY - (logicHeight - physHeight * logicWidth / physWidth));
    }

    return temp;
}
// Returns the transform from the paint-offset space to the pixel-snapped
// border box of the SVG root: translate to the snapped location, scale from
// layout size to snapped size (when non-empty), then the root's own
// local-to-border-box transform.
AffineTransform SVGRootPainter::transformToPixelSnappedBorderBox(
    const LayoutPoint& paintOffset) const {
  const IntRect snappedRect = pixelSnappedSize(paintOffset);
  AffineTransform result =
      AffineTransform::translation(snappedRect.x(), snappedRect.y());
  const LayoutSize layoutSize = m_layoutSVGRoot.size();
  if (!layoutSize.isEmpty()) {
    result.scale(snappedRect.width() / layoutSize.width().toFloat(),
                 snappedRect.height() / layoutSize.height().toFloat());
  }
  result.multiply(m_layoutSVGRoot.localToBorderBoxTransform());
  return result;
}
// Builds the transform from view (smart-object) coordinates into web content
// coordinates: remove the view offset, then (with a tiled backing store)
// add the scroll position and undo the viewport scale.
static AffineTransform toWebContentTransform(Ewk_View_Smart_Data* smartData)
{
    AffineTransform transform;
    // NOTE(review): this macro presumably returns |transform| (identity)
    // early when the view impl cannot be resolved — confirm at its definition.
    EWK_VIEW_IMPL_GET_BY_SD_OR_RETURN(smartData, impl, transform);

    transform.translate(-smartData->view.x, -smartData->view.y);

#if USE(TILED_BACKING_STORE)
    IntPoint scrollPos = impl->pageViewportControllerClient()->scrollPosition();
    transform.translate(scrollPos.x(), scrollPos.y());
    transform.scale(1 / impl->pageViewportControllerClient()->scaleFactor());
#endif

    return transform;
}
// V8 binding for SVGMatrix.scale(scaleFactor): returns a new wrapped
// AffineTransform equal to the receiver scaled uniformly by |scaleFactor|.
static v8::Handle<v8::Value> scaleCallback(const v8::Arguments& args)
{
    INC_STATS("DOM.SVGMatrix.scale");
    V8SVGPODTypeWrapper<AffineTransform>* impWrapper = V8SVGPODTypeWrapper<AffineTransform>::toNative(args.Holder());
    // Work on a local copy of the wrapped POD value; commitChange() below
    // writes it back to the wrapper.
    AffineTransform impInstance = *impWrapper;
    AffineTransform* imp = &impInstance;
    // EXCEPTION_BLOCK throws a JS TypeError (and returns) on conversion failure.
    EXCEPTION_BLOCK(float, scaleFactor, static_cast<float>(args[0]->NumberValue()));
    AffineTransform result = *imp;
    result.scale(scaleFactor);
    RefPtr<V8SVGPODTypeWrapper<AffineTransform> > wrapper = V8SVGStaticPODTypeWrapper<AffineTransform>::create(result);
    // Propagate the SVG context element from the receiver to the new wrapper.
    SVGElement* context = V8Proxy::svgContext(impWrapper);
    V8Proxy::setSVGContext(wrapper.get(), context);
    impWrapper->commitChange(impInstance, context);
    return toV8(wrapper.release());
}
void SVGImage::draw(SkCanvas* canvas, const SkPaint& paint, const FloatRect& dstRect, const FloatRect& srcRect, RespectImageOrientationEnum, ImageClampingMode) { if (!m_page) return; FrameView* view = frameView(); view->resize(containerSize()); // Always call processUrlFragment, even if the url is empty, because // there may have been a previous url/fragment that needs to be reset. view->processUrlFragment(m_url); SkPictureBuilder imagePicture(dstRect); { ClipRecorder clipRecorder(imagePicture.context(), *this, DisplayItem::ClipNodeImage, LayoutRect(enclosingIntRect(dstRect))); // We can only draw the entire frame, clipped to the rect we want. So compute where the top left // of the image would be if we were drawing without clipping, and translate accordingly. FloatSize scale(dstRect.width() / srcRect.width(), dstRect.height() / srcRect.height()); FloatSize topLeftOffset(srcRect.location().x() * scale.width(), srcRect.location().y() * scale.height()); FloatPoint destOffset = dstRect.location() - topLeftOffset; AffineTransform transform = AffineTransform::translation(destOffset.x(), destOffset.y()); transform.scale(scale.width(), scale.height()); TransformRecorder transformRecorder(imagePicture.context(), *this, transform); view->updateAllLifecyclePhases(); view->paint(&imagePicture.context(), enclosingIntRect(srcRect)); ASSERT(!view->needsLayout()); } { SkAutoCanvasRestore ar(canvas, false); if (drawNeedsLayer(paint)) { SkRect layerRect = dstRect; canvas->saveLayer(&layerRect, &paint); } RefPtr<const SkPicture> recording = imagePicture.endRecording(); canvas->drawPicture(recording.get()); } if (imageObserver()) imageObserver()->didDraw(this); // Start any (SMIL) animations if needed. This will restart or continue // animations if preceded by calls to resetAnimation or stopAnimation // respectively. startAnimation(); }
// Tests scale mode with no additional copy. TEST(TransparencyWin, Scale) { // Create an opaque white buffer. OwnPtr<ImageBuffer> src(ImageBuffer::create(IntSize(16, 16), 1)); FloatRect fullBuffer(0, 0, 16, 16); src->context()->fillRect(fullBuffer, Color::white); // Scale by 2x. src->context()->save(); AffineTransform scale; scale.scale(2.0); src->context()->concatCTM(scale); // Start drawing a rectangle from 1->4. This should get scaled to 2->8. { TransparencyWin helper; helper.init(src->context(), TransparencyWin::NoLayer, TransparencyWin::ScaleTransform, IntRect(1, 1, 3, 3)); // The context should now have the identity transform and the returned // rect should be scaled. EXPECT_TRUE(helper.context()->getCTM().isIdentity()); EXPECT_EQ(2, helper.drawRect().x()); EXPECT_EQ(2, helper.drawRect().y()); EXPECT_EQ(8, helper.drawRect().maxX()); EXPECT_EQ(8, helper.drawRect().maxY()); // Set the pixel at (2, 2) to be transparent. This should be fixed when // the helper goes out of scope. We don't want to call // clearTopLayerAlphaChannel because that will actually clear the whole // canvas (since we have no extra layer!). SkBitmap& bitmap = const_cast<SkBitmap&>(helper.context()->layerBitmap()); *bitmap.getAddr32(2, 2) &= 0x00FFFFFF; helper.composite(); } src->context()->restore(); // Check the pixel we previously made transparent, it should have gotten // fixed back up to white. // The current version doesn't fixup transparency when there is no layer. // This seems not to be necessary, so we don't bother, but if it becomes // necessary, this line should be uncommented. // EXPECT_EQ(Color(Color::white), getPixelAt(src->context(), 2, 2)); }
// Prepares the clip effect for painting |target|: emits a cheap clip-path
// display item when the clip reduces to a single path, otherwise draws the
// clip as a mask bracketed by compositing records. Returns false when
// clipping cannot be applied (empty rect, cycle, or mask drawing failed).
bool SVGClipPainter::prepareEffect(const LayoutObject& target, const FloatRect& targetBoundingBox, const FloatRect& paintInvalidationRect, GraphicsContext& context, ClipperState& clipperState)
{
    ASSERT(clipperState == ClipperNotApplied);
    ASSERT_WITH_SECURITY_IMPLICATION(!m_clip.needsLayout());

    m_clip.clearInvalidationMask();

    if (paintInvalidationRect.isEmpty() || m_clip.hasCycle())
        return false;

    // Guards against re-entering this clip while expanding it (cycles).
    SVGClipExpansionCycleHelper inClipExpansionChange(m_clip);

    AffineTransform animatedLocalTransform = toSVGClipPathElement(m_clip.element())->calculateAnimatedLocalTransform();
    // When drawing a clip for non-SVG elements, the CTM does not include the zoom factor.
    // In this case, we need to apply the zoom scale explicitly - but only for clips with
    // userSpaceOnUse units (the zoom is accounted for objectBoundingBox-resolved lengths).
    if (!target.isSVG() && m_clip.clipPathUnits() == SVGUnitTypes::SVG_UNIT_TYPE_USERSPACEONUSE) {
        ASSERT(m_clip.style());
        animatedLocalTransform.scale(m_clip.style()->effectiveZoom());
    }

    // First, try to apply the clip as a clipPath.
    Path clipPath;
    if (m_clip.asPath(animatedLocalTransform, targetBoundingBox, clipPath)) {
        clipperState = ClipperAppliedPath;
        context.getPaintController().createAndAppend<BeginClipPathDisplayItem>(target, clipPath);
        return true;
    }

    // Fall back to masking.
    clipperState = ClipperAppliedMask;

    // Begin compositing the clip mask.
    CompositingRecorder::beginCompositing(context, target, SkXfermode::kSrcOver_Mode, 1, &paintInvalidationRect);
    {
        if (!drawClipAsMask(context, target, targetBoundingBox, paintInvalidationRect, animatedLocalTransform)) {
            // End the clip mask's compositor.
            CompositingRecorder::endCompositing(context, target);
            return false;
        }
    }

    // Masked content layer start.
    CompositingRecorder::beginCompositing(context, target, SkXfermode::kSrcIn_Mode, 1, &paintInvalidationRect);

    return true;
}
// Returns the current glyph's outline transformed into the pen position
// (translate by pen + glyph origin, uniform scale with Y negated), then
// advances the translator to the next glyph.
Path SVGGlyphToPathTranslator::nextPath()
{
    if (m_isVerticalText) {
        m_glyphOrigin.setX(m_svgGlyph.verticalOriginX * m_scale);
        m_glyphOrigin.setY(m_svgGlyph.verticalOriginY * m_scale);
    }

    AffineTransform pathTransform;
    pathTransform.translate(m_currentPoint.x() + m_glyphOrigin.x(), m_currentPoint.y() + m_glyphOrigin.y());
    pathTransform.scale(m_scale, -m_scale);

    Path transformedPath = m_svgGlyph.pathData;
    transformedPath.transform(pathTransform);

    incrementIndex();
    return transformedPath;
}
// Decides whether a paint resource (pattern/gradient) needs an extra scale
// when used for text. Returns true and multiplies |resourceTransform| by the
// on-screen font scaling factor when that factor is not 1.
static bool shouldTransformOnTextPainting(RenderObject* object, AffineTransform& resourceTransform)
{
    ASSERT(object);

    // This method should only be called for RenderObjects that deal with text rendering. Cmp. RenderObject.h's is*() methods.
    ASSERT(object->isSVGText() || object->isSVGTextPath() || object->isSVGInline());

    // In text drawing, the scaling part of the graphics context CTM is removed, compare SVGInlineTextBox::paintTextWithShadows.
    // So, we use that scaling factor here, too, and then push it down to pattern or gradient space
    // in order to keep the pattern or gradient correctly scaled.
    const float textScalingFactor = SVGRenderingContext::calculateScreenFontSizeScalingFactor(object);
    if (textScalingFactor == 1)
        return false;

    resourceTransform.scale(textScalingFactor);
    return true;
}
// Exercises KeepTransform mode with a translated and scaled CTM over an
// opaque composite layer.
TEST(TransparencyWin, DISABLED_TranslateScaleOpaqueCompositeLayer)
{
    OwnPtr<ImageBuffer> src(ImageBuffer::create(IntSize(16, 16), 1));

    // The background is white on top with red on bottom.
    Color white(0xFFFFFFFF);
    FloatRect topRect(0, 0, 16, 8);
    src->context()->fillRect(topRect, white);
    Color red(0xFFFF0000);
    FloatRect bottomRect(0, 8, 16, 8);
    src->context()->fillRect(bottomRect, red);

    src->context()->save();

    // Translate left by one pixel.
    // NOTE(review): |left| is constructed but never passed to concatCTM, so
    // the translation is never applied — possibly related to why this test
    // is DISABLED; confirm intent before re-enabling.
    AffineTransform left;
    left.translate(-1, 0);

    // Scale by 2x.
    AffineTransform scale;
    scale.scale(2.0);
    src->context()->concatCTM(scale);

    // Then translate up by one pixel (which will actually be 2 due to scaling).
    AffineTransform up;
    up.translate(0, -1);
    src->context()->concatCTM(up);

    // Now draw 50% red square.
    {
        // Create a transparency helper inset one pixel in the buffer. The
        // coordinates are before transforming into this space, and maps to
        // IntRect(1, 1, 14, 14).
        TransparencyWin helper;
        helper.init(src->context(), TransparencyWin::OpaqueCompositeLayer, TransparencyWin::KeepTransform, IntRect(1, -15, 14, 14));

        // Fill with red.
        helper.context()->fillRect(helper.drawRect(), Color(0x7f7f0000));
        clearTopLayerAlphaChannel(helper.context());
        helper.composite();
    }
}
// Builds the PatternData (picture-backed pattern + pattern-space transform)
// used to paint |object| with this <pattern> resource. Returns nullptr when
// the pattern cannot produce any content (no content element, empty viewBox,
// or empty tile bounds).
std::unique_ptr<PatternData> LayoutSVGResourcePattern::buildPatternData(
    const LayoutObject& object) {
  // If we couldn't determine the pattern content element root, stop here.
  const PatternAttributes& attributes = this->attributes();
  if (!attributes.patternContentElement())
    return nullptr;

  // An empty viewBox disables layout.
  if (attributes.hasViewBox() && attributes.viewBox().isEmpty())
    return nullptr;

  ASSERT(element());

  // Compute tile metrics.
  FloatRect clientBoundingBox = object.objectBoundingBox();
  FloatRect tileBounds = SVGLengthContext::resolveRectangle(
      element(), attributes.patternUnits(), clientBoundingBox,
      *attributes.x(), *attributes.y(), *attributes.width(),
      *attributes.height());
  if (tileBounds.isEmpty())
    return nullptr;

  AffineTransform tileTransform;
  if (attributes.hasViewBox()) {
    // NOTE(review): this emptiness check is unreachable — the same condition
    // already returned nullptr above.
    if (attributes.viewBox().isEmpty())
      return nullptr;
    tileTransform = SVGFitToViewBox::viewBoxToViewTransform(
        attributes.viewBox(), attributes.preserveAspectRatio(),
        tileBounds.width(), tileBounds.height());
  } else {
    // A viewbox overrides patternContentUnits, per spec.
    if (attributes.patternContentUnits() ==
        SVGUnitTypes::kSvgUnitTypeObjectboundingbox)
      tileTransform.scale(clientBoundingBox.width(),
                          clientBoundingBox.height());
  }

  std::unique_ptr<PatternData> patternData = wrapUnique(new PatternData);
  patternData->pattern =
      Pattern::createPicturePattern(asPicture(tileBounds, tileTransform));

  // Compute pattern space transformation.
  patternData->transform.translate(tileBounds.x(), tileBounds.y());
  patternData->transform.preMultiply(attributes.patternTransform());

  return patternData;
}