// EXTEND_PAD won't help us here; we have to create a temporary surface to hold // the subimage of pixels we're allowed to sample. static already_AddRefed<gfxDrawable> CreateSamplingRestrictedDrawable(gfxDrawable* aDrawable, gfxContext* aContext, const ImageRegion& aRegion, const SurfaceFormat aFormat) { PROFILER_LABEL("gfxUtils", "CreateSamplingRestricedDrawable", js::ProfileEntry::Category::GRAPHICS); gfxRect clipExtents = aContext->GetClipExtents(); // Inflate by one pixel because bilinear filtering will sample at most // one pixel beyond the computed image pixel coordinate. clipExtents.Inflate(1.0); gfxRect needed = aRegion.IntersectAndRestrict(clipExtents); needed.RoundOut(); // if 'needed' is empty, nothing will be drawn since aFill // must be entirely outside the clip region, so it doesn't // matter what we do here, but we should avoid trying to // create a zero-size surface. if (needed.IsEmpty()) return nullptr; gfxIntSize size(int32_t(needed.Width()), int32_t(needed.Height())); RefPtr<DrawTarget> target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(ToIntSize(size), aFormat); if (!target) { return nullptr; } nsRefPtr<gfxContext> tmpCtx = new gfxContext(target); tmpCtx->SetOperator(OptimalFillOperator()); aDrawable->Draw(tmpCtx, needed - needed.TopLeft(), true, GraphicsFilter::FILTER_FAST, 1.0, gfxMatrix().Translate(needed.TopLeft())); RefPtr<SourceSurface> surface = target->Snapshot(); nsRefPtr<gfxDrawable> drawable = new gfxSurfaceDrawable(surface, size, gfxMatrix().Translate(-needed.TopLeft())); return drawable.forget(); }
// Renders mTargetFrame's fill or stroke paint (depending on which SourceInfo
// is passed) into an offscreen surface covering aSource->mNeededBounds, and
// stores the snapshot and its rect on aSource.
//
// Returns NS_ERROR_OUT_OF_MEMORY if the offscreen target cannot be created,
// NS_ERROR_FAILURE if the filter-space transform is not invertible, and
// NS_OK otherwise.  NOTE(review): aTargetDT is unused in this body —
// presumably kept for signature compatibility; confirm against callers.
nsresult
nsFilterInstance::BuildSourcePaint(SourceInfo *aSource, DrawTarget* aTargetDT)
{
  MOZ_ASSERT(mTargetFrame);

  nsIntRect neededRect = aSource->mNeededBounds;

  RefPtr<DrawTarget> offscreenDT =
    gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
      neededRect.Size(), SurfaceFormat::B8G8R8A8);
  if (!offscreenDT) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // GetFilterSpaceToDeviceSpaceTransform() is inverted in place, so after
  // this call the matrix maps device space -> filter space (hence the name).
  gfxMatrix deviceToFilterSpace = GetFilterSpaceToDeviceSpaceTransform();
  if (!deviceToFilterSpace.Invert()) {
    return NS_ERROR_FAILURE;
  }

  // Only paint when the paint transform is usable; with a singular transform
  // we skip painting and just snapshot the (transparent) target below.
  if (!mPaintTransform.IsSingular()) {
    RefPtr<gfxContext> gfx = new gfxContext(offscreenDT);
    gfx->Save();
    // Compose: paint space -> device -> filter space, then shift so the
    // needed rect's top-left lands at the offscreen target's origin.
    gfx->Multiply(mPaintTransform *
                  deviceToFilterSpace *
                  gfxMatrix::Translation(-neededRect.TopLeft()));
    GeneralPattern pattern;
    if (aSource == &mFillPaint) {
      nsSVGUtils::MakeFillPatternFor(mTargetFrame, gfx, &pattern);
    } else if (aSource == &mStrokePaint) {
      nsSVGUtils::MakeStrokePatternFor(mTargetFrame, gfx, &pattern);
    }
    if (pattern.GetPattern()) {
      // Fill the needed area (converted back to user space) with the paint.
      offscreenDT->FillRect(
        ToRect(FilterSpaceToUserSpace(ThebesRect(neededRect))), pattern);
    }
    gfx->Restore();
  }

  aSource->mSourceSurface = offscreenDT->Snapshot();
  aSource->mSurfaceRect = neededRect;

  return NS_OK;
}
// Returns a SourceSurface for this image.  The result is cached in
// mSourceSurface; when no decoded buffer exists we defer to the base class.
already_AddRefed<gfx::SourceSurface>
BasicPlanarYCbCrImage::GetAsSourceSurface()
{
  NS_ASSERTION(NS_IsMainThread(), "Must be main thread");

  // Fast path: hand back the cached surface if we already built one.
  if (mSourceSurface) {
    RefPtr<gfx::SourceSurface> cached(mSourceSurface);
    return cached.forget();
  }

  // Without decoded pixels, fall back to the generic implementation.
  if (!mDecodedBuffer) {
    return PlanarYCbCrImage::GetAsSourceSurface();
  }

  gfxImageFormat format = GetOffscreenFormat();

  RefPtr<gfx::SourceSurface> result;
  {
    // Create a DrawTarget so that we can own the data inside mDecodeBuffer.
    // We create the target out of mDecodedBuffer, and get a snapshot from it.
    // The draw target is destroyed on scope exit and the surface owns the data.
    RefPtr<gfx::DrawTarget> dataTarget =
      gfxPlatform::GetPlatform()->CreateDrawTargetForData(
        mDecodedBuffer, mSize, mStride,
        gfx::ImageFormatToSurfaceFormat(format));
    if (!dataTarget) {
      return nullptr;
    }
    result = dataTarget->Snapshot();
  }

  // The snapshot now owns the pixel data, so the raw buffer can be recycled.
  mRecycleBin->RecycleBuffer(mDecodedBuffer.forget(), mSize.height * mStride);

  mSourceSurface = result;
  return result.forget();
}
// Rasterizes this callback drawable into an offscreen surface and wraps the
// snapshot in a gfxSurfaceDrawable.  Returns nullptr if the offscreen target
// or the snapshot cannot be created.
already_AddRefed<gfxSurfaceDrawable>
gfxCallbackDrawable::MakeSurfaceDrawable(const SamplingFilter aSamplingFilter)
{
  gfxPlatform* platform = gfxPlatform::GetPlatform();
  SurfaceFormat surfaceFormat =
    platform->Optimal2DFormatForContent(gfxContentType::COLOR_ALPHA);
  RefPtr<DrawTarget> offscreen =
    platform->CreateOffscreenContentDrawTarget(mSize, surfaceFormat);
  if (!offscreen || !offscreen->IsValid())
    return nullptr;

  RefPtr<gfxContext> drawCtx = gfxContext::CreateOrNull(offscreen);
  MOZ_ASSERT(drawCtx); // target validity was checked just above

  // Invoke the callback to paint the full extent of the drawable.
  Draw(drawCtx, gfxRect(0, 0, mSize.width, mSize.height), ExtendMode::CLAMP,
       aSamplingFilter);

  RefPtr<SourceSurface> snapshot = offscreen->Snapshot();
  if (!snapshot) {
    return nullptr;
  }

  RefPtr<gfxSurfaceDrawable> result = new gfxSurfaceDrawable(snapshot, mSize);
  return result.forget();
}
// Rasterizes the vector image at aSize into an offscreen B8G8R8A8 surface and
// returns a snapshot, or nullptr on any failure.  (The return type is declared
// on the preceding source line, outside this excerpt.)
VectorImage::GetFrameAtSize(const IntSize& aSize,
                            uint32_t aWhichFrame,
                            uint32_t aFlags)
{
  MOZ_ASSERT(aWhichFrame <= FRAME_MAX_VALUE);

  // A zero-area surface is useless and would fail creation below.
  if (aSize.IsEmpty()) {
    return nullptr;
  }

  // Release-build guard matching the assertion above.
  if (aWhichFrame > FRAME_MAX_VALUE) {
    return nullptr;
  }

  // Can't rasterize a broken or still-loading image.
  if (mError || !mIsFullyLoaded) {
    return nullptr;
  }

  // Make our surface the size of what will ultimately be drawn to it.
  // (either the full image size, or the restricted region)
  RefPtr<DrawTarget> dt = gfxPlatform::GetPlatform()->
    CreateOffscreenContentDrawTarget(aSize, SurfaceFormat::B8G8R8A8);
  if (!dt || !dt->IsValid()) {
    NS_ERROR("Could not create a DrawTarget");
    return nullptr;
  }

  RefPtr<gfxContext> context = gfxContext::CreateOrNull(dt);
  MOZ_ASSERT(context); // already checked the draw target above

  auto result = Draw(context, aSize, ImageRegion::Create(aSize),
                     aWhichFrame, SamplingFilter::POINT, Nothing(),
                     aFlags, 1.0);

  // Only hand back pixels if the draw fully succeeded.
  return result == DrawResult::SUCCESS ? dt->Snapshot() : nullptr;
}
// Returns a SourceSurface for this bitmap, cropped to mPictureRect and
// optimized for drawing to aTarget.  The result is cached in mSurface; once
// cropped, mPictureRect is rebased to (0,0) so subsequent calls skip the crop.
already_AddRefed<SourceSurface>
ImageBitmap::PrepareForDrawTarget(gfx::DrawTarget* aTarget)
{
  MOZ_ASSERT(aTarget);

  // Lazily materialize the surface from the underlying image data.
  if (!mSurface) {
    mSurface = mData->GetAsSourceSurface();
  }

  if (!mSurface) {
    return nullptr;
  }

  RefPtr<DrawTarget> target = aTarget;
  IntRect surfRect(0, 0, mSurface->GetSize().width, mSurface->GetSize().height);

  // Check if we still need to crop our surface
  if (!mPictureRect.IsEqualEdges(surfRect)) {
    IntRect surfPortion = surfRect.Intersect(mPictureRect);

    // the crop lies entirely outside the surface area, nothing to draw
    if (surfPortion.IsEmpty()) {
      mSurface = nullptr;
      // mSurface was just cleared, so this effectively returns nullptr.
      RefPtr<gfx::SourceSurface> surface(mSurface);
      return surface.forget();
    }

    // Destination offset inside the new surface for the surviving portion.
    IntPoint dest(std::max(0, surfPortion.X() - mPictureRect.X()),
                  std::max(0, surfPortion.Y() - mPictureRect.Y()));

    // Do not initialize this target with mPictureRect.Size().
    // In the Windows8 D2D1 backend, it might trigger "partial upload" from a
    // non-SourceSurfaceD2D1 surface to a D2D1Image in the following
    // CopySurface() step. However, the "partial upload" only supports uploading
    // a rectangle starts from the upper-left point, which means it cannot
    // upload an arbitrary part of the source surface and this causes problems
    // if the mPictureRect is not starts from the upper-left point.
    target = target->CreateSimilarDrawTarget(mSurface->GetSize(),
                                             target->GetFormat());
    if (!target) {
      mSurface = nullptr;
      // mSurface was just cleared, so this effectively returns nullptr.
      RefPtr<gfx::SourceSurface> surface(mSurface);
      return surface.forget();
    }

    // Make mCropRect match new surface we've cropped to
    mPictureRect.MoveTo(0, 0);
    target->CopySurface(mSurface, surfPortion, dest);
    mSurface = target->Snapshot();
  }

  // Replace our surface with one optimized for the target we're about to draw
  // to, under the assumption it'll likely be drawn again to that target.
  // This call should be a no-op for already-optimized surfaces
  mSurface = target->OptimizeSourceSurface(mSurface);

  RefPtr<gfx::SourceSurface> surface(mSurface);
  return surface.forget();
}
// Produces the drag-feedback surface: either the image behind aImageLoader
// (sync-decoded and drawn into an offscreen surface) or a snapshot of
// aCanvas.  Also sizes aScreenDragRect to the source's device-pixel size.
nsresult
nsBaseDragService::DrawDragForImage(nsPresContext* aPresContext,
                                    nsIImageLoadingContent* aImageLoader,
                                    HTMLCanvasElement* aCanvas,
                                    LayoutDeviceIntRect* aScreenDragRect,
                                    RefPtr<SourceSurface>* aSurface)
{
  nsCOMPtr<imgIContainer> imgContainer;
  if (aImageLoader) {
    nsCOMPtr<imgIRequest> imgRequest;
    nsresult rv =
      aImageLoader->GetRequest(nsIImageLoadingContent::CURRENT_REQUEST,
                               getter_AddRefs(imgRequest));
    NS_ENSURE_SUCCESS(rv, rv);
    if (!imgRequest)
      return NS_ERROR_NOT_AVAILABLE;

    rv = imgRequest->GetImage(getter_AddRefs(imgContainer));
    NS_ENSURE_SUCCESS(rv, rv);
    if (!imgContainer)
      return NS_ERROR_NOT_AVAILABLE;

    // use the size of the image as the size of the drag image
    int32_t imageWidth, imageHeight;
    rv = imgContainer->GetWidth(&imageWidth);
    NS_ENSURE_SUCCESS(rv, rv);

    rv = imgContainer->GetHeight(&imageHeight);
    NS_ENSURE_SUCCESS(rv, rv);

    aScreenDragRect->SizeTo(aPresContext->CSSPixelsToDevPixels(imageWidth),
                            aPresContext->CSSPixelsToDevPixels(imageHeight));
  } else {
    // XXX The canvas size should be converted to dev pixels.
    NS_ASSERTION(aCanvas, "both image and canvas are null");
    nsIntSize sz = aCanvas->GetSize();
    aScreenDragRect->SizeTo(sz.width, sz.height);
  }

  nsIntSize destSize;
  destSize.width = aScreenDragRect->Width();
  destSize.height = aScreenDragRect->Height();
  // A degenerate drag rect cannot be rendered.
  if (destSize.width == 0 || destSize.height == 0)
    return NS_ERROR_FAILURE;

  nsresult result = NS_OK;
  if (aImageLoader) {
    RefPtr<DrawTarget> dt = gfxPlatform::GetPlatform()->
      CreateOffscreenContentDrawTarget(destSize, SurfaceFormat::B8G8R8A8);
    if (!dt || !dt->IsValid())
      return NS_ERROR_FAILURE;

    RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(dt);
    if (!ctx)
      return NS_ERROR_FAILURE;

    // Synchronously decode so the drag image is complete when the drag starts.
    ImgDrawResult res =
      imgContainer->Draw(ctx, destSize, ImageRegion::Create(destSize),
                         imgIContainer::FRAME_CURRENT, SamplingFilter::GOOD,
                         /* no SVGImageContext */ Nothing(),
                         imgIContainer::FLAG_SYNC_DECODE, 1.0);
    if (res == ImgDrawResult::BAD_IMAGE || res == ImgDrawResult::BAD_ARGS) {
      return NS_ERROR_FAILURE;
    }
    *aSurface = dt->Snapshot();
  } else {
    *aSurface = aCanvas->GetSurfaceSnapshot();
  }

  return result;
}
// Returns a SourceSurface for this bitmap, cropped to mPictureRect and
// optimized for drawing to aTarget.  The result is cached in mSurface; once
// cropped, mPictureRect is rebased to (0,0) so subsequent calls skip the crop.
// Contains a software fallback for the Windows 8.1 D2D1 backend.
already_AddRefed<SourceSurface>
ImageBitmap::PrepareForDrawTarget(gfx::DrawTarget* aTarget)
{
  MOZ_ASSERT(aTarget);

  if (!mData) {
    return nullptr;
  }

  // Lazily materialize the surface from the underlying image data.
  if (!mSurface) {
    mSurface = mData->GetAsSourceSurface();
  }

  if (!mSurface) {
    return nullptr;
  }

  RefPtr<DrawTarget> target = aTarget;
  IntRect surfRect(0, 0, mSurface->GetSize().width, mSurface->GetSize().height);

  // Check if we still need to crop our surface
  if (!mPictureRect.IsEqualEdges(surfRect)) {
    IntRect surfPortion = surfRect.Intersect(mPictureRect);

    // the crop lies entirely outside the surface area, nothing to draw
    if (surfPortion.IsEmpty()) {
      mSurface = nullptr;
      // mSurface was just cleared, so this effectively returns nullptr.
      RefPtr<gfx::SourceSurface> surface(mSurface);
      return surface.forget();
    }

    // Destination offset inside the new surface for the surviving portion.
    IntPoint dest(std::max(0, surfPortion.X() - mPictureRect.X()),
                  std::max(0, surfPortion.Y() - mPictureRect.Y()));

    // We must initialize this target with mPictureRect.Size() because the
    // specification states that if the cropping area is given, then return an
    // ImageBitmap with the size equals to the cropping area.
    target = target->CreateSimilarDrawTarget(mPictureRect.Size(),
                                             target->GetFormat());
    if (!target) {
      mSurface = nullptr;
      // mSurface was just cleared, so this effectively returns nullptr.
      RefPtr<gfx::SourceSurface> surface(mSurface);
      return surface.forget();
    }

    // We need to fall back to generic copying and cropping for the Windows8.1,
    // D2D1 backend.
    // In the Windows8.1 D2D1 backend, it might trigger "partial upload" from a
    // non-SourceSurfaceD2D1 surface to a D2D1Image in the following
    // CopySurface() step. However, the "partial upload" only supports uploading
    // a rectangle starts from the upper-left point, which means it cannot
    // upload an arbitrary part of the source surface and this causes problems
    // if the mPictureRect is not starts from the upper-left point.
    if (target->GetBackendType() == BackendType::DIRECT2D1_1 &&
        mSurface->GetType() != SurfaceType::D2D1_1_IMAGE) {
      // Software path: read the pixels back and crop/copy on the CPU.
      RefPtr<DataSourceSurface> dataSurface = mSurface->GetDataSurface();
      if (NS_WARN_IF(!dataSurface)) {
        mSurface = nullptr;
        // mSurface was just cleared, so this effectively returns nullptr.
        RefPtr<gfx::SourceSurface> surface(mSurface);
        return surface.forget();
      }
      mSurface = CropAndCopyDataSourceSurface(dataSurface, mPictureRect);
    } else {
      target->CopySurface(mSurface, surfPortion, dest);
      mSurface = target->Snapshot();
    }

    // Make mCropRect match new surface we've cropped to
    mPictureRect.MoveTo(0, 0);
  }

  // Replace our surface with one optimized for the target we're about to draw
  // to, under the assumption it'll likely be drawn again to that target.
  // This call should be a no-op for already-optimized surfaces
  mSurface = target->OptimizeSourceSurface(mSurface);

  RefPtr<gfx::SourceSurface> surface(mSurface);
  return surface.forget();
}
// Produces the drag-feedback surface: either the image behind aImageLoader
// (sync-decoded and drawn into an offscreen surface) or a snapshot of
// aCanvas.  Also sizes aScreenDragRect to the source's size in CSS pixels.
// NOTE(review): aScreenX/aScreenY are not used in this body — presumably
// consumed by an overriding/surrounding implementation; confirm at call sites.
nsresult
nsBaseDragService::DrawDragForImage(nsIImageLoadingContent* aImageLoader,
                                    HTMLCanvasElement* aCanvas,
                                    int32_t aScreenX, int32_t aScreenY,
                                    nsIntRect* aScreenDragRect,
                                    RefPtr<SourceSurface>* aSurface)
{
  nsCOMPtr<imgIContainer> imgContainer;
  if (aImageLoader) {
    nsCOMPtr<imgIRequest> imgRequest;
    nsresult rv =
      aImageLoader->GetRequest(nsIImageLoadingContent::CURRENT_REQUEST,
                               getter_AddRefs(imgRequest));
    NS_ENSURE_SUCCESS(rv, rv);
    if (!imgRequest)
      return NS_ERROR_NOT_AVAILABLE;

    rv = imgRequest->GetImage(getter_AddRefs(imgContainer));
    NS_ENSURE_SUCCESS(rv, rv);
    if (!imgContainer)
      return NS_ERROR_NOT_AVAILABLE;

    // use the size of the image as the size of the drag image
    imgContainer->GetWidth(&aScreenDragRect->width);
    imgContainer->GetHeight(&aScreenDragRect->height);
  } else {
    NS_ASSERTION(aCanvas, "both image and canvas are null");
    nsIntSize sz = aCanvas->GetSize();
    aScreenDragRect->width = sz.width;
    aScreenDragRect->height = sz.height;
  }

  nsIntSize srcSize = aScreenDragRect->Size();
  nsIntSize destSize = srcSize;

  // A degenerate drag rect cannot be rendered.
  if (destSize.width == 0 || destSize.height == 0)
    return NS_ERROR_FAILURE;

  nsresult result = NS_OK;
  if (aImageLoader) {
    RefPtr<DrawTarget> dt = gfxPlatform::GetPlatform()->
      CreateOffscreenContentDrawTarget(destSize, SurfaceFormat::B8G8R8A8);
    if (!dt || !dt->IsValid())
      return NS_ERROR_FAILURE;

    RefPtr<gfxContext> ctx = gfxContext::ForDrawTarget(dt);
    if (!ctx)
      return NS_ERROR_FAILURE;

    // Synchronously decode so the drag image is complete when the drag starts.
    DrawResult res =
      imgContainer->Draw(ctx, destSize, ImageRegion::Create(destSize),
                         imgIContainer::FRAME_CURRENT, Filter::GOOD,
                         /* no SVGImageContext */ Nothing(),
                         imgIContainer::FLAG_SYNC_DECODE);
    if (res == DrawResult::BAD_IMAGE || res == DrawResult::BAD_ARGS) {
      return NS_ERROR_FAILURE;
    }
    *aSurface = dt->Snapshot();
  } else {
    *aSurface = aCanvas->GetSurfaceSnapshot();
  }

  return result;
}
// Paints aParams.frame with its SVG effects (filter, clip-path, mask, opacity,
// mix-blend-mode) applied.  Simple cases paint directly into aParams.ctx;
// complex combinations render through temporary groups/surfaces that are
// blended back at the end.
void
nsSVGIntegrationUtils::PaintFramesWithEffects(const PaintFramesParams& aParams)
{
#ifdef DEBUG
  NS_ASSERTION(!(aParams.frame->GetStateBits() & NS_FRAME_SVG_LAYOUT) ||
               (NS_SVGDisplayListPaintingEnabled() &&
                !(aParams.frame->GetStateBits() & NS_FRAME_IS_NONDISPLAY)),
               "Should not use nsSVGIntegrationUtils on this SVG frame");
#endif

  /* SVG defines the following rendering model:
   *
   *  1. Render geometry
   *  2. Apply filter
   *  3. Apply clipping, masking, group opacity
   *
   * We follow this, but perform a couple of optimizations:
   *
   * + Use cairo's clipPath when representable natively (single object
   *   clip region).
   *
   * + Merge opacity and masking if both used together.
   */
  nsIFrame* frame = aParams.frame;
  const nsIContent* content = frame->GetContent();
  bool hasSVGLayout = (frame->GetStateBits() & NS_FRAME_SVG_LAYOUT);
  if (hasSVGLayout) {
    nsISVGChildFrame *svgChildFrame = do_QueryFrame(frame);
    if (!svgChildFrame || !frame->GetContent()->IsSVGElement()) {
      NS_ASSERTION(false, "why?");
      return;
    }
    if (!static_cast<const nsSVGElement*>(content)->HasValidDimensions()) {
      return; // The SVG spec says not to draw _anything_
    }
  }

  float opacity = frame->StyleEffects()->mOpacity;
  // Fully transparent: nothing to paint at all.
  if (opacity == 0.0f) {
    return;
  }
  // When the opacity can be folded into the SVG paint itself (or the caller
  // handles it), treat it as opaque here.
  if (opacity != 1.0f &&
      (nsSVGUtils::CanOptimizeOpacity(frame) || aParams.callerPaintsOpacity)) {
    opacity = 1.0f;
  }
  MOZ_ASSERT(!nsSVGUtils::CanOptimizeOpacity(frame) ||
             !aParams.callerPaintsOpacity,
             "How can we be optimizing the opacity into the svg as well as having the caller paint it?");

  /* Properties are added lazily and may have been removed by a restyle,
     so make sure all applicable ones are set again. */
  nsIFrame* firstFrame =
    nsLayoutUtils::FirstContinuationOrIBSplitSibling(frame);
  nsSVGEffects::EffectProperties effectProperties =
    nsSVGEffects::GetEffectProperties(firstFrame);

  bool isOK = effectProperties.HasNoFilterOrHasValidFilter();
  nsSVGClipPathFrame *clipPathFrame = effectProperties.GetClipPathFrame(&isOK);

  bool isTrivialClip = clipPathFrame ? clipPathFrame->IsTrivial() : true;

  gfxContext& context = aParams.ctx;
  DrawTarget* drawTarget = context.GetDrawTarget();
  gfxContextMatrixAutoSaveRestore matrixAutoSaveRestore(&context);

  nsPoint firstFrameOffset = GetOffsetToBoundingBox(firstFrame);
  nsPoint offsetToBoundingBox =
    aParams.builder->ToReferenceFrame(firstFrame) - firstFrameOffset;
  if (!firstFrame->IsFrameOfType(nsIFrame::eSVG)) {
    /* Snap the offset if the reference frame is not a SVG frame,
     * since other frames will be snapped to pixel when rendering. */
    offsetToBoundingBox = nsPoint(
      frame->PresContext()->RoundAppUnitsToNearestDevPixels(offsetToBoundingBox.x),
      frame->PresContext()->RoundAppUnitsToNearestDevPixels(offsetToBoundingBox.y));
  }

  // After applying only "offsetToBoundingBox", aCtx would have its origin at
  // the top left corner of frame's bounding box (over all continuations).
  // However, SVG painting needs the origin to be located at the origin of the
  // SVG frame's "user space", i.e. the space in which, for example, the
  // frame's BBox lives.
  // SVG geometry frames and foreignObject frames apply their own offsets, so
  // their position is relative to their user space. So for these frame types,
  // if we want aCtx to be in user space, we first need to subtract the
  // frame's position so that SVG painting can later add it again and the
  // frame is painted in the right place.
  gfxPoint toUserSpaceGfx = nsSVGUtils::FrameSpaceInCSSPxToUserSpaceOffset(frame);
  nsPoint toUserSpace(nsPresContext::CSSPixelsToAppUnits(float(toUserSpaceGfx.x)),
                      nsPresContext::CSSPixelsToAppUnits(float(toUserSpaceGfx.y)));
  nsPoint offsetToUserSpace = offsetToBoundingBox - toUserSpace;

  NS_ASSERTION(hasSVGLayout || offsetToBoundingBox == offsetToUserSpace,
               "For non-SVG frames there shouldn't be any additional offset");

  gfxPoint devPixelOffsetToUserSpace =
    nsLayoutUtils::PointToGfxPoint(offsetToUserSpace,
                                   frame->PresContext()->AppUnitsPerDevPixel());
  context.SetMatrix(context.CurrentMatrix().Translate(devPixelOffsetToUserSpace));

  gfxMatrix cssPxToDevPxMatrix = GetCSSPxToDevPxMatrix(frame);

  const nsStyleSVGReset *svgReset = firstFrame->StyleSVGReset();
  nsTArray<nsSVGMaskFrame *> maskFrames = effectProperties.GetMaskFrames();

  // For a HTML doc:
  //   According to css-masking spec, always create a mask surface when we
  //   have any item in maskFrame even if all of those items are
  //   non-resolvable <mask-sources> or <images>, we still need to create a
  //   transparent black mask layer under this condition.
  // For a SVG doc:
  //   SVG 1.1 say that if we fail to resolve a mask, we should draw the
  //   object unmasked.
  nsIDocument* currentDoc = frame->PresContext()->Document();
  bool shouldGenerateMaskLayer = currentDoc->IsSVGDocument()
                                 ? maskFrames.Length() == 1 && maskFrames[0]
                                 : maskFrames.Length() > 0;

  // These are used if we require a temporary surface for a custom blend mode.
  RefPtr<gfxContext> target = &aParams.ctx;
  IntPoint targetOffset;

  bool complexEffects = false;

  /* Check if we need to do additional operations on this child's
   * rendering, which necessitates rendering into another surface. */
  if (opacity != 1.0f || (clipPathFrame && !isTrivialClip) ||
      frame->StyleEffects()->mMixBlendMode != NS_STYLE_BLEND_NORMAL ||
      shouldGenerateMaskLayer) {
    complexEffects = true;

    // Balanced by the context.Restore() at the bottom of this function (or
    // by the early-return Restore() calls below).
    context.Save();
    nsRect clipRect =
      frame->GetVisualOverflowRectRelativeToSelf() + toUserSpace;
    context.Clip(NSRectToSnappedRect(clipRect,
                                     frame->PresContext()->AppUnitsPerDevPixel(),
                                     *drawTarget));

    Matrix maskTransform;
    RefPtr<SourceSurface> maskSurface;
    if (shouldGenerateMaskLayer) {
      GenerateMaskSurface(aParams, opacity, firstFrame->StyleContext(),
                          maskFrames, offsetToUserSpace,
                          maskTransform, maskSurface);
    }

    if (shouldGenerateMaskLayer && !maskSurface) {
      // Entire surface is clipped out.
      context.Restore();
      return;
    }

    if (frame->StyleEffects()->mMixBlendMode != NS_STYLE_BLEND_NORMAL) {
      // Create a temporary context to draw to so we can blend it back with
      // another operator.
      gfxRect clipRect;
      {
        gfxContextMatrixAutoSaveRestore matRestore(&context);

        context.SetMatrix(gfxMatrix());
        clipRect = context.GetClipExtents();
      }

      IntRect drawRect = RoundedOut(ToRect(clipRect));

      RefPtr<DrawTarget> targetDT =
        context.GetDrawTarget()->CreateSimilarDrawTarget(drawRect.Size(),
                                                         SurfaceFormat::B8G8R8A8);
      if (!targetDT || !targetDT->IsValid()) {
        context.Restore();
        return;
      }
      target = gfxContext::CreateOrNull(targetDT);
      MOZ_ASSERT(target); // already checked the draw target above
      target->SetMatrix(context.CurrentMatrix() *
                        gfxMatrix::Translation(-drawRect.TopLeft()));
      targetOffset = drawRect.TopLeft();
    }

    if (clipPathFrame && !isTrivialClip) {
      Matrix clippedMaskTransform;
      RefPtr<SourceSurface> clipMaskSurface =
        clipPathFrame->GetClipMask(context, frame, cssPxToDevPxMatrix,
                                   &clippedMaskTransform, maskSurface,
                                   maskTransform);

      if (clipMaskSurface) {
        maskSurface = clipMaskSurface;
        maskTransform = clippedMaskTransform;
      }
    }

    // Balanced by the PopGroupAndBlend() near the bottom of this function.
    if (opacity != 1.0f || shouldGenerateMaskLayer ||
        (clipPathFrame && !isTrivialClip)) {
      target->PushGroupForBlendBack(gfxContentType::COLOR_ALPHA, opacity,
                                    maskSurface, maskTransform);
    }
  }

  /* If this frame has only a trivial clipPath, set up cairo's clipping now so
   * we can just do normal painting and get it clipped appropriately. */
  if (clipPathFrame && isTrivialClip) {
    context.Save();
    clipPathFrame->ApplyClipPath(context, frame, cssPxToDevPxMatrix);
  } else if (!clipPathFrame && svgReset->HasClipPath()) {
    context.Save();
    nsCSSClipPathInstance::ApplyBasicShapeClip(context, frame);
  }

  /* Paint the child */
  if (effectProperties.HasValidFilter()) {
    RegularFramePaintCallback callback(aParams.builder,
                                       aParams.layerManager,
                                       offsetToUserSpace);

    nsRegion dirtyRegion = aParams.dirtyRect - offsetToBoundingBox;
    gfxMatrix tm = nsSVGIntegrationUtils::GetCSSPxToDevPxMatrix(frame);
    nsFilterInstance::PaintFilteredFrame(frame, target->GetDrawTarget(),
                                         tm, &callback, &dirtyRegion);
  } else {
    // No filter: paint the frame's display list directly into 'target' by
    // temporarily swapping the layer manager's target context.
    target->SetMatrix(matrixAutoSaveRestore.Matrix());
    BasicLayerManager* basic =
      static_cast<BasicLayerManager*>(aParams.layerManager);
    RefPtr<gfxContext> oldCtx = basic->GetTarget();
    basic->SetTarget(target);
    aParams.layerManager->EndTransaction(FrameLayerBuilder::DrawPaintedLayer,
                                         aParams.builder);
    basic->SetTarget(oldCtx);
  }

  if ((clipPathFrame && isTrivialClip) ||
      (!clipPathFrame && svgReset->HasClipPath())) {
    context.Restore();
  }

  /* No more effects, we're done. */
  if (!complexEffects) {
    return;
  }

  if (opacity != 1.0f || shouldGenerateMaskLayer ||
      (clipPathFrame && !isTrivialClip)) {
    target->PopGroupAndBlend();
  }

  if (frame->StyleEffects()->mMixBlendMode != NS_STYLE_BLEND_NORMAL) {
    // Blend the temporary surface back into the real context at the offset
    // it was rendered from.
    RefPtr<DrawTarget> targetDT = target->GetDrawTarget();
    target = nullptr;
    RefPtr<SourceSurface> targetSurf = targetDT->Snapshot();

    context.SetMatrix(gfxMatrix()); // This will be restored right after.
    RefPtr<gfxPattern> pattern =
      new gfxPattern(targetSurf,
                     Matrix::Translation(targetOffset.x, targetOffset.y));
    context.SetPattern(pattern);
    context.Paint();
  }

  context.Restore();
}
// Paints the mask and/or clip-path of aParams.frame into aParams.ctx (used
// when the mask is composited by the caller, e.g. from a mask layer).
// When both a mask and a clip-path are needed, the mask is first painted into
// a separate A8 target and then fed into the clip-path painting as a source.
void
nsSVGIntegrationUtils::PaintMask(const PaintFramesParams& aParams)
{
  nsSVGUtils::MaskUsage maskUsage;
  nsSVGUtils::DetermineMaskUsage(aParams.frame, aParams.handleOpacity,
                                 maskUsage);

  nsIFrame* frame = aParams.frame;
  if (!ValidateSVGFrame(frame)) {
    return;
  }

  gfxContext& ctx = aParams.ctx;

  nsIFrame* firstFrame =
    nsLayoutUtils::FirstContinuationOrIBSplitSibling(frame);
  SVGObserverUtils::EffectProperties effectProperties =
    SVGObserverUtils::GetEffectProperties(firstFrame);

  RefPtr<DrawTarget> maskTarget = ctx.GetDrawTarget();

  if (maskUsage.shouldGenerateMaskLayer &&
      (maskUsage.shouldGenerateClipMaskLayer ||
       maskUsage.shouldApplyClipPath)) {
    // We will paint both mask of positioned mask and clip-path into
    // maskTarget.
    //
    // Create one extra draw target for drawing positioned mask, so that we do
    // not have to copy the content of maskTarget before painting
    // clip-path into it.
    maskTarget = maskTarget->CreateSimilarDrawTarget(maskTarget->GetSize(),
                                                     SurfaceFormat::A8);
  }

  nsTArray<nsSVGMaskFrame *> maskFrames = effectProperties.GetMaskFrames();
  AutoPopGroup autoPop;
  // Only pass the opacity through a group when multiple mask layers must be
  // combined; a single mask layer can absorb the opacity directly.
  bool shouldPushOpacity = (maskUsage.opacity != 1.0) &&
                           (maskFrames.Length() != 1);
  if (shouldPushOpacity) {
    ctx.PushGroupForBlendBack(gfxContentType::COLOR_ALPHA, maskUsage.opacity);
    autoPop.SetContext(&ctx);
  }

  gfxContextMatrixAutoSaveRestore matSR;

  // Paint clip-path-basic-shape onto ctx
  gfxContextAutoSaveRestore basicShapeSR;
  if (maskUsage.shouldApplyBasicShape) {
    matSR.SetContext(&ctx);

    MoveContextOriginToUserSpace(firstFrame, aParams);

    basicShapeSR.SetContext(&ctx);
    nsCSSClipPathInstance::ApplyBasicShapeClip(ctx, frame);
    if (!maskUsage.shouldGenerateMaskLayer) {
      // Only have basic-shape clip-path effect. Fill clipped region by
      // opaque white.
      ctx.SetColor(Color(1.0, 1.0, 1.0, 1.0));
      ctx.Fill();

      return;
    }
  }

  // Paint mask onto ctx.
  if (maskUsage.shouldGenerateMaskLayer) {
    // Reset the matrix from any earlier section before re-translating.
    matSR.Restore();
    matSR.SetContext(&ctx);

    EffectOffsets offsets = MoveContextOriginToUserSpace(frame, aParams);
    PaintMaskSurface(aParams, maskTarget,
                     shouldPushOpacity ? 1.0 : maskUsage.opacity,
                     firstFrame->StyleContext(), maskFrames,
                     ctx.CurrentMatrix(), offsets.offsetToUserSpace);
  }

  // Paint clip-path onto ctx.
  if (maskUsage.shouldGenerateClipMaskLayer || maskUsage.shouldApplyClipPath) {
    matSR.Restore();
    matSR.SetContext(&ctx);

    MoveContextOriginToUserSpace(firstFrame, aParams);
    Matrix clipMaskTransform;
    gfxMatrix cssPxToDevPxMatrix = nsSVGUtils::GetCSSPxToDevPxMatrix(frame);

    nsSVGClipPathFrame *clipPathFrame = effectProperties.GetClipPathFrame();
    // If a mask layer was generated above, feed its snapshot into the
    // clip-path painting so both effects combine in one pass.
    RefPtr<SourceSurface> maskSurface =
      maskUsage.shouldGenerateMaskLayer ? maskTarget->Snapshot() : nullptr;
    clipPathFrame->PaintClipMask(ctx, frame, cssPxToDevPxMatrix,
                                 &clipMaskTransform, maskSurface,
                                 ctx.CurrentMatrix());
  }
}
// Builds the mask surface for aParams.frame from aMaskFrames and returns it
// (with its transform) in a MaskPaintResult.  A single SVG mask is returned
// directly from its frame; multiple/mixed masks are composited into a fresh
// A8 draw target.
static MaskPaintResult
CreateAndPaintMaskSurface(const PaintFramesParams& aParams,
                          float aOpacity, nsStyleContext* aSC,
                          const nsTArray<nsSVGMaskFrame*>& aMaskFrames,
                          const nsPoint& aOffsetToUserSpace)
{
  const nsStyleSVGReset *svgReset = aSC->StyleSVGReset();
  MOZ_ASSERT(aMaskFrames.Length() > 0);
  MaskPaintResult paintResult;

  gfxContext& ctx = aParams.ctx;

  // Optimization for single SVG mask.
  if (((aMaskFrames.Length() == 1) && aMaskFrames[0])) {
    gfxMatrix cssPxToDevPxMatrix =
      nsSVGUtils::GetCSSPxToDevPxMatrix(aParams.frame);
    // The single mask frame can apply the opacity itself.
    paintResult.opacityApplied = true;
    nsSVGMaskFrame::MaskParams params(&ctx, aParams.frame, cssPxToDevPxMatrix,
                                      aOpacity, &paintResult.maskTransform,
                                      svgReset->mMask.mLayers[0].mMaskMode,
                                      aParams.imgParams);
    paintResult.maskSurface =
      aMaskFrames[0]->GetMaskForMaskedFrame(params);

    if (!paintResult.maskSurface) {
      paintResult.transparentBlackMask = true;
    }

    return paintResult;
  }

  const IntRect& maskSurfaceRect = aParams.maskRect;
  if (maskSurfaceRect.IsEmpty()) {
    paintResult.transparentBlackMask = true;
    return paintResult;
  }

  RefPtr<DrawTarget> maskDT =
    ctx.GetDrawTarget()->CreateSimilarDrawTarget(maskSurfaceRect.Size(),
                                                 SurfaceFormat::A8);
  if (!maskDT || !maskDT->IsValid()) {
    return paintResult;
  }

  // We can paint mask along with opacity only if
  // 1. There is only one mask, or
  // 2. No overlap among masks.
  // Collision detect in #2 is not that trivial, we only accept #1 here.
  paintResult.opacityApplied = (aMaskFrames.Length() == 1);

  // Set context's matrix on maskContext, offset by the maskSurfaceRect's
  // position. This makes sure that we combine the masks in device space.
  Matrix maskSurfaceMatrix =
    ctx.CurrentMatrix() * Matrix::Translation(-aParams.maskRect.TopLeft());

  PaintMaskSurface(aParams, maskDT,
                   paintResult.opacityApplied ? aOpacity : 1.0,
                   aSC, aMaskFrames, maskSurfaceMatrix,
                   aOffsetToUserSpace);

  if (aParams.imgParams.result != ImgDrawResult::SUCCESS) {
    // Now we know the status of mask resource since we used it while painting.
    // According to the return value of PaintMaskSurface, we know whether mask
    // resource is resolvable or not.
    //
    // For a HTML doc:
    //   According to css-masking spec, always create a mask surface when
    //   we have any item in maskFrame even if all of those items are
    //   non-resolvable <mask-sources> or <images>.
    //   Set paintResult.transparentBlackMask as true, the caller should stop
    //   painting masked content as if this mask is a transparent black one.
    // For a SVG doc:
    //   SVG 1.1 say that if we fail to resolve a mask, we should draw the
    //   object unmasked.
    //   Leave paintResult.maskSurface empty, the caller should paint all
    //   masked content as if this mask is an opaque white one(no mask).
    paintResult.transparentBlackMask =
      !(aParams.frame->GetStateBits() & NS_FRAME_SVG_LAYOUT);

    MOZ_ASSERT(!paintResult.maskSurface);
    return paintResult;
  }

  paintResult.maskTransform = maskSurfaceMatrix;
  if (!paintResult.maskTransform.Invert()) {
    return paintResult;
  }

  paintResult.maskSurface = maskDT->Snapshot();
  return paintResult;
}
// Captures one frame of the tab being shared: renders the window's document
// into mData (scaled to fit the mBufW x mBufH destination while preserving
// aspect ratio) and publishes the result as mImage under mMonitor.
void
MediaEngineTabVideoSource::Draw() {
  IntSize size(mBufW, mBufH);

  nsresult rv;
  float scale = 1.0;

  nsCOMPtr<nsPIDOMWindow> win = do_QueryInterface(mWindow);
  if (!win) {
    return;
  }

  // take a screenshot, as wide as possible, proportional to the destination
  // size
  nsCOMPtr<nsIDOMWindowUtils> utils = do_GetInterface(win);
  if (!utils) {
    return;
  }

  nsCOMPtr<nsIDOMClientRect> rect;
  rv = utils->GetRootBounds(getter_AddRefs(rect));
  NS_ENSURE_SUCCESS_VOID(rv);
  if (!rect) {
    return;
  }

  float left, top, width, height;
  rect->GetLeft(&left);
  rect->GetTop(&top);
  rect->GetWidth(&width);
  rect->GetHeight(&height);

  if (width == 0 || height == 0) {
    return;
  }

  int32_t srcX = left;
  int32_t srcY = top;
  int32_t srcW;
  int32_t srcH;

  // Choose the largest source rect with the destination's aspect ratio that
  // fits inside the window bounds.
  float aspectRatio = ((float) size.width) / size.height;
  if (width / aspectRatio < height) {
    srcW = width;
    srcH = width / aspectRatio;
  } else {
    srcW = height * aspectRatio;
    srcH = height;
  }

  nsRefPtr<nsPresContext> presContext;
  nsIDocShell* docshell = win->GetDocShell();
  if (docshell) {
    docshell->GetPresContext(getter_AddRefs(presContext));
  }
  if (!presContext) {
    return;
  }

  nscolor bgColor = NS_RGB(255, 255, 255);
  nsCOMPtr<nsIPresShell> presShell = presContext->PresShell();
  uint32_t renderDocFlags = (nsIPresShell::RENDER_IGNORE_VIEWPORT_SCROLLING |
                             nsIPresShell::RENDER_DOCUMENT_RELATIVE);
  nsRect r(nsPresContext::CSSPixelsToAppUnits(srcX / scale),
           nsPresContext::CSSPixelsToAppUnits(srcY / scale),
           nsPresContext::CSSPixelsToAppUnits(srcW / scale),
           nsPresContext::CSSPixelsToAppUnits(srcH / scale));

  gfxImageFormat format = gfxImageFormat::RGB24;
  uint32_t stride = gfxASurface::FormatStrideForWidth(format, size.width);

  nsRefPtr<layers::ImageContainer> container =
    layers::LayerManager::CreateImageContainer();
  // Render directly into our own pixel buffer (mData) via a CAIRO target.
  RefPtr<DrawTarget> dt =
    Factory::CreateDrawTargetForData(BackendType::CAIRO,
                                     mData.rwget(),
                                     size,
                                     stride,
                                     SurfaceFormat::B8G8R8X8);
  if (!dt) {
    return;
  }
  nsRefPtr<gfxContext> context = new gfxContext(dt);
  gfxPoint pt(0, 0);
  context->Translate(pt);
  // Scale the source rect down/up to fill the destination buffer.
  context->Scale(scale * size.width / srcW,
                 scale * size.height / srcH);
  rv = presShell->RenderDocument(r, renderDocFlags, bgColor, context);
  NS_ENSURE_SUCCESS_VOID(rv);

  RefPtr<SourceSurface> surface = dt->Snapshot();
  if (!surface) {
    return;
  }

  layers::CairoImage::Data cairoData;
  cairoData.mSize = size;
  cairoData.mSourceSurface = surface;

  nsRefPtr<layers::CairoImage> image = new layers::CairoImage();

  image->SetData(cairoData);

  // mImage is read from other threads; publish it under the monitor.
  MonitorAutoLock mon(mMonitor);
  mImage = image;
}
// Initializes this frame by rasterizing aDrawable at aSize.  On platforms
// that can render content to data surfaces the pixels land in a volatile
// buffer (mImageSurface); otherwise they go to an offscreen surface
// (mOptSurface).  Sets mAborted and returns an error on any failure.
nsresult
imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                           const nsIntSize& aSize,
                           const SurfaceFormat aFormat,
                           SamplingFilter aSamplingFilter,
                           uint32_t aImageFlags)
{
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!AllowedImageSize(aSize.width, aSize.height)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFrameRect = IntRect(IntPoint(0, 0), aSize);

  mFormat = aFormat;
  mPaletteDepth = 0;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface =
    gfxPlatform::GetPlatform()->CanRenderContentToDataSurface();

  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mImageSurface, "Called imgFrame::InitWithDrawable() twice?");

    mVBuf = AllocateBufferForImage(mFrameRect.Size(), mFormat);
    if (!mVBuf) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    int32_t stride = VolatileSurfaceStride(mFrameRect.Size(), mFormat);
    VolatileBufferPtr<uint8_t> ptr(mVBuf);
    if (!ptr) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mImageSurface = CreateLockedSurface(mVBuf, mFrameRect.Size(), mFormat);
    if (!mImageSurface) {
      NS_WARNING("Failed to create ImageSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mVBuf, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    // Draw straight into the volatile buffer's pixels.
    target = gfxPlatform::GetPlatform()->
      CreateDrawTargetForData(ptr, mFrameRect.Size(), stride, mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead.  This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that in
    // the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");
    target = gfxPlatform::GetPlatform()->
      CreateOffscreenContentDrawTarget(mFrameRect.Size(), mFormat);
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx); // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, mFrameRect.Size(),
                             ImageRegion::Create(ThebesRect(mFrameRect)),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mImageSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());
#endif

  return NS_OK;
}
// Builds an A8 mask surface for aParams.frame by compositing all of its
// mask layers (SVG <mask> frames and/or CSS mask images) in device space.
//
// On success, aOutMaskSurface holds the combined mask and aOutMaskTransform
// maps from the mask surface back to the destination; both are left
// untouched (null / default) when the mask is unavailable or entirely
// clipped out, which callers treat as "surface clipped out".
//
// Fix: the draw target returned by Factory::CreateDrawTarget /
// CreateSimilarDrawTarget was previously used without any null/validity
// check (unlike the equivalent CreateSimilarDrawTarget call sites in
// PaintFramesWithEffects), so an allocation failure would crash in
// gfxContext::ForDrawTarget or DrawSurface. We now bail out early, leaving
// the out-params untouched.
static void
GenerateMaskSurface(const nsSVGIntegrationUtils::PaintFramesParams& aParams,
                    float aOpacity, nsStyleContext* aSC,
                    nsSVGEffects::EffectProperties& aEffectProperties,
                    const gfxPoint& aOffest, Matrix& aOutMaskTransform,
                    RefPtr<SourceSurface>& aOutMaskSurface)
{
  const nsStyleSVGReset *svgReset = aSC->StyleSVGReset();
  MOZ_ASSERT(HasMaskToDraw(svgReset, aEffectProperties));

  nsTArray<nsSVGMaskFrame *> svgMaskFrames = aEffectProperties.GetMaskFrames();
  MOZ_ASSERT(svgMaskFrames.Length() == svgReset->mMask.mImageCount);

  gfxMatrix cssPxToDevPxMatrix =
    nsSVGIntegrationUtils::GetCSSPxToDevPxMatrix(aParams.frame);
  gfxContext& ctx = aParams.ctx;

  // Fast path: there is only one mask, and that mask is a SVG mask. Let the
  // mask frame build the surface directly; no intermediate compositing needed.
  if ((svgMaskFrames.Length() == 1) && svgMaskFrames[0]) {
    aOutMaskSurface =
      svgMaskFrames[0]->GetMaskForMaskedFrame(&ctx, aParams.frame,
                                              cssPxToDevPxMatrix, aOpacity,
                                              &aOutMaskTransform,
                                              svgReset->mMask.mLayers[0].mMaskMode);
    return;
  }

  // Measure the clip extents in device space to size the mask surface.
  ctx.Save();
  ctx.SetMatrix(gfxMatrix());
  gfxRect clipExtents = ctx.GetClipExtents();
  IntRect maskSurfaceRect = RoundedOut(ToRect(clipExtents));
  ctx.Restore();

  // Mask composition result on CoreGraphic::A8 surface is not correct
  // when mask-mode is not add(source over). Switch to skia when CG backend
  // detected.
  RefPtr<DrawTarget> maskDT =
    (ctx.GetDrawTarget()->GetBackendType() == BackendType::COREGRAPHICS)
      ? Factory::CreateDrawTarget(BackendType::SKIA, maskSurfaceRect.Size(),
                                  SurfaceFormat::A8)
      : ctx.GetDrawTarget()->CreateSimilarDrawTarget(maskSurfaceRect.Size(),
                                                     SurfaceFormat::A8);

  // Bail out on allocation failure; aOutMaskSurface stays null, which the
  // caller handles as "entirely clipped out".
  if (!maskDT || !maskDT->IsValid()) {
    return;
  }

  RefPtr<gfxContext> maskContext = gfxContext::ForDrawTarget(maskDT);

  // Set ctx's matrix on maskContext, offset by the maskSurfaceRect's position.
  // This makes sure that we combine the masks in device space.
  gfxMatrix maskSurfaceMatrix =
    ctx.CurrentMatrix() * gfxMatrix::Translation(-maskSurfaceRect.TopLeft());
  maskContext->SetMatrix(maskSurfaceMatrix);

  // Multiple SVG masks interleave with image mask. Paint each layer onto
  // maskDT one at a time, back-to-front.
  for (int i = svgMaskFrames.Length() - 1; i >= 0 ; i--) {
    nsSVGMaskFrame *maskFrame = svgMaskFrames[i];

    // The bottom-most layer always composites with OP_OVER; upper layers use
    // the author-specified mask-composite mode.
    CompositionOp compositionOp = (i == int(svgMaskFrames.Length() - 1))
      ? CompositionOp::OP_OVER
      : nsCSSRendering::GetGFXCompositeMode(svgReset->mMask.mLayers[i].mComposite);

    // maskFrame != nullptr means we get a SVG mask.
    // maskFrame == nullptr means we get an image mask.
    if (maskFrame) {
      Matrix svgMaskMatrix;
      RefPtr<SourceSurface> svgMask =
        maskFrame->GetMaskForMaskedFrame(maskContext, aParams.frame,
                                         cssPxToDevPxMatrix, aOpacity,
                                         &svgMaskMatrix,
                                         svgReset->mMask.mLayers[i].mMaskMode);
      if (svgMask) {
        gfxContextMatrixAutoSaveRestore matRestore(maskContext);

        maskContext->Multiply(ThebesMatrix(svgMaskMatrix));
        Rect drawRect =
          IntRectToRect(IntRect(IntPoint(0, 0), svgMask->GetSize()));
        maskDT->DrawSurface(svgMask, drawRect, drawRect, DrawSurfaceOptions(),
                            DrawOptions(1.0f, compositionOp));
      }
    } else {
      gfxContextMatrixAutoSaveRestore matRestore(maskContext);

      maskContext->Multiply(gfxMatrix::Translation(-aOffest));
      nsRenderingContext rc(maskContext);
      nsCSSRendering::PaintBGParams params =
        nsCSSRendering::PaintBGParams::ForSingleLayer(
          *aParams.frame->PresContext(), rc, aParams.dirtyRect,
          aParams.borderArea, aParams.frame,
          aParams.builder->GetBackgroundPaintFlags() |
            nsCSSRendering::PAINTBG_MASK_IMAGE,
          i, compositionOp);

      // FIXME We should use the return value, see bug 1258510.
      Unused << nsCSSRendering::PaintBackgroundWithSC(
        params, aSC, *aParams.frame->StyleBorder());
    }
  }

  // The transform handed back to the caller is the inverse of the matrix we
  // painted with, so the caller can map the mask onto the destination.
  aOutMaskTransform = ToMatrix(maskSurfaceMatrix);
  if (!aOutMaskTransform.Invert()) {
    return;
  }

  aOutMaskSurface = maskDT->Snapshot();
}
void BasicCompositor::DrawQuad(const gfx::Rect& aRect, const gfx::Rect& aClipRect, const EffectChain &aEffectChain, gfx::Float aOpacity, const gfx::Matrix4x4& aTransform, const gfx::Rect& aVisibleRect) { RefPtr<DrawTarget> buffer = mRenderTarget->mDrawTarget; // For 2D drawing, |dest| and |buffer| are the same surface. For 3D drawing, // |dest| is a temporary surface. RefPtr<DrawTarget> dest = buffer; buffer->PushClipRect(aClipRect); AutoRestoreTransform autoRestoreTransform(dest); Matrix newTransform; Rect transformBounds; gfx3DMatrix new3DTransform; IntPoint offset = mRenderTarget->GetOrigin(); if (aTransform.Is2D()) { newTransform = aTransform.As2D(); } else { // Create a temporary surface for the transform. dest = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(RoundOut(aRect).Size(), SurfaceFormat::B8G8R8A8); if (!dest) { return; } dest->SetTransform(Matrix::Translation(-aRect.x, -aRect.y)); // Get the bounds post-transform. new3DTransform = To3DMatrix(aTransform); gfxRect bounds = new3DTransform.TransformBounds(ThebesRect(aRect)); bounds.IntersectRect(bounds, gfxRect(offset.x, offset.y, buffer->GetSize().width, buffer->GetSize().height)); transformBounds = ToRect(bounds); transformBounds.RoundOut(); // Propagate the coordinate offset to our 2D draw target. newTransform = Matrix::Translation(transformBounds.x, transformBounds.y); // When we apply the 3D transformation, we do it against a temporary // surface, so undo the coordinate offset. 
new3DTransform = gfx3DMatrix::Translation(aRect.x, aRect.y, 0) * new3DTransform; } newTransform.PostTranslate(-offset.x, -offset.y); buffer->SetTransform(newTransform); RefPtr<SourceSurface> sourceMask; Matrix maskTransform; if (aEffectChain.mSecondaryEffects[EffectTypes::MASK]) { EffectMask *effectMask = static_cast<EffectMask*>(aEffectChain.mSecondaryEffects[EffectTypes::MASK].get()); sourceMask = effectMask->mMaskTexture->AsSourceBasic()->GetSurface(dest); MOZ_ASSERT(effectMask->mMaskTransform.Is2D(), "How did we end up with a 3D transform here?!"); MOZ_ASSERT(!effectMask->mIs3D); maskTransform = effectMask->mMaskTransform.As2D(); maskTransform.PreTranslate(-offset.x, -offset.y); } switch (aEffectChain.mPrimaryEffect->mType) { case EffectTypes::SOLID_COLOR: { EffectSolidColor* effectSolidColor = static_cast<EffectSolidColor*>(aEffectChain.mPrimaryEffect.get()); FillRectWithMask(dest, aRect, effectSolidColor->mColor, DrawOptions(aOpacity), sourceMask, &maskTransform); break; } case EffectTypes::RGB: { TexturedEffect* texturedEffect = static_cast<TexturedEffect*>(aEffectChain.mPrimaryEffect.get()); TextureSourceBasic* source = texturedEffect->mTexture->AsSourceBasic(); if (texturedEffect->mPremultiplied) { DrawSurfaceWithTextureCoords(dest, aRect, source->GetSurface(dest), texturedEffect->mTextureCoords, texturedEffect->mFilter, aOpacity, sourceMask, &maskTransform); } else { RefPtr<DataSourceSurface> srcData = source->GetSurface(dest)->GetDataSurface(); // Yes, we re-create the premultiplied data every time. // This might be better with a cache, eventually. 
RefPtr<DataSourceSurface> premultData = gfxUtils::CreatePremultipliedDataSurface(srcData); DrawSurfaceWithTextureCoords(dest, aRect, premultData, texturedEffect->mTextureCoords, texturedEffect->mFilter, aOpacity, sourceMask, &maskTransform); } break; } case EffectTypes::YCBCR: { NS_RUNTIMEABORT("Can't (easily) support component alpha with BasicCompositor!"); break; } case EffectTypes::RENDER_TARGET: { EffectRenderTarget* effectRenderTarget = static_cast<EffectRenderTarget*>(aEffectChain.mPrimaryEffect.get()); RefPtr<BasicCompositingRenderTarget> surface = static_cast<BasicCompositingRenderTarget*>(effectRenderTarget->mRenderTarget.get()); RefPtr<SourceSurface> sourceSurf = surface->mDrawTarget->Snapshot(); DrawSurfaceWithTextureCoords(dest, aRect, sourceSurf, effectRenderTarget->mTextureCoords, effectRenderTarget->mFilter, aOpacity, sourceMask, &maskTransform); break; } case EffectTypes::COMPONENT_ALPHA: { NS_RUNTIMEABORT("Can't (easily) support component alpha with BasicCompositor!"); break; } default: { NS_RUNTIMEABORT("Invalid effect type!"); break; } } if (!aTransform.Is2D()) { dest->Flush(); RefPtr<SourceSurface> snapshot = dest->Snapshot(); RefPtr<DataSourceSurface> source = snapshot->GetDataSurface(); RefPtr<DataSourceSurface> temp = Factory::CreateDataSourceSurface(RoundOut(transformBounds).Size(), SurfaceFormat::B8G8R8A8 #ifdef MOZ_ENABLE_SKIA , true #endif ); if (NS_WARN_IF(!temp)) { buffer->PopClip(); return; } Transform(temp, source, new3DTransform, transformBounds.TopLeft()); transformBounds.MoveTo(0, 0); buffer->DrawSurface(temp, transformBounds, transformBounds); } buffer->PopClip(); }
// Renders the captured tab's document into mData (a CPU pixel buffer),
// snapshots it, and publishes the result as mImage under mMonitor.
// Bails out silently (no frame produced) if the window, viewport,
// allocation, pres context, or draw target is unavailable.
void
MediaEngineTabVideoSource::Draw() {
  if (!mWindow) {
    return;
  }

  // Refresh the cached viewport when tracking the page, or when the cached
  // values are still the INT32_MAX "unset" sentinel.
  if (mScrollWithPage || mViewportWidth == INT32_MAX) {
    mWindow->GetInnerWidth(&mViewportWidth);
  }
  if (mScrollWithPage || mViewportHeight == INT32_MAX) {
    mWindow->GetInnerHeight(&mViewportHeight);
  }
  if (!mViewportWidth || !mViewportHeight) {
    return;
  }

  // Compute the output size in device pixels, uniformly scaled down so that
  // neither dimension exceeds mBufWidthMax / mBufHeightMax.
  IntSize size;
  {
    float pixelRatio;
    mWindow->GetDevicePixelRatio(&pixelRatio);
    const int32_t deviceWidth = (int32_t)(pixelRatio * mViewportWidth);
    const int32_t deviceHeight = (int32_t)(pixelRatio * mViewportHeight);

    if ((deviceWidth <= mBufWidthMax) && (deviceHeight <= mBufHeightMax)) {
      size = IntSize(deviceWidth, deviceHeight);
    } else {
      const float scaleWidth = (float)mBufWidthMax / (float)deviceWidth;
      const float scaleHeight = (float)mBufHeightMax / (float)deviceHeight;
      const float scale = scaleWidth < scaleHeight ? scaleWidth : scaleHeight;

      size = IntSize((int)(scale * deviceWidth), (int)(scale * deviceHeight));
    }
  }

  gfxImageFormat format = SurfaceFormat::X8R8G8B8_UINT32;
  uint32_t stride = gfxASurface::FormatStrideForWidth(format, size.width);

  // Grow the pixel buffer if needed; it is reused across frames.
  // NOTE(review): this assumes mData is a scoped/owning pointer whose
  // assignment frees the previous buffer (it is accessed via rwget() below);
  // if it is a raw pointer this reallocation leaks — confirm its type.
  if (mDataSize < static_cast<size_t>(stride * size.height)) {
    mDataSize = stride * size.height;
    mData = static_cast<unsigned char*>(malloc(mDataSize));
  }
  if (!mData) {
    return;
  }

  nsCOMPtr<nsIPresShell> presShell;
  {
    RefPtr<nsPresContext> presContext;
    nsIDocShell* docshell = mWindow->GetDocShell();
    if (docshell) {
      docshell->GetPresContext(getter_AddRefs(presContext));
    }
    if (!presContext) {
      return;
    }
    presShell = presContext->PresShell();
  }

  nscolor bgColor = NS_RGB(255, 255, 255);
  // When not scrolling with the page, render a fixed document-relative
  // viewport instead of the current scroll position.
  uint32_t renderDocFlags = mScrollWithPage? 0 :
      (nsIPresShell::RENDER_IGNORE_VIEWPORT_SCROLLING |
       nsIPresShell::RENDER_DOCUMENT_RELATIVE);
  // Region to render, in app units, from the configured viewport offset.
  nsRect r(nsPresContext::CSSPixelsToAppUnits((float)mViewportOffsetX),
           nsPresContext::CSSPixelsToAppUnits((float)mViewportOffsetY),
           nsPresContext::CSSPixelsToAppUnits((float)mViewportWidth),
           nsPresContext::CSSPixelsToAppUnits((float)mViewportHeight));

  RefPtr<layers::ImageContainer> container =
    layers::LayerManager::CreateImageContainer();
  RefPtr<DrawTarget> dt =
    Factory::CreateDrawTargetForData(BackendType::CAIRO,
                                     mData.rwget(),
                                     size,
                                     stride,
                                     SurfaceFormat::B8G8R8X8);
  if (!dt) {
    return;
  }
  RefPtr<gfxContext> context = new gfxContext(dt);
  // Scale CSS pixels down/up to the chosen output size.
  context->SetMatrix(context->CurrentMatrix().Scale(
    (((float) size.width)/mViewportWidth),
    (((float) size.height)/mViewportHeight)));

  NS_ENSURE_SUCCESS_VOID(
    presShell->RenderDocument(r, renderDocFlags, bgColor, context));

  RefPtr<SourceSurface> surface = dt->Snapshot();
  if (!surface) {
    return;
  }

  RefPtr<layers::SourceSurfaceImage> image =
    new layers::SourceSurfaceImage(size, surface);

  // Publish the frame; mImage is read by other threads, hence the lock.
  MonitorAutoLock mon(mMonitor);
  mImage = image;
}
// Paints aFrame applying its SVG effects (filter, clip-path, mask, group
// opacity, mix-blend-mode), following the SVG rendering model documented
// inline below. Complex effect combinations render into a group / temporary
// surface and are composited back at the end.
void
nsSVGIntegrationUtils::PaintFramesWithEffects(gfxContext& aContext,
                                              nsIFrame* aFrame,
                                              const nsRect& aDirtyRect,
                                              const nsRect& aBorderArea,
                                              nsDisplayListBuilder* aBuilder,
                                              LayerManager *aLayerManager)
{
#ifdef DEBUG
  NS_ASSERTION(!(aFrame->GetStateBits() & NS_FRAME_SVG_LAYOUT) ||
               (NS_SVGDisplayListPaintingEnabled() &&
                !(aFrame->GetStateBits() & NS_FRAME_IS_NONDISPLAY)),
               "Should not use nsSVGIntegrationUtils on this SVG frame");
#endif

  /* SVG defines the following rendering model:
   *
   *  1. Render geometry
   *  2. Apply filter
   *  3. Apply clipping, masking, group opacity
   *
   * We follow this, but perform a couple of optimizations:
   *
   * + Use cairo's clipPath when representable natively (single object
   *   clip region).
   *
   * + Merge opacity and masking if both used together.
   */
  const nsIContent* content = aFrame->GetContent();
  bool hasSVGLayout = (aFrame->GetStateBits() & NS_FRAME_SVG_LAYOUT);
  if (hasSVGLayout) {
    nsISVGChildFrame *svgChildFrame = do_QueryFrame(aFrame);
    if (!svgChildFrame || !aFrame->GetContent()->IsSVGElement()) {
      NS_ASSERTION(false, "why?");
      return;
    }
    if (!static_cast<const nsSVGElement*>(content)->HasValidDimensions()) {
      return; // The SVG spec says not to draw _anything_
    }
  }

  float opacity = aFrame->StyleDisplay()->mOpacity;
  if (opacity == 0.0f) {
    return;
  }
  // If the opacity can be pushed down into the SVG painting itself, treat
  // the group as fully opaque here.
  if (opacity != 1.0f &&
      hasSVGLayout && nsSVGUtils::CanOptimizeOpacity(aFrame)) {
    opacity = 1.0f;
  }

  /* Properties are added lazily and may have been removed by a restyle,
     so make sure all applicable ones are set again. */
  nsIFrame* firstFrame =
    nsLayoutUtils::FirstContinuationOrIBSplitSibling(aFrame);
  nsSVGEffects::EffectProperties effectProperties =
    nsSVGEffects::GetEffectProperties(firstFrame);

  bool isOK = effectProperties.HasNoFilterOrHasValidFilter();
  nsSVGClipPathFrame *clipPathFrame = effectProperties.GetClipPathFrame(&isOK);
  // A "trivial" clip can be applied directly as a native clip path; absence
  // of a clip path counts as trivial too.
  bool isTrivialClip = clipPathFrame ? clipPathFrame->IsTrivial() : true;

  DrawTarget* drawTarget = aContext.GetDrawTarget();
  gfxContextMatrixAutoSaveRestore matrixAutoSaveRestore(&aContext);

  nsPoint firstFrameOffset = GetOffsetToBoundingBox(firstFrame);
  nsPoint offsetToBoundingBox =
    aBuilder->ToReferenceFrame(firstFrame) - firstFrameOffset;
  if (!firstFrame->IsFrameOfType(nsIFrame::eSVG)) {
    /* Snap the offset if the reference frame is not a SVG frame,
     * since other frames will be snapped to pixel when rendering. */
    offsetToBoundingBox = nsPoint(
      aFrame->PresContext()->RoundAppUnitsToNearestDevPixels(offsetToBoundingBox.x),
      aFrame->PresContext()->RoundAppUnitsToNearestDevPixels(offsetToBoundingBox.y));
  }

  // After applying only "offsetToBoundingBox", aCtx would have its origin at
  // the top left corner of aFrame's bounding box (over all continuations).
  // However, SVG painting needs the origin to be located at the origin of the
  // SVG frame's "user space", i.e. the space in which, for example, the
  // frame's BBox lives.
  // SVG geometry frames and foreignObject frames apply their own offsets, so
  // their position is relative to their user space. So for these frame types,
  // if we want aCtx to be in user space, we first need to subtract the
  // frame's position so that SVG painting can later add it again and the
  // frame is painted in the right place.
  gfxPoint toUserSpaceGfx =
    nsSVGUtils::FrameSpaceInCSSPxToUserSpaceOffset(aFrame);
  nsPoint toUserSpace(nsPresContext::CSSPixelsToAppUnits(float(toUserSpaceGfx.x)),
                      nsPresContext::CSSPixelsToAppUnits(float(toUserSpaceGfx.y)));
  nsPoint offsetToUserSpace = offsetToBoundingBox - toUserSpace;

  NS_ASSERTION(hasSVGLayout || offsetToBoundingBox == offsetToUserSpace,
               "For non-SVG frames there shouldn't be any additional offset");

  gfxPoint devPixelOffsetToUserSpace =
    nsLayoutUtils::PointToGfxPoint(offsetToUserSpace,
                                   aFrame->PresContext()->AppUnitsPerDevPixel());
  aContext.SetMatrix(aContext.CurrentMatrix().Translate(devPixelOffsetToUserSpace));

  gfxMatrix cssPxToDevPxMatrix = GetCSSPxToDevPxMatrix(aFrame);

  const nsStyleSVGReset *svgReset = firstFrame->StyleSVGReset();
  // Keep moving forward even if svgMaskFrame is nullptr or isOK is false.
  // This source is not a svg mask, but it still can be a correct mask image.
  nsSVGMaskFrame *svgMaskFrame = effectProperties.GetMaskFrame(&isOK);

  bool complexEffects = false;
  bool hasValidLayers = svgReset->mMask.HasLayerWithImage();

  // These are used if we require a temporary surface for a custom blend mode.
  RefPtr<gfxContext> target = &aContext;
  IntPoint targetOffset;

  /* Check if we need to do additional operations on this child's
   * rendering, which necessitates rendering into another surface. */
  if (opacity != 1.0f ||  (clipPathFrame && !isTrivialClip)
      || aFrame->StyleDisplay()->mMixBlendMode != NS_STYLE_BLEND_NORMAL
      || svgMaskFrame || hasValidLayers) {
    complexEffects = true;

    // Balanced by aContext.Restore() at the end of the function (every early
    // return below this point restores first).
    aContext.Save();
    nsRect clipRect =
      aFrame->GetVisualOverflowRectRelativeToSelf() + toUserSpace;
    aContext.Clip(NSRectToSnappedRect(clipRect,
                                      aFrame->PresContext()->AppUnitsPerDevPixel(),
                                      *drawTarget));

    Matrix maskTransform;
    RefPtr<SourceSurface> maskSurface;

    if (svgMaskFrame) {
      // SVG <mask>: delegate mask surface generation to the mask frame.
      maskSurface = svgMaskFrame->GetMaskForMaskedFrame(&aContext,
                                                        aFrame,
                                                        cssPxToDevPxMatrix,
                                                        opacity,
                                                        &maskTransform);
    } else if (hasValidLayers) {
      // CSS mask-image: paint the mask layers into an A8 surface ourselves.
      gfxRect clipRect = aContext.GetClipExtents();
      {
        gfxContextMatrixAutoSaveRestore matRestore(&aContext);

        // Measure clip extents in device space.
        aContext.SetMatrix(gfxMatrix());
        clipRect = aContext.GetClipExtents();
      }
      IntRect drawRect = RoundedOut(ToRect(clipRect));

      RefPtr<DrawTarget> targetDT =
        aContext.GetDrawTarget()->CreateSimilarDrawTarget(drawRect.Size(),
                                                          SurfaceFormat::A8);
      if (!targetDT) {
        aContext.Restore();
        return;
      }
      // NOTE(review): this local |target| deliberately shadows the outer
      // |target| — it only exists to paint the mask; the outer |target| is
      // unchanged. The shadowing is confusing; confirm intent before renaming.
      RefPtr<gfxContext> target = new gfxContext(targetDT);
      target->SetMatrix(matrixAutoSaveRestore.Matrix() *
                        gfxMatrix::Translation(-drawRect.TopLeft()));

      // Generate mask surface.
      uint32_t flags = aBuilder->GetBackgroundPaintFlags() |
                       nsCSSRendering::PAINTBG_MASK_IMAGE;
      nsRenderingContext rc(target);
      nsCSSRendering::PaintBackgroundWithSC(aFrame->PresContext(),
                                            rc,
                                            aFrame,
                                            aDirtyRect,
                                            aBorderArea,
                                            firstFrame->StyleContext(),
                                            *aFrame->StyleBorder(),
                                            flags,
                                            nullptr,
                                            -1);
      maskSurface = targetDT->Snapshot();

      // Compute mask transform: surface space -> destination space.
      Matrix mat = ToMatrix(aContext.CurrentMatrix());
      mat.Invert();
      maskTransform = Matrix::Translation(drawRect.x, drawRect.y) * mat;
    }

    if ((svgMaskFrame || hasValidLayers) && !maskSurface) {
      // Entire surface is clipped out.
      aContext.Restore();
      return;
    }

    if (aFrame->StyleDisplay()->mMixBlendMode != NS_STYLE_BLEND_NORMAL) {
      // Create a temporary context to draw to so we can blend it back with
      // another operator.
      gfxRect clipRect;
      {
        gfxContextMatrixAutoSaveRestore matRestore(&aContext);

        aContext.SetMatrix(gfxMatrix());
        clipRect = aContext.GetClipExtents();
      }
      IntRect drawRect = RoundedOut(ToRect(clipRect));

      RefPtr<DrawTarget> targetDT =
        aContext.GetDrawTarget()->CreateSimilarDrawTarget(drawRect.Size(),
                                                          SurfaceFormat::B8G8R8A8);
      if (!targetDT) {
        aContext.Restore();
        return;
      }
      // Redirect all subsequent painting into the temporary context; the
      // result is blended back with the custom operator at the end.
      target = new gfxContext(targetDT);
      target->SetMatrix(aContext.CurrentMatrix() *
                        gfxMatrix::Translation(-drawRect.TopLeft()));
      targetOffset = drawRect.TopLeft();
    }

    if (clipPathFrame && !isTrivialClip) {
      // Non-trivial clip-path: fold the clip into the mask surface.
      Matrix clippedMaskTransform;
      RefPtr<SourceSurface> clipMaskSurface =
        clipPathFrame->GetClipMask(aContext, aFrame, cssPxToDevPxMatrix,
                                   &clippedMaskTransform, maskSurface,
                                   maskTransform);

      if (clipMaskSurface) {
        maskSurface = clipMaskSurface;
        maskTransform = clippedMaskTransform;
      }
    }

    if (opacity != 1.0f || svgMaskFrame  || hasValidLayers ||
        (clipPathFrame && !isTrivialClip)) {
      // Matching PopGroupAndBlend() happens after painting, below.
      target->PushGroupForBlendBack(gfxContentType::COLOR_ALPHA, opacity,
                                    maskSurface, maskTransform);
    }
  }

  /* If this frame has only a trivial clipPath, set up cairo's clipping now so
   * we can just do normal painting and get it clipped appropriately.
   */
  if (clipPathFrame && isTrivialClip) {
    aContext.Save();
    clipPathFrame->ApplyClipPath(aContext, aFrame, cssPxToDevPxMatrix);
  }

  /* Paint the child */
  if (effectProperties.HasValidFilter()) {
    RegularFramePaintCallback callback(aBuilder, aLayerManager,
                                       offsetToUserSpace);

    nsRegion dirtyRegion = aDirtyRect - offsetToBoundingBox;
    gfxMatrix tm = nsSVGIntegrationUtils::GetCSSPxToDevPxMatrix(aFrame);
    nsFilterInstance::PaintFilteredFrame(aFrame, target->GetDrawTarget(),
                                         tm, &callback, &dirtyRegion);
  } else {
    // No filter: let the layer manager paint directly into |target|,
    // temporarily swapping it in as the BasicLayerManager's target.
    target->SetMatrix(matrixAutoSaveRestore.Matrix());
    BasicLayerManager* basic = static_cast<BasicLayerManager*>(aLayerManager);
    RefPtr<gfxContext> oldCtx = basic->GetTarget();
    basic->SetTarget(target);
    aLayerManager->EndTransaction(FrameLayerBuilder::DrawPaintedLayer,
                                  aBuilder);
    basic->SetTarget(oldCtx);
  }

  if (clipPathFrame && isTrivialClip) {
    aContext.Restore();
  }

  /* No more effects, we're done. */
  if (!complexEffects) {
    return;
  }

  if (opacity != 1.0f || svgMaskFrame || hasValidLayers ||
      (clipPathFrame && !isTrivialClip)) {
    target->PopGroupAndBlend();
  }

  if (aFrame->StyleDisplay()->mMixBlendMode != NS_STYLE_BLEND_NORMAL) {
    // Blend the temporary surface back into aContext with the pattern;
    // the custom blend operator is applied by the surrounding group state.
    RefPtr<DrawTarget> targetDT = target->GetDrawTarget();
    target = nullptr;
    RefPtr<SourceSurface> targetSurf = targetDT->Snapshot();

    aContext.SetMatrix(gfxMatrix()); // This will be restored right after.
    RefPtr<gfxPattern> pattern =
      new gfxPattern(targetSurf,
                     Matrix::Translation(targetOffset.x, targetOffset.y));
    aContext.SetPattern(pattern);
    aContext.Paint();
  }

  aContext.Restore();
}
// Renders this SVG pattern's content into an offscreen surface (the pattern
// tile) and returns it, filling in *patternMatrix with the matrix the caller
// should use to position/tile the surface.
//
// @param aDrawTarget      Destination DT; used only to create a similar DT.
// @param patternMatrix    [out] tile-to-destination transform.
// @param aContextMatrix   The caller's current transform.
// @param aSource          The geometry frame being painted with this pattern.
// @param aFillOrStroke    Which paint (fill or stroke) is being resolved;
//                         stroke triggers non-scaling-stroke compensation.
// @param aGraphicOpacity  Group opacity applied around the tile's content.
// @param aOverrideBounds  Optional bbox override for the target geometry.
// @return The tile surface, or nullptr on any failure / nothing-to-draw case.
already_AddRefed<SourceSurface>
nsSVGPatternFrame::PaintPattern(const DrawTarget* aDrawTarget,
                                Matrix* patternMatrix,
                                const Matrix &aContextMatrix,
                                nsIFrame *aSource,
                                nsStyleSVGPaint nsStyleSVG::*aFillOrStroke,
                                float aGraphicOpacity,
                                const gfxRect *aOverrideBounds,
                                imgDrawingParams& aImgParams)
{
  /*
   * General approach:
   *    Set the content geometry stuff
   *    Calculate our bbox (using x,y,width,height & patternUnits &
   *                        patternTransform)
   *    Create the surface
   *    Calculate the content transformation matrix
   *    Get our children (we may need to get them from another Pattern)
   *    Call SVGPaint on all of our children
   *    Return
   */

  nsSVGPatternFrame* patternWithChildren = GetPatternWithChildren();
  if (!patternWithChildren) {
    // Either no kids or a bad reference
    return nullptr;
  }
  nsIFrame* firstKid = patternWithChildren->mFrames.FirstChild();

  const nsSVGViewBox& viewBox = GetViewBox();

  uint16_t patternContentUnits =
    GetEnumValue(SVGPatternElement::PATTERNCONTENTUNITS);
  uint16_t patternUnits =
    GetEnumValue(SVGPatternElement::PATTERNUNITS);

  /*
   * Get the content geometry information.  This is a little tricky --
   * our parent is probably a <defs>, but we are rendering in the context
   * of some geometry source.  Our content geometry information needs to
   * come from our rendering parent as opposed to our content parent.  We
   * get that information from aSource, which is passed to us from the
   * backend renderer.
   *
   * There are three "geometries" that we need:
   *   1) The bounding box for the pattern.  We use this to get the
   *      width and height for the surface, and as the return to
   *      GetBBox.
   *   2) The transformation matrix for the pattern.  This is not *quite*
   *      the same as the canvas transformation matrix that we will
   *      provide to our rendering children since we "fudge" it a little
   *      to get the renderer to handle the translations correctly for us.
   *   3) The CTM that we return to our children who make up the pattern.
   */

  // Get all of the information we need from our "caller" -- i.e.
  // the geometry that is being rendered with a pattern
  gfxRect callerBBox;
  if (NS_FAILED(GetTargetGeometry(&callerBBox,
                                  viewBox,
                                  patternContentUnits, patternUnits,
                                  aSource,
                                  aContextMatrix,
                                  aOverrideBounds))) {
    return nullptr;
  }

  // Construct the CTM that we will provide to our children when we
  // render them into the tile.
  gfxMatrix ctm = ConstructCTM(viewBox, patternContentUnits, patternUnits,
                               callerBBox, aContextMatrix, aSource);
  if (ctm.IsSingular()) {
    return nullptr;
  }

  if (patternWithChildren->mCTM) {
    *patternWithChildren->mCTM = ctm;
  } else {
    patternWithChildren->mCTM = new gfxMatrix(ctm);
  }

  // Get the bounding box of the pattern.  This will be used to determine
  // the size of the surface, and will also be used to define the bounding
  // box for the pattern tile.
  gfxRect bbox =
    GetPatternRect(patternUnits, callerBBox, aContextMatrix, aSource);
  if (bbox.Width() <= 0.0 || bbox.Height() <= 0.0) {
    return nullptr;
  }

  // Get the pattern transform
  Matrix patternTransform = ToMatrix(GetPatternTransform());

  // revert the vector effect transform so that the pattern appears unchanged
  if (aFillOrStroke == &nsStyleSVG::mStroke) {
    gfxMatrix userToOuterSVG;
    if (nsSVGUtils::GetNonScalingStrokeTransform(aSource, &userToOuterSVG)) {
      patternTransform *= ToMatrix(userToOuterSVG);
      if (patternTransform.IsSingular()) {
        NS_WARNING("Singular matrix painting non-scaling-stroke");
        return nullptr;
      }
    }
  }

  // Get the transformation matrix that we will hand to the renderer's pattern
  // routine.
  *patternMatrix = GetPatternMatrix(patternUnits, patternTransform,
                                    bbox, callerBBox, aContextMatrix);
  if (patternMatrix->IsSingular()) {
    return nullptr;
  }

  // Now that we have all of the necessary geometries, we can
  // create our surface.
  gfxRect transformedBBox =
    ThebesRect(patternTransform.TransformBounds(ToRect(bbox)));

  bool resultOverflows;
  IntSize surfaceSize =
    nsSVGUtils::ConvertToSurfaceSize(transformedBBox.Size(), &resultOverflows);

  // 0 disables rendering, < 0 is an error
  if (surfaceSize.width <= 0 || surfaceSize.height <= 0) {
    return nullptr;
  }

  gfxFloat patternWidth = bbox.Width();
  gfxFloat patternHeight = bbox.Height();

  if (resultOverflows ||
      patternWidth != surfaceSize.width ||
      patternHeight != surfaceSize.height) {
    // scale drawing to pattern surface size
    gfxMatrix tempTM =
      gfxMatrix(surfaceSize.width / patternWidth, 0.0,
                0.0, surfaceSize.height / patternHeight,
                0.0, 0.0);
    patternWithChildren->mCTM->PreMultiply(tempTM);

    // and rescale pattern to compensate
    patternMatrix->PreScale(patternWidth / surfaceSize.width,
                            patternHeight / surfaceSize.height);
  }

  RefPtr<DrawTarget> dt =
    aDrawTarget->CreateSimilarDrawTarget(surfaceSize, SurfaceFormat::B8G8R8A8);
  if (!dt || !dt->IsValid()) {
    return nullptr;
  }
  dt->ClearRect(Rect(0, 0, surfaceSize.width, surfaceSize.height));

  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(dt);
  MOZ_ASSERT(ctx); // already checked the draw target above

  if (aGraphicOpacity != 1.0f) {
    ctx->Save();
    ctx->PushGroupForBlendBack(gfxContentType::COLOR_ALPHA, aGraphicOpacity);
  }

  // OK, now render -- note that we use "firstKid", which
  // we got at the beginning because it takes care of the
  // referenced pattern situation for us

  if (aSource->IsFrameOfType(nsIFrame::eSVGGeometry)) {
    // Set the geometrical parent of the pattern we are rendering
    patternWithChildren->mSource = static_cast<SVGGeometryFrame*>(aSource);
  }

  // Delay checking NS_FRAME_DRAWING_AS_PAINTSERVER bit until here so we can
  // give back a clear surface if there's a loop
  if (!(patternWithChildren->GetStateBits() & NS_FRAME_DRAWING_AS_PAINTSERVER)) {
    AutoSetRestorePaintServerState paintServer(patternWithChildren);
    for (nsIFrame* kid = firstKid; kid;
         kid = kid->GetNextSibling()) {
      // The CTM of each frame referencing us can be different
      nsSVGDisplayableFrame* SVGFrame = do_QueryFrame(kid);
      if (SVGFrame) {
        SVGFrame->NotifySVGChanged(nsSVGDisplayableFrame::TRANSFORM_CHANGED);
      }
      gfxMatrix tm = *(patternWithChildren->mCTM);
      if (kid->GetContent()->IsSVGElement()) {
        // Prepend each child's own local transform onto the shared CTM.
        tm = static_cast<nsSVGElement*>(kid->GetContent())->
               PrependLocalTransformsTo(tm, eUserSpaceToParent);
      }
      nsSVGUtils::PaintFrameWithEffects(kid, *ctx, tm, aImgParams);
    }
  }

  // Clear the temporary geometry parent set above.
  patternWithChildren->mSource = nullptr;

  if (aGraphicOpacity != 1.0f) {
    ctx->PopGroupAndBlend();
    ctx->Restore();
  }

  // caller now owns the surface
  return dt->Snapshot();
}
// Initializes this imgFrame by rasterizing aDrawable into a surface of
// aSize / aFormat, then marks the frame as fully decoded.
// (Older variant using GraphicsFilter; see the SamplingFilter overload for
// the newer equivalent.)
//
// @return NS_OK on success; NS_ERROR_FAILURE for an illegal size, or
//         NS_ERROR_OUT_OF_MEMORY on allocation failure. Failure paths set
//         mAborted before returning.
nsresult
imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                           const nsIntSize& aSize,
                           const SurfaceFormat aFormat,
                           GraphicsFilter aFilter,
                           uint32_t aImageFlags)
{
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!AllowedImageSize(aSize.width, aSize.height)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mOffset.MoveTo(0, 0);
  mSize.SizeTo(aSize.width, aSize.height);

  mFormat = aFormat;
  mPaletteDepth = 0;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface =
    gfxPlatform::GetPlatform()->CanRenderContentToDataSurface();

  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mImageSurface, "Called imgFrame::InitWithDrawable() twice?");

    mVBuf = AllocateBufferForImage(mSize, mFormat);
    if (!mVBuf) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    int32_t stride = VolatileSurfaceStride(mSize, mFormat);
    // Lock the volatile buffer for the remainder of this function.
    VolatileBufferPtr<uint8_t> ptr(mVBuf);
    if (!ptr) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
    // Freshly-mapped volatile pages are already zeroed; only heap-backed
    // buffers need an explicit clear.
    if (mVBuf->OnHeap()) {
      memset(ptr, 0, stride * mSize.height);
    }
    mImageSurface = CreateLockedSurface(mVBuf, mSize, mFormat);

    target = gfxPlatform::GetPlatform()->
      CreateDrawTargetForData(ptr, mSize, stride, mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead.  This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that
    // in the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    target = gfxPlatform::GetPlatform()->
      CreateOffscreenContentDrawTarget(mSize, mFormat);
  }

  if (!target) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  nsIntRect imageRect(0, 0, mSize.width, mSize.height);
  nsRefPtr<gfxContext> ctx = new gfxContext(target);
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, mSize,
                             ImageRegion::Create(imageRect),
                             mFormat, aFilter, aImageFlags);

  if (canUseDataSurface && !mImageSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  MOZ_ASSERT(IsImageComplete());

  return NS_OK;
}
// Renders the target frame's fill or stroke paint (depending on whether
// aSource is mFillPaint or mStrokePaint) into an offscreen surface covering
// aSource->mNeededBounds, for use as a filter source image.
// Uses a legacy gfxASurface when aTargetSurface is non-null, otherwise a
// Moz2D DrawTarget.
//
// @return NS_OK on success (including when nothing could be painted);
//         NS_ERROR_OUT_OF_MEMORY if the offscreen surface/DT can't be made.
nsresult
nsSVGFilterInstance::BuildSourcePaint(SourceInfo *aSource,
                                      gfxASurface* aTargetSurface,
                                      DrawTarget* aTargetDT)
{
  nsIntRect neededRect = aSource->mNeededBounds;

  RefPtr<DrawTarget> offscreenDT;
  nsRefPtr<gfxASurface> offscreenSurface;
  nsRefPtr<gfxContext> ctx;
  if (aTargetSurface) {
    // Legacy path: paint into a Thebes surface.
    offscreenSurface = gfxPlatform::GetPlatform()->CreateOffscreenSurface(
      neededRect.Size(), GFX_CONTENT_COLOR_ALPHA);
    if (!offscreenSurface || offscreenSurface->CairoStatus()) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    ctx = new gfxContext(offscreenSurface);
  } else {
    // Moz2D path: paint into a DrawTarget.
    offscreenDT = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
      ToIntSize(neededRect.Size()), FORMAT_B8G8R8A8);
    if (!offscreenDT) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    ctx = new gfxContext(offscreenDT);
  }

  // Make the surface's origin coincide with neededRect's top-left.
  ctx->Translate(-neededRect.TopLeft());

  nsRenderingContext tmpCtx;
  tmpCtx.Init(mTargetFrame->PresContext()->DeviceContext(), ctx);

  // Rectangle to fill, expressed in user space (filter space mapped back
  // through the inverse user-space-to-filter-space transform).
  gfxMatrix m = GetUserSpaceToFilterSpaceTransform();
  m.Invert();
  gfxRect r = m.TransformBounds(mFilterSpaceBounds);

  gfxMatrix deviceToFilterSpace =
    GetFilterSpaceToDeviceSpaceTransform().Invert();
  gfxContext *gfx = tmpCtx.ThebesContext();
  gfx->Multiply(deviceToFilterSpace);
  gfx->Save();

  gfxMatrix matrix =
    nsSVGUtils::GetCanvasTM(mTargetFrame, nsISVGChildFrame::FOR_PAINTING,
                            mTransformRoot);
  if (!matrix.IsSingular()) {
    gfx->Multiply(matrix);
    gfx->Rectangle(r);
    // Fill with the frame's fill or stroke paint; if neither setup call
    // succeeds, the surface stays transparent — still a valid source.
    if ((aSource == &mFillPaint &&
         nsSVGUtils::SetupCairoFillPaint(mTargetFrame, gfx)) ||
        (aSource == &mStrokePaint &&
         nsSVGUtils::SetupCairoStrokePaint(mTargetFrame, gfx))) {
      gfx->Fill();
    }
  }
  gfx->Restore();

  if (offscreenSurface) {
    // Wrap the Thebes surface for consumption by the Moz2D filter pipeline.
    aSource->mSourceSurface =
      gfxPlatform::GetPlatform()->GetSourceSurfaceForSurface(aTargetDT,
                                                             offscreenSurface);
  } else {
    aSource->mSourceSurface = offscreenDT->Snapshot();
  }
  aSource->mSurfaceRect = ToIntRect(neededRect);

  return NS_OK;
}
// Renders the captured window's document into mData (a raw pixel buffer),
// wraps the result in a layers::CairoImage, and publishes it as mImage under
// mMonitor. Bails out silently if the window, its pres context, the pixel
// buffer, or the draw target is unavailable.
void MediaEngineTabVideoSource::Draw() {
  nsCOMPtr<nsPIDOMWindow> win = do_QueryInterface(mWindow);
  if (!win) {
    return;
  }

  // CSS-pixel size of the window's content area; nothing to capture if empty.
  int32_t innerWidth, innerHeight;
  win->GetInnerWidth(&innerWidth);
  win->GetInnerHeight(&innerHeight);
  if (innerWidth == 0 || innerHeight == 0) {
    return;
  }

  // Convert to device pixels using the window's device pixel ratio.
  float pixelRatio;
  win->GetDevicePixelRatio(&pixelRatio);
  const int deviceInnerWidth = (int)(pixelRatio * innerWidth);
  const int deviceInnerHeight = (int)(pixelRatio * innerHeight);

  // Clamp the capture size to mBufWidthMax x mBufHeightMax, preserving the
  // aspect ratio by scaling both axes with the smaller scale factor.
  IntSize size;
  if ((deviceInnerWidth <= mBufWidthMax) && (deviceInnerHeight <= mBufHeightMax)) {
    size = IntSize(deviceInnerWidth, deviceInnerHeight);
  } else {
    const float scaleWidth = (float)mBufWidthMax / (float)deviceInnerWidth;
    const float scaleHeight = (float)mBufHeightMax / (float)deviceInnerHeight;
    const float scale = scaleWidth < scaleHeight ? scaleWidth : scaleHeight;
    size = IntSize((int)(scale * deviceInnerWidth), (int)(scale * deviceInnerHeight));
  }

  gfxImageFormat format = gfxImageFormat::RGB24;
  uint32_t stride = gfxASurface::FormatStrideForWidth(format, size.width);

  // Grow the pixel buffer if the required size exceeds what we have.
  // NOTE(review): mData appears to be a smart pointer (rwget() below) —
  // presumably it frees any previous buffer on assignment; verify, else this
  // would leak the old allocation.
  if (mDataSize < static_cast<size_t>(stride * size.height)) {
    mDataSize = stride * size.height;
    mData = static_cast<unsigned char*>(malloc(mDataSize));
  }
  if (!mData) {
    return;
  }

  nsRefPtr<nsPresContext> presContext;
  nsIDocShell* docshell = win->GetDocShell();
  if (docshell) {
    docshell->GetPresContext(getter_AddRefs(presContext));
  }
  if (!presContext) {
    return;
  }

  // Render over a white background.
  nscolor bgColor = NS_RGB(255, 255, 255);
  nsCOMPtr<nsIPresShell> presShell = presContext->PresShell();
  uint32_t renderDocFlags = 0;
  if (!mScrollWithPage) {
    // Capture the document from its origin rather than the current scroll
    // position.
    renderDocFlags |= nsIPresShell::RENDER_IGNORE_VIEWPORT_SCROLLING;
  }
  // Region to render, in app units, covering the whole inner window.
  nsRect r(0, 0, nsPresContext::CSSPixelsToAppUnits((float)innerWidth),
           nsPresContext::CSSPixelsToAppUnits((float)innerHeight));

  nsRefPtr<layers::ImageContainer> container = layers::LayerManager::CreateImageContainer();
  // Draw directly into our own pixel buffer via a data-backed draw target.
  RefPtr<DrawTarget> dt =
    Factory::CreateDrawTargetForData(BackendType::CAIRO,
                                     mData.rwget(),
                                     size,
                                     stride,
                                     SurfaceFormat::B8G8R8X8);
  if (!dt) {
    return;
  }
  nsRefPtr<gfxContext> context = new gfxContext(dt);
  // Scale from CSS pixels (document space) to the clamped capture size.
  context->SetMatrix(context->CurrentMatrix().Scale((((float) size.width)/innerWidth),
                                                    (((float) size.height)/innerHeight)));

  NS_ENSURE_SUCCESS_VOID(presShell->RenderDocument(r, renderDocFlags, bgColor, context));

  RefPtr<SourceSurface> surface = dt->Snapshot();
  if (!surface) {
    return;
  }

  layers::CairoImage::Data cairoData;
  cairoData.mSize = size;
  cairoData.mSourceSurface = surface;

  nsRefPtr<layers::CairoImage> image = new layers::CairoImage();
  image->SetData(cairoData);

  // Publish the frame; mImage is read elsewhere, so take the monitor.
  MonitorAutoLock mon(mMonitor);
  mImage = image;
}
/**
 * Paints mTargetFrame (via mPaintCallback) into an offscreen surface covering
 * mSourceGraphic.mNeededBounds, producing the SourceGraphic input for the
 * filter. No-op (NS_OK) when the needed bounds are empty.
 *
 * @param aTargetSurface  If non-null, render via a Thebes gfxASurface;
 *                        otherwise render via a Moz2D DrawTarget.
 * @param aTargetDT       Used only to wrap the Thebes surface as a
 *                        SourceSurface in the aTargetSurface path.
 * @return NS_OK on success, NS_ERROR_OUT_OF_MEMORY if the offscreen buffer
 *         could not be created, NS_ERROR_FAILURE if the dirty rect cannot be
 *         represented as an integer rect.
 */
nsresult
nsSVGFilterInstance::BuildSourceImage(gfxASurface* aTargetSurface,
                                      DrawTarget* aTargetDT)
{
  nsIntRect neededRect = mSourceGraphic.mNeededBounds;
  if (neededRect.IsEmpty()) {
    return NS_OK;
  }

  // Two rendering paths, as in BuildSourcePaint: Thebes surface or Moz2D
  // draw target depending on what the caller is rendering to.
  RefPtr<DrawTarget> offscreenDT;
  nsRefPtr<gfxASurface> offscreenSurface;
  nsRefPtr<gfxContext> ctx;
  if (aTargetSurface) {
    offscreenSurface = gfxPlatform::GetPlatform()->CreateOffscreenSurface(
      neededRect.Size(), GFX_CONTENT_COLOR_ALPHA);
    if (!offscreenSurface || offscreenSurface->CairoStatus()) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    ctx = new gfxContext(offscreenSurface);
  } else {
    offscreenDT = gfxPlatform::GetPlatform()->
      CreateOffscreenContentDrawTarget(ToIntSize(neededRect.Size()),
                                       FORMAT_B8G8R8A8);
    if (!offscreenDT) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    ctx = new gfxContext(offscreenDT);
  }

  // The offscreen buffer's origin corresponds to neededRect.TopLeft() in
  // filter space.
  ctx->Translate(-neededRect.TopLeft());

  nsRenderingContext tmpCtx;
  tmpCtx.Init(mTargetFrame->PresContext()->DeviceContext(), ctx);

  // Map the needed filter-space rect back to user space to obtain the dirty
  // rect that the paint callback should cover.
  // NOTE(review): the return value of Invert() is ignored; a singular
  // transform would go undetected here — TODO confirm it cannot be singular
  // at this point.
  gfxMatrix m = GetUserSpaceToFilterSpaceTransform();
  m.Invert();
  gfxRect r = m.TransformBounds(neededRect);
  r.RoundOut();
  nsIntRect dirty;
  if (!gfxUtils::GfxRectToIntRect(r, &dirty))
    return NS_ERROR_FAILURE;

  // SVG graphics paint to device space, so we need to set an initial device
  // space to filter space transform on the gfxContext that SourceGraphic
  // and SourceAlpha will paint to.
  //
  // (In theory it would be better to minimize error by having filtered SVG
  // graphics temporarily paint to user space when painting the sources and
  // only set a user space to filter space transform on the gfxContext
  // (since that would eliminate the transform multiplications from user
  // space to device space and back again). However, that would make the
  // code more complex while being hard to get right without introducing
  // subtle bugs, and in practice it probably makes no real difference.)
  gfxMatrix deviceToFilterSpace = GetFilterSpaceToDeviceSpaceTransform().Invert();
  tmpCtx.ThebesContext()->Multiply(deviceToFilterSpace);
  // Let the frame's paint callback draw the source graphic.
  mPaintCallback->Paint(&tmpCtx, mTargetFrame, &dirty, mTransformRoot);

  // Hand the rendered pixels back as a SourceSurface.
  RefPtr<SourceSurface> sourceGraphicSource;

  if (offscreenSurface) {
    sourceGraphicSource =
      gfxPlatform::GetPlatform()->GetSourceSurfaceForSurface(aTargetDT, offscreenSurface);
  } else {
    sourceGraphicSource = offscreenDT->Snapshot();
  }

  mSourceGraphic.mSourceSurface = sourceGraphicSource;
  mSourceGraphic.mSurfaceRect = ToIntRect(neededRect);

  return NS_OK;
}
/**
 * Builds an A8 mask surface for this (non-trivial) clipPath, sized to the
 * reference context's device-space clip extents. The clipPath's children are
 * painted into the mask, honoring any clipPath applied to the clipPath itself
 * or to individual children (recursively, with chain-length and loop guards).
 * If aExtraMask is supplied, the built mask is intersected with it.
 *
 * @param aReferenceContext   Context whose draw target and clip extents define
 *                            the mask surface.
 * @param aClippedFrame       The frame being clipped (used for the clip-path
 *                            transform).
 * @param aMatrix             The outer transform to apply to our children.
 * @param aMaskTransform      Out: transform to use when drawing with the mask.
 * @param aExtraMask          Optional additional mask to combine in.
 * @param aExtraMasksTransform Transform for aExtraMask.
 * @return The mask surface, or nullptr when nothing would be drawn (empty
 *         clip extents) or when a reference chain/loop limit is hit.
 */
already_AddRefed<SourceSurface>
nsSVGClipPathFrame::GetClipMask(gfxContext& aReferenceContext,
                                nsIFrame* aClippedFrame,
                                const gfxMatrix& aMatrix,
                                Matrix* aMaskTransform,
                                SourceSurface* aExtraMask,
                                const Matrix& aExtraMasksTransform)
{
  MOZ_ASSERT(!IsTrivial(), "Caller needs to use ApplyClipPath");

  DrawTarget& aReferenceDT = *aReferenceContext.GetDrawTarget();

  // A clipPath can reference another clipPath. We re-enter this method for
  // each clipPath in a reference chain, so here we limit chain length:
  static int16_t sRefChainLengthCounter = AutoReferenceLimiter::notReferencing;
  AutoReferenceLimiter
    refChainLengthLimiter(&sRefChainLengthCounter,
                          MAX_SVG_CLIP_PATH_REFERENCE_CHAIN_LENGTH);
  if (!refChainLengthLimiter.Reference()) {
    return nullptr; // Reference chain is too long!
  }

  // And to prevent reference loops we check that this clipPath only appears
  // once in the reference chain (if any) that we're currently processing:
  AutoReferenceLimiter refLoopDetector(&mReferencing, 1);
  if (!refLoopDetector.Reference()) {
    return nullptr; // Reference loop!
  }

  // Compute the device-space clip extents with an identity matrix so the
  // mask surface only needs to cover what can actually be drawn.
  IntRect devSpaceClipExtents;
  {
    gfxContextMatrixAutoSaveRestore autoRestoreMatrix(&aReferenceContext);

    aReferenceContext.SetMatrix(gfxMatrix());
    gfxRect rect = aReferenceContext.GetClipExtents();
    devSpaceClipExtents = RoundedOut(ToRect(rect));
    if (devSpaceClipExtents.IsEmpty()) {
      // We don't need to create a mask surface, all drawing is clipped anyway.
      return nullptr;
    }
  }

  // NOTE(review): maskDT is used without a null check below — TODO confirm
  // CreateSimilarDrawTarget cannot fail here.
  RefPtr<DrawTarget> maskDT =
    aReferenceDT.CreateSimilarDrawTarget(devSpaceClipExtents.Size(),
                                         SurfaceFormat::A8);

  // Device-space transform shifted so the mask surface's origin is the
  // top-left of the clip extents.
  gfxMatrix mat = aReferenceContext.CurrentMatrix() *
                    gfxMatrix::Translation(-devSpaceClipExtents.TopLeft());

  // Paint this clipPath's contents into maskDT:
  {
    RefPtr<gfxContext> ctx = new gfxContext(maskDT);
    ctx->SetMatrix(mat);

    // We need to set mMatrixForChildren here so that under the PaintSVG calls
    // on our children (below) our GetCanvasTM() method will return the correct
    // transform.
    mMatrixForChildren = GetClipPathTransform(aClippedFrame) * aMatrix;

    // Check if this clipPath is itself clipped by another clipPath:
    nsSVGClipPathFrame* clipPathThatClipsClipPath =
      nsSVGEffects::GetEffectProperties(this).GetClipPathFrame(nullptr);
    bool clippingOfClipPathRequiredMasking;
    if (clipPathThatClipsClipPath) {
      ctx->Save();
      // Trivial clip paths can be applied as a geometric clip; non-trivial
      // ones require building and blending a mask surface.
      clippingOfClipPathRequiredMasking = !clipPathThatClipsClipPath->IsTrivial();
      if (!clippingOfClipPathRequiredMasking) {
        clipPathThatClipsClipPath->ApplyClipPath(*ctx, aClippedFrame, aMatrix);
      } else {
        Matrix maskTransform;
        RefPtr<SourceSurface> mask =
          clipPathThatClipsClipPath->GetClipMask(*ctx, aClippedFrame,
                                                 aMatrix, &maskTransform);
        ctx->PushGroupForBlendBack(gfxContentType::ALPHA, 1.0,
                                   mask, maskTransform);
        // The corresponding PopGroupAndBlend call below will mask the
        // blend using |mask|.
      }
    }

    // Paint our children into the mask:
    for (nsIFrame* kid = mFrames.FirstChild(); kid;
         kid = kid->GetNextSibling()) {
      nsISVGChildFrame* SVGFrame = do_QueryFrame(kid);
      if (SVGFrame) {
        // The CTM of each frame referencing us can be different.
        SVGFrame->NotifySVGChanged(nsISVGChildFrame::TRANSFORM_CHANGED);

        bool isOK = true;
        // Children of this clipPath may themselves be clipped.
        nsSVGClipPathFrame *clipPathThatClipsChild =
          nsSVGEffects::GetEffectProperties(kid).GetClipPathFrame(&isOK);
        if (!isOK) {
          // Broken clip-path reference on this child; skip it.
          continue;
        }

        bool childsClipPathRequiresMasking;
        if (clipPathThatClipsChild) {
          childsClipPathRequiresMasking = !clipPathThatClipsChild->IsTrivial();
          ctx->Save();
          if (!childsClipPathRequiresMasking) {
            clipPathThatClipsChild->ApplyClipPath(*ctx, aClippedFrame, aMatrix);
          } else {
            Matrix maskTransform;
            RefPtr<SourceSurface> mask =
              clipPathThatClipsChild->GetClipMask(*ctx, aClippedFrame,
                                                  aMatrix, &maskTransform);
            ctx->PushGroupForBlendBack(gfxContentType::ALPHA, 1.0,
                                       mask, maskTransform);
            // The corresponding PopGroupAndBlend call below will mask the
            // blend using |mask|.
          }
        }

        // Prepend the child's own local transforms (for SVG elements) so it
        // paints in its own user space.
        gfxMatrix toChildsUserSpace = mMatrixForChildren;
        nsIFrame* child = do_QueryFrame(SVGFrame);
        nsIContent* childContent = child->GetContent();
        if (childContent->IsSVGElement()) {
          toChildsUserSpace =
            static_cast<const nsSVGElement*>(childContent)->
              PrependLocalTransformsTo(mMatrixForChildren, eUserSpaceToParent);
        }

        // Our children have NS_STATE_SVG_CLIPPATH_CHILD set on them, and
        // nsSVGPathGeometryFrame::Render checks for that state bit and paints
        // only the geometry (opaque black) if set.
        SVGFrame->PaintSVG(*ctx, toChildsUserSpace);

        if (clipPathThatClipsChild) {
          if (childsClipPathRequiresMasking) {
            ctx->PopGroupAndBlend();
          }
          ctx->Restore();
        }
      }
    }

    if (clipPathThatClipsClipPath) {
      if (clippingOfClipPathRequiredMasking) {
        ctx->PopGroupAndBlend();
      }
      ctx->Restore();
    }
  }

  // Moz2D transforms in the opposite direction to Thebes
  mat.Invert();

  if (aExtraMask) {
    // We could potentially do this more efficiently with OPERATOR_IN
    // but that operator does not work well on CG or D2D
    RefPtr<SourceSurface> currentMask = maskDT->Snapshot();
    Matrix transform = maskDT->GetTransform();
    maskDT->SetTransform(Matrix());
    maskDT->ClearRect(Rect(0, 0,
                           devSpaceClipExtents.width,
                           devSpaceClipExtents.height));
    maskDT->SetTransform(aExtraMasksTransform * transform);
    // draw currentMask with the inverse of the transform that we just applied
    // so that it ends up in the same spot, with aExtraMask transformed by
    // aExtraMasksTransform
    maskDT->MaskSurface(SurfacePattern(currentMask, ExtendMode::CLAMP,
                                       aExtraMasksTransform.Inverse() * ToMatrix(mat)),
                        aExtraMask,
                        Point(0, 0));
  }

  *aMaskTransform = ToMatrix(mat);
  return maskDT->Snapshot();
}
static void DrawVelGraph(const nsIntRect& aClipRect, LayerManagerComposite* aManager, Layer* aLayer) { Compositor* compositor = aManager->GetCompositor(); gfx::Rect clipRect(aClipRect.x, aClipRect.y, aClipRect.width, aClipRect.height); TimeStamp now = TimeStamp::Now(); LayerVelocityUserData* velocityData = GetVelocityData(aLayer); if (velocityData->mData.size() >= 1 && now > velocityData->mData[velocityData->mData.size() - 1].mFrameTime + TimeDuration::FromMilliseconds(200)) { // clear stale data velocityData->mData.clear(); } const gfx::Point layerTransform = GetScrollData(aLayer); velocityData->mData.push_back( LayerVelocityUserData::VelocityData(now, static_cast<int>(layerTransform.x), static_cast<int>(layerTransform.y))); // TODO: dump to file // XXX: Uncomment these lines to enable ScrollGraph logging. This is // useful for HVGA phones or to output the data to accurate // graphing software. // printf_stderr("ScrollGraph (%p): %f, %f\n", // aLayer, layerTransform.x, layerTransform.y); // Keep a circular buffer of 100. 
size_t circularBufferSize = 100; if (velocityData->mData.size() > circularBufferSize) { velocityData->mData.erase(velocityData->mData.begin()); } if (velocityData->mData.size() == 1) { return; } // Clear and disable the graph when it's flat for (size_t i = 1; i < velocityData->mData.size(); i++) { if (velocityData->mData[i - 1].mPoint != velocityData->mData[i].mPoint) { break; } if (i == velocityData->mData.size() - 1) { velocityData->mData.clear(); return; } } if (aLayer->GetEffectiveVisibleRegion().GetBounds().width < 300 || aLayer->GetEffectiveVisibleRegion().GetBounds().height < 300) { // Don't want a graph for smaller layers return; } aManager->SetDebugOverlayWantsNextFrame(true); const Matrix4x4& transform = aLayer->GetEffectiveTransform(); nsIntRect bounds = aLayer->GetEffectiveVisibleRegion().GetBounds(); IntSize graphSize = IntSize(200, 100); Rect graphRect = Rect(bounds.x, bounds.y, graphSize.width, graphSize.height); RefPtr<DrawTarget> dt = aManager->CreateDrawTarget(graphSize, SurfaceFormat::B8G8R8A8); dt->FillRect(Rect(0, 0, graphSize.width, graphSize.height), ColorPattern(Color(0.2f,0,0,1))); int yScaleFactor = 3; Point prev = Point(0,0); bool first = true; for (int32_t i = (int32_t)velocityData->mData.size() - 2; i >= 0; i--) { const gfx::Point& p1 = velocityData->mData[i+1].mPoint; const gfx::Point& p2 = velocityData->mData[i].mPoint; int vel = sqrt((p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y)); Point next = Point(graphRect.width / circularBufferSize * i, graphRect.height - vel/yScaleFactor); if (first) { first = false; } else { dt->StrokeLine(prev, next, ColorPattern(Color(0,1,0,1))); } prev = next; } RefPtr<DataTextureSource> textureSource = compositor->CreateDataTextureSource(); RefPtr<SourceSurface> snapshot = dt->Snapshot(); RefPtr<DataSourceSurface> data = snapshot->GetDataSurface(); textureSource->Update(data); EffectChain effectChain; effectChain.mPrimaryEffect = CreateTexturedEffect(SurfaceFormat::B8G8R8A8, textureSource, 
Filter::POINT, true); compositor->DrawQuad(graphRect, clipRect, effectChain, 1.0f, transform); }