// Paints mSurface into aContext over mBounds, honouring the layer's
// Y-flip flag, opacity, optional mask layer, and compositing operator.
void
CopyableCanvasLayer::PaintWithOpacity(gfxContext* aContext,
                                      float aOpacity,
                                      Layer* aMaskLayer,
                                      gfxContext::GraphicsOperator aOperator)
{
  if (!mSurface) {
    NS_WARNING("No valid surface to draw!");
    return;
  }

  // Wrap the canvas surface in a pattern; EXTEND_PAD prevents sampling
  // outside the surface at its edges.
  nsRefPtr<gfxPattern> pat = new gfxPattern(mSurface);
  pat->SetFilter(mFilter);
  pat->SetExtend(gfxPattern::EXTEND_PAD);

  gfxMatrix m;
  if (mNeedsYFlip) {
    // Save the CTM so it can be restored after painting upside-down.
    m = aContext->CurrentMatrix();
    aContext->Translate(gfxPoint(0.0, mBounds.height));
    aContext->Scale(1.0, -1.0);
  }

  // If content opaque, then save off current operator and set to source.
  // This ensures that alpha is not applied even if the source surface
  // has an alpha channel
  gfxContext::GraphicsOperator savedOp;  // only written/read when CONTENT_OPAQUE
  if (GetContentFlags() & CONTENT_OPAQUE) {
    savedOp = aContext->CurrentOperator();
    aContext->SetOperator(gfxContext::OPERATOR_SOURCE);
  }

  // NOTE(review): AutoSetOperator applies aOperator here, which appears to
  // override the OPERATOR_SOURCE set just above for opaque content —
  // confirm the intended precedence between the two.
  AutoSetOperator setOperator(aContext, aOperator);
  aContext->NewPath();
  // No need to snap here; our transform is already set up to snap our rect
  aContext->Rectangle(gfxRect(0, 0, mBounds.width, mBounds.height));
  aContext->SetPattern(pat);

  FillWithMask(aContext, aOpacity, aMaskLayer);

  // Restore surface operator
  if (GetContentFlags() & CONTENT_OPAQUE) {
    aContext->SetOperator(savedOp);
  }

  if (mNeedsYFlip) {
    aContext->SetMatrix(m);
  }
}
// Returns the nearest ancestor's display port transformed into this
// layer's space; falls back to the widget size when no ancestor defines
// a display port.
gfxRect
ThebesLayerComposite::GetDisplayPort()
{
  // We use GetTransform instead of GetEffectiveTransform in this function
  // as we want the transform of the shadowable layers and not that of the
  // shadow layers, which may have been modified due to async scrolling/
  // zooming.
  gfx3DMatrix transform = GetTransform();

  // Find out the area of the nearest display-port to invalidate retained
  // tiles.
  gfxRect displayPort;
  gfxSize parentResolution = GetEffectiveResolution();
  for (ContainerLayer* parent = GetParent();
       parent;
       parent = parent->GetParent()) {
    const FrameMetrics& metrics = parent->GetFrameMetrics();
    if (displayPort.IsEmpty()) {
      if (!metrics.mDisplayPort.IsEmpty()) {
        // We use the bounds to cut down on complication/computation time.
        // This will be incorrect when the transform involves rotation, but
        // it'd be quite hard to retain invalid tiles correctly in this
        // situation anyway.
        displayPort = gfxRect(metrics.mDisplayPort.x,
                              metrics.mDisplayPort.y,
                              metrics.mDisplayPort.width,
                              metrics.mDisplayPort.height);
        displayPort.ScaleRoundOut(parentResolution.width,
                                  parentResolution.height);
      }
      // Divide out each traversed ancestor's resolution until a display
      // port is found, so the port is only scaled by the resolution of
      // layers above it.
      parentResolution.width /= metrics.mResolution.scale;
      parentResolution.height /= metrics.mResolution.scale;
    }
    if (parent->UseIntermediateSurface()) {
      transform.PreMultiply(parent->GetTransform());
    }
  }

  // If no display port was found, use the widget size from the layer manager.
  if (displayPort.IsEmpty()) {
    LayerManagerComposite* manager =
      static_cast<LayerManagerComposite*>(Manager());
    const nsIntSize& widgetSize = manager->GetWidgetSize();
    displayPort.width = widgetSize.width;
    displayPort.height = widgetSize.height;
  }

  // Transform the display port into layer space.
  displayPort = transform.Inverse().TransformBounds(displayPort);
  return displayPort;
}
/**
 * Paint a single hexadecimal digit glyph at aPt using the current color.
 * Masking an A8 surface would be nicer, but the Quartz cairo backend
 * doesn't generally support masking with surfaces, so instead we emit a
 * filled 1x1 rectangle for each set bit of the glyph's micro-font bitmap.
 */
static void
DrawHexChar(gfxContext *aContext, const gfxPoint& aPt, uint32_t aDigit)
{
    aContext->NewPath();
    uint32_t bits = glyphMicroFont[aDigit];
    // Bits are consumed row-major, LSB first.
    for (int row = 0; row < MINIFONT_HEIGHT; ++row) {
        for (int col = 0; col < MINIFONT_WIDTH; ++col) {
            if (bits & 1) {
                aContext->Rectangle(gfxRect(col, row, 1, 1) + aPt, true);
            }
            bits >>= 1;
        }
    }
    aContext->Fill();
}
/* static */ bool HwcUtils::PrepareVisibleRegion(const nsIntRegion& aVisible, const gfx::Matrix& aLayerTransform, const gfx::Matrix& aLayerBufferTransform, nsIntRect aClip, nsIntRect aBufferRect, RectVector* aVisibleRegionScreen, bool& aIsVisible) { const float MIN_SRC_WIDTH = 2.f; const float MIN_SRC_HEIGHT = 2.f; gfxMatrix layerTransform = gfx::ThebesMatrix(aLayerTransform); gfxMatrix layerBufferTransform = gfx::ThebesMatrix(aLayerBufferTransform); gfxRect bufferRect = layerBufferTransform.TransformBounds(aBufferRect); gfxMatrix inverse = gfx::ThebesMatrix(aLayerBufferTransform); inverse.Invert(); nsIntRegionRectIterator rect(aVisible); aIsVisible = false; while (const nsIntRect* visibleRect = rect.Next()) { hwc_rect_t visibleRectScreen; gfxRect screenRect; screenRect = layerTransform.TransformBounds(gfxRect(*visibleRect)); screenRect.IntersectRect(screenRect, bufferRect); screenRect.IntersectRect(screenRect, aClip); screenRect.Round(); if (screenRect.IsEmpty()) { continue; } visibleRectScreen.left = screenRect.x; visibleRectScreen.top = screenRect.y; visibleRectScreen.right = screenRect.XMost(); visibleRectScreen.bottom = screenRect.YMost(); gfxRect srcCrop = inverse.TransformBounds(screenRect); // When src crop is very small, HWC could not render correctly in some cases. // See Bug 1169093 if(srcCrop.Width() < MIN_SRC_WIDTH || srcCrop.Height() < MIN_SRC_HEIGHT) { return false; } aVisibleRegionScreen->push_back(visibleRectScreen); aIsVisible = true; } return true; }
// Maps a device-space rectangle into filter space. When aDeviceRect is
// null, or when the rounded result overflows the integer-rect range, the
// whole filter area (0,0,aFilterSize) is returned instead.
static nsIntRect
MapDeviceRectToFilterSpace(const gfxMatrix& aMatrix,
                           const gfxIntSize& aFilterSize,
                           const nsIntRect* aDeviceRect)
{
  nsIntRect result(0, 0, aFilterSize.width, aFilterSize.height);
  if (!aDeviceRect) {
    return result;
  }

  gfxRect transformed =
    aMatrix.TransformBounds(gfxRect(aDeviceRect->x, aDeviceRect->y,
                                    aDeviceRect->width, aDeviceRect->height));
  transformed.RoundOut();

  // Only adopt the converted rect if it fits in integer coordinates.
  nsIntRect snapped;
  if (gfxUtils::GfxRectToIntRect(transformed, &snapped)) {
    result = snapped;
  }
  return result;
}
/* Draws an emitter: two lines marking the boundaries of its emission cone
 * (length proportional to emission speed) plus a filled square centered on
 * its position. Returns false when emitter is NULL. */
bool emitterDraw(emitter_t* emitter){
    if(!emitter)
        return false;

    vect_t pos = emitterGetPos(emitter);

    /* Boundary directions of the emission cone. */
    double angLow  = emitterGetAngle(emitter) - emitterGetAlpha(emitter);
    double angHigh = emitterGetAngle(emitter) + emitterGetAlpha(emitter);

    /* Line length scales with the emitter's speed. */
    double len = MAX_EMITTER_LEN / MAX_VG * emitterGetSpeed(emitter);

    gfxColor(0, 0, 0);
    gfxLine(pos.x, pos.y,
            pos.x + len * cos(angLow),  pos.y + len * sin(angLow));
    gfxLine(pos.x, pos.y,
            pos.x + len * cos(angHigh), pos.y + len * sin(angHigh));

    /* Filled square centered on the emitter position. */
    rect_t box = {
        .left   = pos.x - EMITTER_SIZE / 2,
        .right  = pos.x + EMITTER_SIZE / 2,
        .bottom = pos.y - EMITTER_SIZE / 2,
        .top    = pos.y + EMITTER_SIZE / 2
    };
    gfxRect(box, true);

    return true;
}

/* Releases an emitter; safe to call with NULL. */
void emitterFree(emitter_t* emitter){
    if(!emitter)
        return;
    free(emitter);
}
/**
 * Paints one quadrant of the rotated buffer into aTarget, clipped to
 * mBufferRect.
 *
 * @param aXSide LEFT means we draw from the left side of the buffer (which
 * is drawn on the right side of mBufferRect). RIGHT means we draw from
 * the right side of the buffer (which is drawn on the left side of
 * mBufferRect).
 * @param aYSide TOP means we draw from the top side of the buffer (which
 * is drawn on the bottom side of mBufferRect). BOTTOM means we draw from
 * the bottom side of the buffer (which is drawn on the top side of
 * mBufferRect).
 * @param aOpacity when not 1.0, painted via Clip()+Paint(aOpacity)
 * instead of a plain Fill().
 */
void
ThebesLayerBuffer::DrawBufferQuadrant(gfxContext* aTarget,
                                      XSide aXSide, YSide aYSide,
                                      float aOpacity)
{
  // The rectangle that we're going to fill. Basically we're going to
  // render the buffer at mBufferRect + quadrantTranslation to get the
  // pixels in the right place, but we're only going to paint within
  // mBufferRect
  nsIntRect quadrantRect = GetQuadrantRectangle(aXSide, aYSide);
  nsIntRect fillRect;
  // Nothing of this quadrant intersects mBufferRect: nothing to paint.
  if (!fillRect.IntersectRect(mBufferRect, quadrantRect))
    return;

  aTarget->NewPath();
  aTarget->Rectangle(gfxRect(fillRect.x, fillRect.y,
                             fillRect.width, fillRect.height),
                     PR_TRUE);

  gfxPoint quadrantTranslation(quadrantRect.x, quadrantRect.y);
  nsRefPtr<gfxPattern> pattern = new gfxPattern(mBuffer);

#ifdef MOZ_GFX_OPTIMIZE_MOBILE
  // Nearest-neighbour filtering is much cheaper on mobile GPUs/CPUs.
  gfxPattern::GraphicsFilter filter = gfxPattern::FILTER_NEAREST;
  pattern->SetFilter(filter);
#endif

  // Restores aTarget's matrix when this scope exits.
  gfxContextMatrixAutoSaveRestore saveMatrix(aTarget);

  // Transform from user -> buffer space.
  gfxMatrix transform;
  transform.Translate(-quadrantTranslation);

  pattern->SetMatrix(transform);
  aTarget->SetPattern(pattern);

  if (aOpacity != 1.0) {
    // Paint() with opacity needs a clip to confine it to the fill path.
    aTarget->Save();
    aTarget->Clip();
    aTarget->Paint(aOpacity);
    aTarget->Restore();
  } else {
    aTarget->Fill();
  }
}
// Returns the intersection of this rect with aRect; a degenerate rect at
// the origin (0,0,0,0) is returned when the two do not overlap.
gfxRect
gfxRect::Intersect(const gfxRect& aRect) const
{
    const gfxRect empty(0, 0, 0, 0);

    gfxFloat left  = PR_MAX(aRect.X(), X());
    gfxFloat right = PR_MIN(aRect.XMost(), XMost());
    if (left >= right)
        return empty;

    gfxFloat top    = PR_MAX(aRect.Y(), Y());
    gfxFloat bottom = PR_MIN(aRect.YMost(), YMost());
    if (top >= bottom)
        return empty;

    return gfxRect(left, top, right - left, bottom - top);
}
// Recomputes mRect, the covered region of this foreignObject frame in
// app units, from the element's animated x/y/width/height values.
NS_IMETHODIMP
nsSVGForeignObjectFrame::UpdateCoveredRegion()
{
  if (GetStateBits() & NS_STATE_SVG_NONDISPLAY_CHILD)
    return NS_ERROR_FAILURE;

  float x, y, width, height;
  static_cast<nsSVGForeignObjectElement*>(mContent)->
    GetAnimatedLengthValues(&x, &y, &width, &height, nsnull);

  // Reflow blows up on a negative-sized mRect, so clamp both dimensions
  // to zero.
  if (width < 0.0f) {
    width = 0.0f;
  }
  if (height < 0.0f) {
    height = 0.0f;
  }

  // GetCanvasTM already carries the x,y translation, so the local rect
  // starts at the origin.
  mRect = ToCanvasBounds(gfxRect(0.0, 0.0, width, height), GetCanvasTM(),
                         PresContext());
  return NS_OK;
}
//****************************************************************************** void imgContainer::ClearFrame(gfxIImageFrame *aFrame, nsIntRect &aRect) { if (!aFrame || aRect.width <= 0 || aRect.height <= 0) { return; } nsCOMPtr<nsIImage> img(do_GetInterface(aFrame)); nsRefPtr<gfxASurface> surf; img->LockImagePixels(0); img->GetSurface(getter_AddRefs(surf)); // Erase the destination rectangle to transparent gfxContext ctx(surf); ctx.SetOperator(gfxContext::OPERATOR_CLEAR); ctx.Rectangle(gfxRect(aRect.x, aRect.y, aRect.width, aRect.height)); ctx.Fill(); img->UnlockImagePixels(0); }
// Initializes the SourceGraphic/SourceAlpha pseudo-primitives: both cover
// the full filter region, and their result bounding boxes are the target's
// bounds in filter space unioned with mTargetBounds.
nsresult
nsSVGFilterInstance::BuildSources()
{
  gfxRect filterRegion(0, 0, mFilterSpaceSize.width, mFilterSpaceSize.height);
  mSourceColorAlpha.mImage.mFilterPrimitiveSubregion = filterRegion;
  mSourceAlpha.mImage.mFilterPrimitiveSubregion = filterRegion;

  gfxRect sourceBounds = UserSpaceToFilterSpace(mTargetBBox);
  sourceBounds.RoundOut();

  // Detect possible float->int overflow when converting the bounds.
  nsIntRect sourceBoundsInt;
  if (!gfxUtils::GfxRectToIntRect(sourceBounds, &sourceBoundsInt))
    return NS_ERROR_FAILURE;

  sourceBoundsInt.UnionRect(sourceBoundsInt, mTargetBounds);

  mSourceColorAlpha.mResultBoundingBox = sourceBoundsInt;
  mSourceAlpha.mResultBoundingBox = sourceBoundsInt;
  return NS_OK;
}
// Transforms all four corners of rect through this matrix and returns
// their axis-aligned bounding box. (Equivalent to cairo's private
// _cairo_matrix_transform_bounding_box, which isn't public.)
gfxRect
gfxMatrix::TransformBounds(const gfxRect& rect) const
{
    double xs[4] = { rect.X(), rect.XMost(), rect.X(), rect.XMost() };
    double ys[4] = { rect.Y(), rect.Y(), rect.YMost(), rect.YMost() };

    for (int i = 0; i < 4; ++i)
        cairo_matrix_transform_point(CONST_CAIRO_MATRIX(this), &xs[i], &ys[i]);

    double minX = xs[0], maxX = xs[0];
    double minY = ys[0], maxY = ys[0];
    for (int i = 1; i < 4; ++i) {
        if (xs[i] < minX) minX = xs[i];
        if (xs[i] > maxX) maxX = xs[i];
        if (ys[i] < minY) minY = ys[i];
        if (ys[i] > maxY) maxY = ys[i];
    }

    return gfxRect(minX, minY, maxX - minX, maxY - minY);
}
// Transfers the result of native (GDI) drawing into mContext. In the
// alpha-recovery case, the black and white renderings are combined to
// reconstruct per-pixel alpha before painting.
void
gfxWindowsNativeDrawing::PaintToContext()
{
  if (mRenderState == RENDER_STATE_NATIVE_DRAWING_DONE) {
    // nothing to do, it already went to the context
    mRenderState = RENDER_STATE_DONE;
  } else if (mRenderState == RENDER_STATE_ALPHA_RECOVERY_WHITE_DONE) {
    // Recover alpha in-place from the black/white surface pair.
    nsRefPtr<gfxImageSurface> black = mBlackSurface->GetAsImageSurface();
    nsRefPtr<gfxImageSurface> white = mWhiteSurface->GetAsImageSurface();
    if (!gfxAlphaRecovery::RecoverAlpha(black, white)) {
      NS_ERROR("Alpha recovery failure");
      return;
    }
    // Wrap the recovered pixels (storage still owned by `black`) in an
    // ARGB32 image surface without copying.
    nsRefPtr<gfxImageSurface> alphaSurface =
      new gfxImageSurface(black->Data(), black->GetSize(),
                          black->Stride(),
                          gfxASurface::ImageFormatARGB32);

    mContext->Save();
    mContext->Translate(mNativeRect.TopLeft());
    mContext->NewPath();
    mContext->Rectangle(gfxRect(gfxPoint(0.0, 0.0), mNativeRect.Size()));

    nsRefPtr<gfxPattern> pat = new gfxPattern(alphaSurface);

    // Map the pattern through the scale the native drawing was done at.
    gfxMatrix m;
    m.Scale(mScale.width, mScale.height);
    pat->SetMatrix(m);

    if (mNativeDrawFlags & DO_NEAREST_NEIGHBOR_FILTERING)
      pat->SetFilter(gfxPattern::FILTER_FAST);

    // EXTEND_PAD avoids sampling beyond the surface edges.
    pat->SetExtend(gfxPattern::EXTEND_PAD);
    mContext->SetPattern(pat);
    mContext->Fill();
    mContext->Restore();

    mRenderState = RENDER_STATE_DONE;
  } else {
    NS_ERROR("Invalid RenderState in gfxWindowsNativeDrawing::PaintToContext");
  }
}
// Paints the video element's current frame into this frame's content
// area, corrected to preserve the video's intrinsic aspect ratio.
void
nsVideoFrame::PaintVideo(nsIRenderingContext& aRenderingContext,
                         const nsRect& aDirtyRect, nsPoint aPt)
{
  nsRect contentArea = GetContentRect() - GetPosition() + aPt;
  nsHTMLVideoElement* element = static_cast<nsHTMLVideoElement*>(GetContent());
  nsIntSize videoSize = element->GetVideoSize(nsIntSize(0, 0));

  // Nothing to paint without known video dimensions or a visible area.
  if (videoSize.width <= 0 || videoSize.height <= 0 || contentArea.IsEmpty())
    return;

  gfxContext* ctx = static_cast<gfxContext*>(
    aRenderingContext.GetNativeGraphicData(
      nsIRenderingContext::NATIVE_THEBES_CONTEXT));
  nsPresContext* presContext = PresContext();

  // Convert the content area from app units to gfx units.
  gfxRect dest(presContext->AppUnitsToGfxUnits(contentArea.x),
               presContext->AppUnitsToGfxUnits(contentArea.y),
               presContext->AppUnitsToGfxUnits(contentArea.width),
               presContext->AppUnitsToGfxUnits(contentArea.height));

  dest = CorrectForAspectRatio(dest, videoSize);
  element->Paint(ctx, nsLayoutUtils::GetGraphicsFilterForFrame(this), dest);
}
// Paints the wrapped display list into a group and composites that group
// with the frame's CSS opacity.
void nsDisplayOpacity::Paint(nsDisplayListBuilder* aBuilder,
                             nsIRenderingContext* aCtx,
                             const nsRect& aDirtyRect)
{
  // XXX This way of handling 'opacity' creates exponential time blowup in
  // the depth of nested translucent elements. This will be fixed when we
  // move to cairo with support for real alpha channels in surfaces, so we
  // don't have to do this white/black hack anymore.
  float opacity = mFrame->GetStyleDisplay()->mOpacity;

  nsRect bounds;
  bounds.IntersectRect(GetBounds(aBuilder), aDirtyRect);

  nsCOMPtr<nsIDeviceContext> devCtx;
  aCtx->GetDeviceContext(*getter_AddRefs(devCtx));
  float appUnitsToPixels = 1.0f / devCtx->AppUnitsPerDevPixel();

  nsRefPtr<gfxContext> ctx = aCtx->ThebesContext();
  ctx->Save();

  // Clip to the dirty bounds (converted to pixels) before grouping.
  ctx->NewPath();
  ctx->Rectangle(gfxRect(bounds.x * appUnitsToPixels,
                         bounds.y * appUnitsToPixels,
                         bounds.width * appUnitsToPixels,
                         bounds.height * appUnitsToPixels),
                 PR_TRUE);
  ctx->Clip();

  // Only request an alpha channel for the group when it's needed.
  ctx->PushGroup(mNeedAlpha ? gfxASurface::CONTENT_COLOR_ALPHA
                            : gfxASurface::CONTENT_COLOR);

  nsDisplayWrapList::Paint(aBuilder, aCtx, bounds);

  ctx->PopGroupToSource();
  ctx->SetOperator(gfxContext::OPERATOR_OVER);
  ctx->Paint(opacity);

  ctx->Restore();
}
// Recomputes mRect, the covered region of this path geometry frame, from
// the path's extents (widened to the maximum possible stroke extents when
// stroked) plus any markers.
NS_IMETHODIMP
nsSVGPathGeometryFrame::UpdateCoveredRegion()
{
  mRect.Empty();

  // Build the path on a scratch surface purely to measure its extents.
  gfxContext context(nsSVGUtils::GetThebesComputationalSurface());

  GeneratePath(&context);
  context.IdentityMatrix();

  gfxRect extent = context.GetUserPathExtent();

  // Be careful when replacing the following logic to get the fill and stroke
  // extents independently (instead of computing the stroke extents from the
  // path extents). You may think that you can just use the stroke extents if
  // there is both a fill and a stroke. In reality it's necessary to calculate
  // both the fill and stroke extents, and take the union of the two. There are
  // two reasons for this:
  //
  // # Due to stroke dashing, in certain cases the fill extents could actually
  //   extend outside the stroke extents.
  // # If the stroke is very thin, cairo won't paint any stroke, and so the
  //   stroke bounds that it will return will be empty.

  if (HasStroke()) {
    SetupCairoStrokeGeometry(&context);
    extent = nsSVGUtils::PathExtentsToMaxStrokeExtents(extent, this);
  } else if (GetStyleSVG()->mFill.mType == eStyleSVGPaintType_None) {
    // Neither stroke nor fill: nothing gets painted.
    extent = gfxRect(0, 0, 0, 0);
  }

  if (!extent.IsEmpty()) {
    mRect = nsSVGUtils::ToAppPixelRect(PresContext(), extent);
  }

  // Add in markers
  mRect = GetCoveredRegion();

  return NS_OK;
}
// Enables or disables subpixel antialiasing on aTarget's destination
// surface based on aLayer's content flags and, for component-alpha
// content, whether the layer's visible bounds lie inside the surface's
// opaque region.
void
SetAntialiasingFlags(Layer* aLayer, gfxContext* aTarget)
{
  if (!aTarget->IsCairo()) {
    // Delegate to the DrawTarget-based overload.
    SetAntialiasingFlags(aLayer, aTarget->GetDrawTarget());
    return;
  }

  bool permitSubpixelAA =
    !(aLayer->GetContentFlags() & Layer::CONTENT_DISABLE_SUBPIXEL_AA);

  nsRefPtr<gfxASurface> surface = aTarget->CurrentSurface();
  if (surface->GetContentType() != gfxContentType::COLOR_ALPHA) {
    // Destination doesn't have alpha channel; no need to set any special flags
    surface->SetSubpixelAntialiasingEnabled(permitSubpixelAA);
    return;
  }

  // With component alpha, subpixel AA is only safe when the visible
  // bounds fall entirely inside the surface's opaque rect.
  const nsIntRect& bounds = aLayer->GetVisibleRegion().GetBounds();
  if (aLayer->GetContentFlags() & Layer::CONTENT_COMPONENT_ALPHA) {
    permitSubpixelAA = permitSubpixelAA &&
      surface->GetOpaqueRect().Contains(
        aTarget->UserToDevice(gfxRect(bounds.x, bounds.y,
                                      bounds.width, bounds.height)));
  }
  surface->SetSubpixelAntialiasingEnabled(permitSubpixelAA);
}
// Maps rect from user space to device space by transforming all four
// corners and returning their bounding box, which stays correct under
// rotation and skew.
gfxRect
gfxContext::UserToDevice(const gfxRect& rect) const
{
    double xs[4] = { rect.X(), rect.X(), rect.XMost(), rect.XMost() };
    double ys[4] = { rect.Y(), rect.YMost(), rect.YMost(), rect.Y() };

    for (int i = 0; i < 4; ++i)
        cairo_user_to_device(mCairo, &xs[i], &ys[i]);

    double xmin = xs[0], xmax = xs[0];
    double ymin = ys[0], ymax = ys[0];
    for (int i = 1; i < 4; ++i) {
        xmin = NS_MIN(xmin, xs[i]);
        xmax = NS_MAX(xmax, xs[i]);
        ymin = NS_MIN(ymin, ys[i]);
        ymax = NS_MAX(ymax, ys[i]);
    }

    return gfxRect(xmin, ymin, xmax - xmin, ymax - ymin);
}
// Transforms the current composition bounds into transformed layout
// device space: compensates for the resolution/zoom difference, shifts by
// the old scroll offset, then applies the screen-to-layout transform.
static LayoutDeviceRect
TransformCompositionBounds(const ParentLayerRect& aCompositionBounds,
                           const CSSToParentLayerScale& aZoom,
                           const ScreenPoint& aScrollOffset,
                           const CSSToScreenScale& aResolution,
                           const gfx3DMatrix& aTransformScreenToLayout)
{
  // Undo the zoom, reapply the resolution, and subtract the old
  // composition bounds origin.
  ScreenRect viewport = (aCompositionBounds / aZoom) * aResolution;
  viewport.MoveBy(-aScrollOffset);

  gfxRect transformed = aTransformScreenToLayout.TransformBounds(
    gfxRect(viewport.x, viewport.y, viewport.width, viewport.height));

  return LayoutDeviceRect(transformed.x, transformed.y,
                          transformed.width, transformed.height);
}
// Computes this container's snapped effective transform and decides
// whether its children must be rendered into an intermediate surface,
// then propagates the appropriate transform to the children.
void
ContainerLayer::DefaultComputeEffectiveTransforms(const gfx3DMatrix& aTransformToSurface)
{
  gfxMatrix residual;
  gfx3DMatrix idealTransform = GetLocalTransform()*aTransformToSurface;
  // Snap to pixel boundaries; the leftover fraction lands in `residual`.
  mEffectiveTransform = SnapTransform(idealTransform, gfxRect(0, 0, 0, 0), &residual);

  PRBool useIntermediateSurface;
  float opacity = GetEffectiveOpacity();
  if (opacity != 1.0f && HasMultipleChildren()) {
    // Group opacity across several children requires flattening them
    // into an intermediate surface first.
    useIntermediateSurface = PR_TRUE;
  } else {
    useIntermediateSurface = PR_FALSE;
    gfxMatrix contTransform;
    if (!mEffectiveTransform.Is2D(&contTransform) ||
        !contTransform.PreservesAxisAlignedRectangles()) {
      for (Layer* child = GetFirstChild(); child; child = child->GetNextSibling()) {
        const nsIntRect *clipRect = child->GetEffectiveClipRect();
        /* We can't (easily) forward our transform to children with a non-empty clip
         * rect since it would need to be adjusted for the transform.
         * TODO: This is easily solvable for translation/scaling transforms.
         */
        if (clipRect && !clipRect->IsEmpty() && !child->GetVisibleRegion().IsEmpty()) {
          useIntermediateSurface = PR_TRUE;
          break;
        }
      }
    }
  }

  mUseIntermediateSurface = useIntermediateSurface;
  if (useIntermediateSurface) {
    // Children only carry the residual snap offset; the snapped transform
    // itself is applied when the intermediate surface is composited.
    ComputeEffectiveTransformsForChildren(gfx3DMatrix::From2D(residual));
  } else {
    ComputeEffectiveTransformsForChildren(idealTransform);
  }
}
// Hit-tests aPoint (app units, canvas space) against this foreignObject's
// content, returning the deepest frame hit or nsnull.
nsSVGForeignObjectFrame::GetFrameForPoint(const nsPoint &aPoint)
{
  if (IsDisabled() || (GetStateBits() & NS_STATE_SVG_NONDISPLAY_CHILD))
    return nsnull;

  nsIFrame* kid = GetFirstPrincipalChild();
  if (!kid)
    return nsnull;

  float x, y, width, height;
  static_cast<nsSVGElement*>(mContent)->
    GetAnimatedLengthValues(&x, &y, &width, &height, nsnull);

  // Invert the canvas transform to go canvas -> user space; a singular
  // matrix means nothing is hittable.
  gfxMatrix tm = GetCanvasTM().Invert();
  if (tm.IsSingular())
    return nsnull;

  // Convert aPoint from app units in canvas space to user space:
  gfxPoint pt = gfxPoint(aPoint.x, aPoint.y) / PresContext()->AppUnitsPerDevPixel();
  pt = tm.Transform(pt);

  // Reject points outside the foreignObject's own width/height viewport.
  if (!gfxRect(0.0f, 0.0f, width, height).Contains(pt))
    return nsnull;

  // Convert pt to app units in *local* space:
  pt = pt * nsPresContext::AppUnitsPerCSSPixel();
  nsPoint point = nsPoint(NSToIntRound(pt.x), NSToIntRound(pt.y));

  // The hit only counts if aPoint also passes this frame's clip.
  nsIFrame *frame = nsLayoutUtils::GetFrameForPoint(kid, point);
  if (frame && nsSVGUtils::HitTestClip(this, aPoint))
    return frame;

  return nsnull;
}
// Computes the filter primitive subregion for aPrimitive: the default is
// either the union of the inputs' subregions or the whole filter region,
// and each of x/y/width/height is overridden only when explicitly set on
// the primitive element.
void
nsSVGFilterInstance::ComputeFilterPrimitiveSubregion(PrimitiveInfo* aPrimitive)
{
  nsSVGFE* filterElement = aPrimitive->mFE;

  gfxRect defaultSubregion(0,0,0,0);
  if (filterElement->SubregionIsUnionOfRegions()) {
    // Default to the union of all input subregions.
    for (PRUint32 inputIdx = 0; inputIdx < aPrimitive->mInputs.Length(); ++inputIdx) {
      defaultSubregion = defaultSubregion.Union(
          aPrimitive->mInputs[inputIdx]->mImage.mFilterPrimitiveSubregion);
    }
  } else {
    // Otherwise default to the entire filter region.
    defaultSubregion =
      gfxRect(0, 0, mFilterSpaceSize.width, mFilterSpaceSize.height);
  }

  gfxRect primitiveArea =
    nsSVGUtils::GetRelativeRect(mPrimitiveUnits,
                                &filterElement->mLengthAttributes[nsSVGFE::X],
                                mTargetBBox, mTargetFrame);
  gfxRect subregion = UserSpaceToFilterSpace(primitiveArea);

  // Each coordinate falls back to the default unless explicitly given.
  if (!filterElement->mLengthAttributes[nsSVGFE::X].IsExplicitlySet())
    subregion.x = defaultSubregion.X();
  if (!filterElement->mLengthAttributes[nsSVGFE::Y].IsExplicitlySet())
    subregion.y = defaultSubregion.Y();
  if (!filterElement->mLengthAttributes[nsSVGFE::WIDTH].IsExplicitlySet())
    subregion.width = defaultSubregion.Width();
  if (!filterElement->mLengthAttributes[nsSVGFE::HEIGHT].IsExplicitlySet())
    subregion.height = defaultSubregion.Height();

  // We currently require filter primitive subregions to be pixel-aligned.
  // Following the spec, any pixel partially in the region is included
  // in the region.
  subregion.RoundOut();
  aPrimitive->mImage.mFilterPrimitiveSubregion = subregion;
}
void nsRenderingContext::SetClip(const nsIntRegion& aRegion) { // Region is in device coords, no transformation. This should // only be called when there is no transform in place, when we we // just start painting a widget. The region is set by the platform // paint routine. Therefore, there is no option to intersect with // an existing clip. gfxMatrix mat = mThebes->CurrentMatrix(); mThebes->IdentityMatrix(); mThebes->ResetClip(); mThebes->NewPath(); nsIntRegionRectIterator iter(aRegion); const nsIntRect* rect; while ((rect = iter.Next())) { mThebes->Rectangle(gfxRect(rect->x, rect->y, rect->width, rect->height), PR_TRUE); } mThebes->Clip(); mThebes->SetMatrix(mat); }
// Transforms rect by mapping its origin and size through this matrix
// independently. NOTE(review): because pos and size are transformed
// separately, this is only meaningful for matrices without rotation —
// use TransformBounds for the general case; confirm callers rely on this.
gfxRect
gfxMatrix::Transform(const gfxRect& rect) const
{
    return gfxRect(Transform(rect.pos),
                   Transform(rect.size));
}
// Favicon-service callback: decodes the favicon data, renders it onto an
// image surface (for URL shortcuts: a 48x48 white square with the 16x16
// icon composited at (16,16)), then hands a copy of the pixels to the I/O
// thread to encode and write as an ICO file.
NS_IMETHODIMP
AsyncFaviconDataReady::OnComplete(nsIURI *aFaviconURI,
                                  uint32_t aDataLen,
                                  const uint8_t *aData,
                                  const nsACString &aMimeType)
{
  if (!aDataLen || !aData) {
    if (mURLShortcut) {
      // No favicon data: fall back to the generated default icon.
      OnFaviconDataNotAvailable();
    }
    return NS_OK;
  }

  nsCOMPtr<nsIFile> icoFile;
  nsresult rv = FaviconHelper::GetOutputIconPath(mNewURI, icoFile, mURLShortcut);
  NS_ENSURE_SUCCESS(rv, rv);

  nsAutoString path;
  rv = icoFile->GetPath(path);
  NS_ENSURE_SUCCESS(rv, rv);

  // Convert the obtained favicon data to an input stream
  nsCOMPtr<nsIInputStream> stream;
  rv = NS_NewByteInputStream(getter_AddRefs(stream),
                             reinterpret_cast<const char*>(aData),
                             aDataLen,
                             NS_ASSIGNMENT_DEPEND);
  NS_ENSURE_SUCCESS(rv, rv);

  // Decode the image from the format it was returned to us in (probably PNG)
  // NOTE(review): mimeTypeOfInputData is assigned here but aMimeType is
  // what gets passed to DecodeImageData — confirm which one is intended.
  nsAutoCString mimeTypeOfInputData;
  mimeTypeOfInputData.AssignLiteral("image/vnd.microsoft.icon");
  nsCOMPtr<imgIContainer> container;
  nsCOMPtr<imgITools> imgtool = do_CreateInstance("@mozilla.org/image/tools;1");
  rv = imgtool->DecodeImageData(stream, aMimeType,
                                getter_AddRefs(container));
  NS_ENSURE_SUCCESS(rv, rv);

  nsRefPtr<gfxASurface> imgFrame =
    container->GetFrame(imgIContainer::FRAME_FIRST, 0);
  NS_ENSURE_TRUE(imgFrame, NS_ERROR_FAILURE);

  nsRefPtr<gfxImageSurface> imageSurface;
  gfxIntSize size;
  if (mURLShortcut) {
    // URL shortcuts use a fixed 48x48 white canvas with the favicon
    // composited at (16,16) as a 16x16 image.
    imageSurface =
      new gfxImageSurface(gfxIntSize(48, 48),
                          gfxImageFormat::ARGB32);
    gfxContext context(imageSurface);
    context.SetOperator(gfxContext::OPERATOR_SOURCE);
    context.SetColor(gfxRGBA(1, 1, 1, 1));
    context.Rectangle(gfxRect(0, 0, 48, 48));
    context.Fill();

    context.Translate(gfxPoint(16, 16));
    context.SetOperator(gfxContext::OPERATOR_OVER);
    context.DrawSurface(imgFrame, gfxSize(16, 16));
    size = imageSurface->GetSize();
  } else {
    // Use the system small-icon metrics, defaulting to 16x16 when the
    // metrics are unavailable.
    imageSurface = imgFrame->GetAsReadableARGB32ImageSurface();
    size.width = GetSystemMetrics(SM_CXSMICON);
    size.height = GetSystemMetrics(SM_CYSMICON);
    if (!size.width || !size.height) {
      size.width = 16;
      size.height = 16;
    }
  }

  // Allocate a new buffer that we own and can use out of line in
  // another thread. Copy the favicon raw data into it.
  const fallible_t fallible = fallible_t();
  uint8_t *data = new (fallible) uint8_t[imageSurface->GetDataSize()];
  if (!data) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  memcpy(data, imageSurface->Data(), imageSurface->GetDataSize());

  // AsyncEncodeAndWriteIcon takes ownership of the heap allocated buffer
  nsCOMPtr<nsIRunnable> event =
    new AsyncEncodeAndWriteIcon(path, data, imageSurface->GetDataSize(),
                                imageSurface->Stride(),
                                size.width, size.height,
                                mURLShortcut);
  mIOThread->Dispatch(event, NS_DISPATCH_NORMAL);

  return NS_OK;
}
// Runs on the I/O thread: decodes the favicon data, re-encodes it as an
// ICO (scaled to the system small-icon size, or composited onto a 48x48
// white canvas for URL shortcuts), and writes the result to mIconPath.
// Returns an error code on any failure; broadcasts a settings change
// after writing a shortcut icon.
NS_IMETHODIMP AsyncWriteIconToDisk::Run()
{
  NS_PRECONDITION(!NS_IsMainThread(),
                  "Should not be called on the main thread.");

  // Convert the obtained favicon data to an input stream
  nsCOMPtr<nsIInputStream> stream;
  nsresult rv =
    NS_NewByteInputStream(getter_AddRefs(stream),
                          reinterpret_cast<const char*>(mBuffer.get()),
                          mBufferLength,
                          NS_ASSIGNMENT_DEPEND);
  NS_ENSURE_SUCCESS(rv, rv);

  // Decode the image from the format it was returned to us in (probably PNG)
  nsCOMPtr<imgIContainer> container;
  nsCOMPtr<imgITools> imgtool = do_CreateInstance("@mozilla.org/image/tools;1");
  rv = imgtool->DecodeImageData(stream, mMimeTypeOfInputData,
                                getter_AddRefs(container));
  NS_ENSURE_SUCCESS(rv, rv);

  // Get the recommended icon width and height, or if failure to obtain
  // these settings, fall back to 16x16 ICOs. These values can be different
  // if the user has a different DPI setting other than 100%.
  // Windows would scale the 16x16 icon themselves, but it's better
  // we let our ICO encoder do it.
  nsCOMPtr<nsIInputStream> iconStream;
  if (!mURLShortcut) {
    int32_t systemIconWidth = GetSystemMetrics(SM_CXSMICON);
    int32_t systemIconHeight = GetSystemMetrics(SM_CYSMICON);
    if ((systemIconWidth == 0 || systemIconHeight == 0)) {
      systemIconWidth = 16;
      systemIconHeight = 16;
    }
    // Scale the image to the needed size and in ICO format
    mMimeTypeOfInputData.AssignLiteral("image/vnd.microsoft.icon");
    rv = imgtool->EncodeScaledImage(container, mMimeTypeOfInputData,
                                    systemIconWidth, systemIconHeight,
                                    EmptyString(),
                                    getter_AddRefs(iconStream));
  } else {
    nsRefPtr<gfxASurface> s;
    rv = container->GetFrame(imgIContainer::FRAME_FIRST, 0,
                             getter_AddRefs(s));
    NS_ENSURE_SUCCESS(rv, rv);

    // FIX: hold the surface in an nsRefPtr instead of a raw owning
    // pointer — the previous raw `new` was never deleted and leaked the
    // surface on every shortcut-icon write.
    nsRefPtr<gfxImageSurface> surface =
      new gfxImageSurface(gfxIntSize(48, 48),
                          gfxImageSurface::ImageFormatARGB32);

    // White 48x48 background with the 16x16 favicon composited at (16,16).
    gfxContext context(surface);
    context.SetOperator(gfxContext::OPERATOR_SOURCE);
    context.SetColor(gfxRGBA(1, 1, 1, 1));
    context.Rectangle(gfxRect(0, 0, 48, 48));
    context.Fill();

    context.Translate(gfxPoint(16, 16));
    context.SetOperator(gfxContext::OPERATOR_OVER);
    context.DrawSurface(s, gfxSize(16, 16));
    gfxIntSize size = surface->GetSize();

    // Encode the composited surface as an ICO stream.
    nsRefPtr<imgIEncoder> encoder =
      do_CreateInstance("@mozilla.org/image/encoder;2?"
                        "type=image/vnd.microsoft.icon");
    NS_ENSURE_TRUE(encoder, NS_ERROR_FAILURE);
    rv = encoder->InitFromData(surface->Data(),
                               surface->Stride() * size.height,
                               size.width, size.height,
                               surface->Stride(),
                               imgIEncoder::INPUT_FORMAT_HOSTARGB,
                               EmptyString());
    NS_ENSURE_SUCCESS(rv, rv);
    CallQueryInterface(encoder.get(), getter_AddRefs(iconStream));
    if (!iconStream) {
      return NS_ERROR_FAILURE;
    }
  }
  NS_ENSURE_SUCCESS(rv, rv);

  nsCOMPtr<nsIFile> icoFile = do_CreateInstance("@mozilla.org/file/local;1");
  NS_ENSURE_TRUE(icoFile, NS_ERROR_FAILURE);
  rv = icoFile->InitWithPath(mIconPath);
  // FIX: the InitWithPath result was previously ignored; a bad path would
  // fall through to the output-stream setup.
  NS_ENSURE_SUCCESS(rv, rv);

  // Setup the output stream for the ICO file on disk
  nsCOMPtr<nsIOutputStream> outputStream;
  rv = NS_NewLocalFileOutputStream(getter_AddRefs(outputStream), icoFile);
  NS_ENSURE_SUCCESS(rv, rv);

  // Obtain the ICO buffer size from the re-encoded ICO stream
  uint64_t bufSize64;
  rv = iconStream->Available(&bufSize64);
  NS_ENSURE_SUCCESS(rv, rv);
  NS_ENSURE_TRUE(bufSize64 <= UINT32_MAX, NS_ERROR_FILE_TOO_BIG);
  uint32_t bufSize = (uint32_t)bufSize64;

  // Setup a buffered output stream from the stream object
  // so that we can simply use WriteFrom with the stream object
  nsCOMPtr<nsIOutputStream> bufferedOutputStream;
  rv = NS_NewBufferedOutputStream(getter_AddRefs(bufferedOutputStream),
                                  outputStream, bufSize);
  NS_ENSURE_SUCCESS(rv, rv);

  // Write out the icon stream to disk and make sure we wrote everything
  uint32_t wrote;
  rv = bufferedOutputStream->WriteFrom(iconStream, bufSize, &wrote);
  NS_ASSERTION(bufSize == wrote,
               "Icon wrote size should be equal to requested write size");

  // Cleanup
  bufferedOutputStream->Close();
  outputStream->Close();
  if (mURLShortcut) {
    // Tell the shell the icon set changed so shortcuts refresh.
    SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, SPI_SETNONCLIENTMETRICS, 0);
  }
  return rv;
}
// Prepares per-transaction paint data for tiled painting: the
// screen-to-layer transform, the critical display port, the cumulative
// frame resolution, and the scroll offset / composition bounds — all
// expressed in layer space.
void
ClientTiledThebesLayer::BeginPaint()
{
  if (ClientManager()->IsRepeatTransaction()) {
    // Repeat transactions reuse the data computed on the first pass.
    return;
  }

  mPaintData.mLowPrecisionPaintCount = 0;
  mPaintData.mPaintFinished = false;

  // Calculate the transform required to convert screen space into layer space
  mPaintData.mTransformScreenToLayer = GetEffectiveTransform();
  // XXX Not sure if this code for intermediate surfaces is correct.
  //     It rarely gets hit though, and shouldn't have terrible consequences
  //     even if it is wrong.
  for (ContainerLayer* parent = GetParent(); parent; parent = parent->GetParent()) {
    if (parent->UseIntermediateSurface()) {
      mPaintData.mTransformScreenToLayer.PreMultiply(parent->GetEffectiveTransform());
    }
  }
  mPaintData.mTransformScreenToLayer.Invert();

  // Compute the critical display port in layer space.
  mPaintData.mLayerCriticalDisplayPort.SetEmpty();
  const gfx::Rect& criticalDisplayPort = GetParent()->GetFrameMetrics().mCriticalDisplayPort;
  if (!criticalDisplayPort.IsEmpty()) {
    // Transform the port into layer space and round out so partially
    // covered pixels are included.
    gfxRect transformedCriticalDisplayPort =
      mPaintData.mTransformScreenToLayer.TransformBounds(
        gfxRect(criticalDisplayPort.x, criticalDisplayPort.y,
                criticalDisplayPort.width, criticalDisplayPort.height));
    transformedCriticalDisplayPort.RoundOut();
    mPaintData.mLayerCriticalDisplayPort =
      nsIntRect(transformedCriticalDisplayPort.x,
                transformedCriticalDisplayPort.y,
                transformedCriticalDisplayPort.width,
                transformedCriticalDisplayPort.height);
  }

  // Calculate the frame resolution.
  // The cumulative resolution is the product of all ancestors' resolutions.
  mPaintData.mResolution.SizeTo(1, 1);
  for (ContainerLayer* parent = GetParent(); parent; parent = parent->GetParent()) {
    const FrameMetrics& metrics = parent->GetFrameMetrics();
    mPaintData.mResolution.width *= metrics.mResolution.width;
    mPaintData.mResolution.height *= metrics.mResolution.height;
  }

  // Calculate the scroll offset since the last transaction, and the
  // composition bounds.
  mPaintData.mCompositionBounds.SetEmpty();
  mPaintData.mScrollOffset.MoveTo(0, 0);
  Layer* primaryScrollable = ClientManager()->GetPrimaryScrollableLayer();
  if (primaryScrollable) {
    const FrameMetrics& metrics = primaryScrollable->AsContainerLayer()->GetFrameMetrics();
    mPaintData.mScrollOffset = metrics.mScrollOffset;
    // Bring the composition bounds into layer space, rounded out.
    gfxRect transformedViewport =
      mPaintData.mTransformScreenToLayer.TransformBounds(
        gfxRect(metrics.mCompositionBounds.x, metrics.mCompositionBounds.y,
                metrics.mCompositionBounds.width, metrics.mCompositionBounds.height));
    transformedViewport.RoundOut();
    mPaintData.mCompositionBounds =
      nsIntRect(transformedViewport.x, transformedViewport.y,
                transformedViewport.width, transformedViewport.height);
  }
}
// Encodes the GL canvas contents into an image stream of type aMimeType.
// Currently disabled (always returns NS_ERROR_FAILURE); the original
// implementation is preserved below under #if 0.
NS_IMETHODIMP
nsCanvasRenderingContextGLPrivate::GetInputStream(const char* aMimeType,
                                                  const PRUnichar* aEncoderOptions,
                                                  nsIInputStream **aStream)
{
    // XXX disabled for now due to the win32 nsRefPtr situation -- we need
    // to manage allocations and deletions very carefully, and can't allocate
    // an object in our dll and have xul.dll call delete on it (which
    // Release() will do).
    return NS_ERROR_FAILURE;

#if 0
    // Dead code: copies the pbuffer surface (Y-flipped, since GL's origin
    // is bottom-left) into an ARGB32 image surface and encodes it.
    if (!mGLPbuffer || !mGLPbuffer->ThebesSurface())
        return NS_ERROR_FAILURE;

    nsresult rv;
    const char encoderPrefix[] = "@mozilla.org/image/encoder;2?type=";
    nsAutoArrayPtr<char> conid(new (std::nothrow) char[strlen(encoderPrefix) + strlen(aMimeType) + 1]);

    if (!conid)
        return NS_ERROR_OUT_OF_MEMORY;

    strcpy(conid, encoderPrefix);
    strcat(conid, aMimeType);

    nsCOMPtr<imgIEncoder> encoder = do_CreateInstance(conid);
    if (!encoder)
        return NS_ERROR_FAILURE;

    nsAutoArrayPtr<PRUint8> imageBuffer(new (std::nothrow) PRUint8[mWidth * mHeight * 4]);
    if (!imageBuffer)
        return NS_ERROR_OUT_OF_MEMORY;

    nsRefPtr<gfxImageSurface> imgsurf =
        new gfxImageSurface(imageBuffer.get(),
                            gfxIntSize(mWidth, mHeight),
                            mWidth * 4,
                            gfxASurface::ImageFormatARGB32);

    if (!imgsurf || imgsurf->CairoStatus())
        return NS_ERROR_FAILURE;

    nsRefPtr<gfxContext> ctx = new gfxContext(imgsurf);

    if (!ctx || ctx->HasError())
        return NS_ERROR_FAILURE;

    nsRefPtr<gfxASurface> surf = mGLPbuffer->ThebesSurface();
    nsRefPtr<gfxPattern> pat = CanvasGLThebes::CreatePattern(surf);

    // Flip vertically so row 0 of the output is the top of the canvas.
    gfxMatrix m;
    m.Translate(gfxPoint(0.0, mGLPbuffer->Height()));
    m.Scale(1.0, -1.0);
    pat->SetMatrix(m);

    // XXX I don't want to use PixelSnapped here, but layout doesn't guarantee
    // pixel alignment for this stuff!
    ctx->NewPath();
    ctx->PixelSnappedRectangleAndSetPattern(gfxRect(0, 0, mWidth, mHeight), pat);
    ctx->SetOperator(gfxContext::OPERATOR_SOURCE);
    ctx->Fill();

    rv = encoder->InitFromData(imageBuffer.get(),
                               mWidth * mHeight * 4, mWidth, mHeight, mWidth * 4,
                               imgIEncoder::INPUT_FORMAT_HOSTARGB,
                               nsDependentString(aEncoderOptions));
    NS_ENSURE_SUCCESS(rv, rv);

    return CallQueryInterface(encoder, aStream);
#endif
}
void BasicCompositor::DrawQuad(const gfx::Rect& aRect, const gfx::Rect& aClipRect, const EffectChain &aEffectChain, gfx::Float aOpacity, const gfx::Matrix4x4& aTransform, const gfx::Rect& aVisibleRect) { RefPtr<DrawTarget> buffer = mRenderTarget->mDrawTarget; // For 2D drawing, |dest| and |buffer| are the same surface. For 3D drawing, // |dest| is a temporary surface. RefPtr<DrawTarget> dest = buffer; buffer->PushClipRect(aClipRect); AutoRestoreTransform autoRestoreTransform(dest); Matrix newTransform; Rect transformBounds; gfx3DMatrix new3DTransform; IntPoint offset = mRenderTarget->GetOrigin(); if (aTransform.Is2D()) { newTransform = aTransform.As2D(); } else { // Create a temporary surface for the transform. dest = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(RoundOut(aRect).Size(), SurfaceFormat::B8G8R8A8); if (!dest) { return; } dest->SetTransform(Matrix::Translation(-aRect.x, -aRect.y)); // Get the bounds post-transform. new3DTransform = To3DMatrix(aTransform); gfxRect bounds = new3DTransform.TransformBounds(ThebesRect(aRect)); bounds.IntersectRect(bounds, gfxRect(offset.x, offset.y, buffer->GetSize().width, buffer->GetSize().height)); transformBounds = ToRect(bounds); transformBounds.RoundOut(); // Propagate the coordinate offset to our 2D draw target. newTransform = Matrix::Translation(transformBounds.x, transformBounds.y); // When we apply the 3D transformation, we do it against a temporary // surface, so undo the coordinate offset. 
new3DTransform = gfx3DMatrix::Translation(aRect.x, aRect.y, 0) * new3DTransform; } newTransform.PostTranslate(-offset.x, -offset.y); buffer->SetTransform(newTransform); RefPtr<SourceSurface> sourceMask; Matrix maskTransform; if (aEffectChain.mSecondaryEffects[EffectTypes::MASK]) { EffectMask *effectMask = static_cast<EffectMask*>(aEffectChain.mSecondaryEffects[EffectTypes::MASK].get()); sourceMask = effectMask->mMaskTexture->AsSourceBasic()->GetSurface(dest); MOZ_ASSERT(effectMask->mMaskTransform.Is2D(), "How did we end up with a 3D transform here?!"); MOZ_ASSERT(!effectMask->mIs3D); maskTransform = effectMask->mMaskTransform.As2D(); maskTransform.PreTranslate(-offset.x, -offset.y); } switch (aEffectChain.mPrimaryEffect->mType) { case EffectTypes::SOLID_COLOR: { EffectSolidColor* effectSolidColor = static_cast<EffectSolidColor*>(aEffectChain.mPrimaryEffect.get()); FillRectWithMask(dest, aRect, effectSolidColor->mColor, DrawOptions(aOpacity), sourceMask, &maskTransform); break; } case EffectTypes::RGB: { TexturedEffect* texturedEffect = static_cast<TexturedEffect*>(aEffectChain.mPrimaryEffect.get()); TextureSourceBasic* source = texturedEffect->mTexture->AsSourceBasic(); if (texturedEffect->mPremultiplied) { DrawSurfaceWithTextureCoords(dest, aRect, source->GetSurface(dest), texturedEffect->mTextureCoords, texturedEffect->mFilter, aOpacity, sourceMask, &maskTransform); } else { RefPtr<DataSourceSurface> srcData = source->GetSurface(dest)->GetDataSurface(); // Yes, we re-create the premultiplied data every time. // This might be better with a cache, eventually. 
RefPtr<DataSourceSurface> premultData = gfxUtils::CreatePremultipliedDataSurface(srcData); DrawSurfaceWithTextureCoords(dest, aRect, premultData, texturedEffect->mTextureCoords, texturedEffect->mFilter, aOpacity, sourceMask, &maskTransform); } break; } case EffectTypes::YCBCR: { NS_RUNTIMEABORT("Can't (easily) support component alpha with BasicCompositor!"); break; } case EffectTypes::RENDER_TARGET: { EffectRenderTarget* effectRenderTarget = static_cast<EffectRenderTarget*>(aEffectChain.mPrimaryEffect.get()); RefPtr<BasicCompositingRenderTarget> surface = static_cast<BasicCompositingRenderTarget*>(effectRenderTarget->mRenderTarget.get()); RefPtr<SourceSurface> sourceSurf = surface->mDrawTarget->Snapshot(); DrawSurfaceWithTextureCoords(dest, aRect, sourceSurf, effectRenderTarget->mTextureCoords, effectRenderTarget->mFilter, aOpacity, sourceMask, &maskTransform); break; } case EffectTypes::COMPONENT_ALPHA: { NS_RUNTIMEABORT("Can't (easily) support component alpha with BasicCompositor!"); break; } default: { NS_RUNTIMEABORT("Invalid effect type!"); break; } } if (!aTransform.Is2D()) { dest->Flush(); RefPtr<SourceSurface> snapshot = dest->Snapshot(); RefPtr<DataSourceSurface> source = snapshot->GetDataSurface(); RefPtr<DataSourceSurface> temp = Factory::CreateDataSourceSurface(RoundOut(transformBounds).Size(), SurfaceFormat::B8G8R8A8 #ifdef MOZ_ENABLE_SKIA , true #endif ); if (NS_WARN_IF(!temp)) { buffer->PopClip(); return; } Transform(temp, source, new3DTransform, transformBounds.TopLeft()); transformBounds.MoveTo(0, 0); buffer->DrawSurface(temp, transformBounds, transformBounds); } buffer->PopClip(); }
// Compute this marker's contribution to a mark's bounding box, in the
// coordinate space described by aToBBoxUserspace. Returns an empty rect for
// reference loops or a degenerate viewBox.
gfxRect
nsSVGMarkerFrame::GetMarkBBoxContribution(const gfxMatrix &aToBBoxUserspace,
                                          PRUint32 aFlags,
                                          nsSVGPathGeometryFrame *aMarkedFrame,
                                          const nsSVGMark *aMark,
                                          float aStrokeWidth)
{
  // If the flag is set when we get here, this marker frame is already being
  // used to compute the current mark bbox — the document contains a marker
  // reference loop, so contribute nothing.
  if (mInUse)
    return gfxRect();

  AutoMarkerReferencer markerRef(this, aMarkedFrame);

  nsSVGMarkerElement *marker = static_cast<nsSVGMarkerElement*>(mContent);

  // A zero- or negative-sized viewBox disables rendering of the marker.
  const nsSVGViewBoxRect viewBox = marker->GetViewBoxRect();
  if (viewBox.width <= 0.0f || viewBox.height <= 0.0f) {
    return gfxRect();
  }

  // Stash the per-mark parameters; GetMarkerTransform reads them.
  mStrokeWidth = aStrokeWidth;
  mX = aMark->x;
  mY = aMark->y;
  mAutoAngle = aMark->angle;

  // Map marker content space -> viewBox space -> marker position/orientation
  // -> bbox userspace.
  gfxMatrix markerMatrix = marker->GetMarkerTransform(mStrokeWidth, mX, mY, mAutoAngle);
  gfxMatrix contentMatrix = marker->GetViewBoxTransform() * markerMatrix * aToBBoxUserspace;

  gfxRect bbox;
  bool haveBBox = false;

  for (nsIFrame* kid = mFrames.FirstChild(); kid; kid = kid->GetNextSibling()) {
    nsISVGChildFrame* svgKid = do_QueryFrame(kid);
    if (!svgKid) {
      continue;
    }

    // When we're being called to obtain the invalidation area, we need to
    // pass down all the flags so that stroke is included. However, once DOM
    // getBBox() accepts flags, maybe we should strip some of those here?

    gfxRect kidBBox = svgKid->GetBBoxContribution(contentMatrix, aFlags);

    // We need to include zero width/height vertical/horizontal lines, so we
    // have to use UnionEdges — but the first non-degenerate bbox seeds |bbox|
    // directly so the initial gfxRect(0,0,0,0) is never unioned in.
    if (!haveBBox && (kidBBox.Width() > 0 || kidBBox.Height() > 0)) {
      bbox = kidBBox;
      haveBBox = true;
      continue;
    }
    bbox = bbox.UnionEdges(kidBBox);
  }

  return bbox;
}