// Maps aRect through this layer manager's world matrix, in place.
// The rect is replaced by the axis-aligned bounds of its transformed image.
void
LayerManagerComposite::WorldTransformRect(nsIntRect& aRect)
{
  const gfx::Rect transformed =
    mWorldMatrix.TransformBounds(gfx::Rect(aRect.x, aRect.y,
                                           aRect.width, aRect.height));
  aRect.SetRect(transformed.X(), transformed.Y(),
                transformed.Width(), transformed.Height());
}
// Returns a context for drawing an update confined to one quadrant of the
// rotated buffer, translated so that drawing at aBounds' coordinates lands
// inside that quadrant. aBounds must fit entirely within a single quadrant
// (asserted below).
already_AddRefed<gfxContext>
ThebesLayerBuffer::GetContextForQuadrantUpdate(const nsIntRect& aBounds)
{
  nsRefPtr<gfxContext> ctx = new gfxContext(mBuffer);

  // Figure out which quadrant to draw in
  // The rotation point splits the buffer into four quadrants; pick the side
  // of each axis that wholly contains aBounds.
  PRInt32 xBoundary = mBufferRect.XMost() - mBufferRotation.x;
  PRInt32 yBoundary = mBufferRect.YMost() - mBufferRotation.y;
  XSide sideX = aBounds.XMost() <= xBoundary ? RIGHT : LEFT;
  YSide sideY = aBounds.YMost() <= yBoundary ? BOTTOM : TOP;
  nsIntRect quadrantRect = GetQuadrantRectangle(sideX, sideY);
  NS_ASSERTION(quadrantRect.Contains(aBounds), "Messed up quadrants");
  // Shift the context so caller-space coordinates map into the quadrant.
  ctx->Translate(-gfxPoint(quadrantRect.x, quadrantRect.y));
  return ctx.forget();
}
// Computes the visible sub-rects of a plugin widget. Intersects the plugin's
// bounds with its clip rects and the parent layer's visible region, then
// reports the result in plugin-widget coordinates via aResult/aVisibleBounds.
// Sets aPluginIsVisible to false (and returns early) if nothing is visible.
static void
CalculatePluginClip(const nsIntRect& aBounds,
                    const nsTArray<nsIntRect>& aPluginClipRects,
                    const nsIntPoint& aContentOffset,
                    const nsIntRegion& aParentLayerVisibleRegion,
                    nsTArray<nsIntRect>& aResult,
                    nsIntRect& aVisibleBounds,
                    bool& aPluginIsVisible)
{
  aPluginIsVisible = true;
  // aBounds (content origin)
  nsIntRegion contentVisibleRegion(aBounds);
  // aPluginClipRects (plugin widget origin)
  for (uint32_t idx = 0; idx < aPluginClipRects.Length(); idx++) {
    nsIntRect rect = aPluginClipRects[idx];
    // shift to content origin
    rect.MoveBy(aBounds.x, aBounds.y);
    contentVisibleRegion.AndWith(rect);
  }
  // apply layers clip (window origin)
  nsIntRegion region = aParentLayerVisibleRegion;
  region.MoveBy(-aContentOffset.x, -aContentOffset.y);
  contentVisibleRegion.AndWith(region);
  if (contentVisibleRegion.IsEmpty()) {
    aPluginIsVisible = false;
    return;
  }
  // shift to plugin widget origin
  contentVisibleRegion.MoveBy(-aBounds.x, -aBounds.y);
  // Emit each visible rect and accumulate their union into aVisibleBounds.
  nsIntRegionRectIterator iter(contentVisibleRegion);
  for (const nsIntRect* rgnRect = iter.Next(); rgnRect; rgnRect = iter.Next()) {
    aResult.AppendElement(*rgnRect);
    aVisibleBounds.UnionRect(aVisibleBounds, *rgnRect);
  }
}
// Computes the smallest rectangle that contains both aRect1 and aRect2 and // fills 'this' with the result. Returns FALSE if both aRect1 and aRect2 are // empty and TRUE otherwise PRBool nsIntRect::UnionRect(const nsIntRect &aRect1, const nsIntRect &aRect2) { PRBool result = PR_TRUE; // Is aRect1 empty? if (aRect1.IsEmpty()) { if (aRect2.IsEmpty()) { // Both rectangles are empty which is an error Empty(); result = PR_FALSE; } else { // aRect1 is empty so set the result to aRect2 *this = aRect2; } } else if (aRect2.IsEmpty()) { // aRect2 is empty so set the result to aRect1 *this = aRect1; } else { PRInt32 xmost1 = aRect1.XMost(); PRInt32 xmost2 = aRect2.XMost(); PRInt32 ymost1 = aRect1.YMost(); PRInt32 ymost2 = aRect2.YMost(); // Compute the origin x = PR_MIN(aRect1.x, aRect2.x); y = PR_MIN(aRect1.y, aRect2.y); // Compute the size width = PR_MAX(xmost1, xmost2) - x; height = PR_MAX(ymost1, ymost2) - y; } return result; }
// Dispatches the notifications encoded in aProgress to every observer, in a
// fixed order (size, onload-block, frame updates, onload-unblock, decode
// complete, load complete). The ordering below is deliberate — see the
// comment before FLAG_ONLOAD_UNBLOCKED.
/* static */ void
ProgressTracker::SyncNotifyInternal(ObserverArray& aObservers,
                                    bool aHasImage,
                                    Progress aProgress,
                                    const nsIntRect& aDirtyRect)
{
  MOZ_ASSERT(NS_IsMainThread());

  typedef imgINotificationObserver I;

  if (aProgress & FLAG_SIZE_AVAILABLE) {
    NOTIFY_IMAGE_OBSERVERS(aObservers, Notify(I::SIZE_AVAILABLE));
  }

  if (aProgress & FLAG_ONLOAD_BLOCKED) {
    NOTIFY_IMAGE_OBSERVERS(aObservers, BlockOnload());
  }

  if (aHasImage) {
    // OnFrameUpdate
    // If there's any content in this frame at all (always true for
    // vector images, true for raster images that have decoded at
    // least one frame) then send OnFrameUpdate.
    if (!aDirtyRect.IsEmpty()) {
      NOTIFY_IMAGE_OBSERVERS(aObservers, Notify(I::FRAME_UPDATE, &aDirtyRect));
    }

    if (aProgress & FLAG_FRAME_COMPLETE) {
      NOTIFY_IMAGE_OBSERVERS(aObservers, Notify(I::FRAME_COMPLETE));
    }

    if (aProgress & FLAG_HAS_TRANSPARENCY) {
      NOTIFY_IMAGE_OBSERVERS(aObservers, Notify(I::HAS_TRANSPARENCY));
    }

    if (aProgress & FLAG_IS_ANIMATED) {
      NOTIFY_IMAGE_OBSERVERS(aObservers, Notify(I::IS_ANIMATED));
    }
  }

  // Send UnblockOnload before OnStopDecode and OnStopRequest. This allows
  // observers that can fire events when they receive those notifications to do
  // so then, instead of being forced to wait for UnblockOnload.
  if (aProgress & FLAG_ONLOAD_UNBLOCKED) {
    NOTIFY_IMAGE_OBSERVERS(aObservers, UnblockOnload());
  }

  if (aProgress & FLAG_DECODE_COMPLETE) {
    MOZ_ASSERT(aHasImage, "Stopped decoding without ever having an image?");
    NOTIFY_IMAGE_OBSERVERS(aObservers, Notify(I::DECODE_COMPLETE));
  }

  if (aProgress & FLAG_LOAD_COMPLETE) {
    NOTIFY_IMAGE_OBSERVERS(aObservers,
                           OnLoadComplete(aProgress & FLAG_LAST_PART_COMPLETE));
  }
}
// Converts the B, G and R channels of every pixel inside `rect` from
// linearRGB back to sRGB in place, via the precomputed lookup table.
// The alpha channel is left untouched.
void
nsSVGUtils::ConvertImageDataFromLinearRGB(PRUint8 *data, PRInt32 stride,
                                          const nsIntRect &rect)
{
  for (PRInt32 row = rect.y; row < rect.YMost(); row++) {
    PRUint8 *rowStart = data + stride * row;
    for (PRInt32 col = rect.x; col < rect.XMost(); col++) {
      PRUint8 *px = rowStart + 4 * col;
      px[GFX_ARGB32_OFFSET_B] = glinearRGBTosRGBMap[px[GFX_ARGB32_OFFSET_B]];
      px[GFX_ARGB32_OFFSET_G] = glinearRGBTosRGBMap[px[GFX_ARGB32_OFFSET_G]];
      px[GFX_ARGB32_OFFSET_R] = glinearRGBTosRGBMap[px[GFX_ARGB32_OFFSET_R]];
    }
  }
}
// Clip aTarget's image to its filter primitive subregion. // aModifiedRect contains all the pixels which might not be RGBA(0,0,0,0), // it's relative to the surface data. static void ClipTarget(nsSVGFilterInstance* aInstance, const nsSVGFE::Image* aTarget, const nsIntRect& aModifiedRect) { nsIntPoint surfaceTopLeft = aInstance->GetSurfaceRect().TopLeft(); NS_ASSERTION(aInstance->GetSurfaceRect().Contains(aModifiedRect + surfaceTopLeft), "Modified data area overflows the surface?"); nsIntRect clip = aModifiedRect; nsSVGUtils::ClipToGfxRect(&clip, aTarget->mFilterPrimitiveSubregion - gfxPoint(surfaceTopLeft.x, surfaceTopLeft.y)); ClearRect(aTarget->mImage, aModifiedRect.x, aModifiedRect.y, aModifiedRect.XMost(), clip.y); ClearRect(aTarget->mImage, aModifiedRect.x, clip.y, clip.x, clip.YMost()); ClearRect(aTarget->mImage, clip.XMost(), clip.y, aModifiedRect.XMost(), clip.YMost()); ClearRect(aTarget->mImage, aModifiedRect.x, clip.YMost(), aModifiedRect.XMost(), aModifiedRect.YMost()); }
// Proxy-based variant of SyncNotifyInternal: replays the notifications
// encoded in aProgress to every proxy, in a fixed order (request start,
// size, decode start, onload-block, frame updates, onload-unblock, decode
// stop, request stop). The ordering is deliberate — see the comment before
// FLAG_ONLOAD_UNBLOCKED.
/* static */ void
ProgressTracker::SyncNotifyInternal(ProxyArray& aProxies,
                                    bool aHasImage,
                                    Progress aProgress,
                                    const nsIntRect& aDirtyRect)
{
  MOZ_ASSERT(NS_IsMainThread());

  // OnStartRequest
  if (aProgress & FLAG_REQUEST_STARTED)
    NOTIFY_IMAGE_OBSERVERS(aProxies, OnStartRequest());

  // OnStartContainer
  if (aProgress & FLAG_HAS_SIZE)
    NOTIFY_IMAGE_OBSERVERS(aProxies, OnStartContainer());

  // OnStartDecode
  if (aProgress & FLAG_DECODE_STARTED)
    NOTIFY_IMAGE_OBSERVERS(aProxies, OnStartDecode());

  // BlockOnload
  if (aProgress & FLAG_ONLOAD_BLOCKED)
    NOTIFY_IMAGE_OBSERVERS(aProxies, BlockOnload());

  if (aHasImage) {
    // OnFrameUpdate
    // If there's any content in this frame at all (always true for
    // vector images, true for raster images that have decoded at
    // least one frame) then send OnFrameUpdate.
    if (!aDirtyRect.IsEmpty())
      NOTIFY_IMAGE_OBSERVERS(aProxies, OnFrameUpdate(&aDirtyRect));

    if (aProgress & FLAG_FRAME_STOPPED)
      NOTIFY_IMAGE_OBSERVERS(aProxies, OnStopFrame());

    // OnImageIsAnimated
    if (aProgress & FLAG_IS_ANIMATED)
      NOTIFY_IMAGE_OBSERVERS(aProxies, OnImageIsAnimated());
  }

  // Send UnblockOnload before OnStopDecode and OnStopRequest. This allows
  // observers that can fire events when they receive those notifications to do
  // so then, instead of being forced to wait for UnblockOnload.
  if (aProgress & FLAG_ONLOAD_UNBLOCKED) {
    NOTIFY_IMAGE_OBSERVERS(aProxies, UnblockOnload());
  }

  if (aProgress & FLAG_DECODE_STOPPED) {
    MOZ_ASSERT(aHasImage, "Stopped decoding without ever having an image?");
    NOTIFY_IMAGE_OBSERVERS(aProxies, OnStopDecode());
  }

  if (aProgress & FLAG_REQUEST_STOPPED) {
    NOTIFY_IMAGE_OBSERVERS(aProxies,
                           OnStopRequest(aProgress & FLAG_MULTIPART_STOPPED));
  }
}
void CopyDataRect(uint8_t *aDest, const uint8_t *aSrc, uint32_t aStride, const nsIntRect& aDataRect) { for (int32_t y = aDataRect.y; y < aDataRect.YMost(); y++) { memcpy(aDest + y * aStride + 4 * aDataRect.x, aSrc + y * aStride + 4 * aDataRect.x, 4 * aDataRect.width); } }
// Converts a rect from filter space to frame space (app units), by mapping
// it through the filter-space -> CSS-px transform and rounding to app units.
// An empty input yields an empty nsRect.
nsRect
nsFilterInstance::FilterSpaceToFrameSpace(const nsIntRect& aRect) const
{
  if (aRect.IsEmpty()) {
    return nsRect();
  }
  const gfxRect frameSpaceInCSSPx =
    mFilterSpaceToFrameSpaceInCSSPxTransform.TransformBounds(
      gfxRect(aRect.x, aRect.y, aRect.width, aRect.height));
  return nsLayoutUtils::RoundGfxRectToAppRect(frameSpaceInCSSPx,
                                              mAppUnitsPerCSSPx);
}
// Returns a context targeting the requested buffer(s) for an update confined
// to one quadrant of the rotated buffer, translated so that drawing at
// aBounds' coordinates lands inside that quadrant. Optionally reports the
// quadrant's top-left via aTopLeft. Returns nullptr if a buffer can't be
// allocated.
already_AddRefed<gfxContext>
RotatedContentBuffer::GetContextForQuadrantUpdate(const nsIntRect& aBounds,
                                                  ContextSource aSource,
                                                  nsIntPoint *aTopLeft)
{
  if (!EnsureBuffer()) {
    return nullptr;
  }

  nsRefPtr<gfxContext> ctx;
  if (aSource == BUFFER_BOTH && HaveBufferOnWhite()) {
    // Component-alpha: draw into black and white buffers simultaneously
    // through a dual draw target.
    if (!EnsureBufferOnWhite()) {
      return nullptr;
    }
    MOZ_ASSERT(mDTBuffer && mDTBufferOnWhite);
    RefPtr<DrawTarget> dualDT =
      Factory::CreateDualDrawTarget(mDTBuffer, mDTBufferOnWhite);
    ctx = new gfxContext(dualDT);
  } else if (aSource == BUFFER_WHITE) {
    if (!EnsureBufferOnWhite()) {
      return nullptr;
    }
    ctx = new gfxContext(mDTBufferOnWhite);
  } else {
    // BUFFER_BLACK, or BUFFER_BOTH with a single buffer.
    ctx = new gfxContext(mDTBuffer);
  }

  // Figure out which quadrant to draw in
  // The rotation point splits the buffer into four quadrants; pick the side
  // of each axis that wholly contains aBounds.
  int32_t xBoundary = mBufferRect.XMost() - mBufferRotation.x;
  int32_t yBoundary = mBufferRect.YMost() - mBufferRotation.y;
  XSide sideX = aBounds.XMost() <= xBoundary ? RIGHT : LEFT;
  YSide sideY = aBounds.YMost() <= yBoundary ? BOTTOM : TOP;
  nsIntRect quadrantRect = GetQuadrantRectangle(sideX, sideY);
  NS_ASSERTION(quadrantRect.Contains(aBounds), "Messed up quadrants");
  // Shift the context so caller-space coordinates map into the quadrant.
  ctx->Translate(-gfxPoint(quadrantRect.x, quadrantRect.y));

  if (aTopLeft) {
    *aTopLeft = nsIntPoint(quadrantRect.x, quadrantRect.y);
  }

  return ctx.forget();
}
// Builds a ScreenConfiguration snapshot from the current rotation, virtual
// bounds and color depth.
/*static*/ ScreenConfiguration
nsScreenGonk::GetConfiguration()
{
  const uint32_t depth = ColorDepth();
  // NB: perpetuating colorDepth == pixelDepth illusion here, for
  // consistency.
  return ScreenConfiguration(sVirtualBounds,
                             ComputeOrientation(sScreenRotation,
                                                gScreenBounds.Size()),
                             depth, depth);
}
// Computes the HWC source-crop and screen visible-region rects for a layer.
// Returns false when the layer ends up entirely clipped out (and can be
// skipped). The pipeline is: layer space -> screen (clip) -> back to buffer
// space (crop, rounded) -> forward again to refine the screen rect.
/* static */ bool
HwcUtils::PrepareLayerRects(nsIntRect aVisible,
                            const gfx::Matrix& aLayerTransform,
                            const gfx::Matrix& aLayerBufferTransform,
                            nsIntRect aClip, nsIntRect aBufferRect,
                            bool aYFlipped,
                            hwc_rect_t* aSourceCrop,
                            hwc_rect_t* aVisibleRegionScreen)
{
  // NOTE(review): `aTransform` is a local despite the argument-style `a`
  // prefix — consider renaming.
  gfxMatrix aTransform = gfx::ThebesMatrix(aLayerTransform);
  gfxRect visibleRect(aVisible);
  gfxRect clip(aClip);
  gfxRect visibleRectScreen = aTransform.TransformBounds(visibleRect);
  // |clip| is guaranteed to be integer
  visibleRectScreen.IntersectRect(visibleRectScreen, clip);

  if (visibleRectScreen.IsEmpty()) {
    return false;
  }

  // Map the clipped screen rect back into buffer space to get the crop.
  gfxMatrix inverse = gfx::ThebesMatrix(aLayerBufferTransform);
  inverse.Invert();
  gfxRect crop = inverse.TransformBounds(visibleRectScreen);

  //clip to buffer size
  crop.IntersectRect(crop, aBufferRect);
  crop.Round();

  if (crop.IsEmpty()) {
    return false;
  }

  //propagate buffer clipping back to visible rect
  gfxMatrix layerBufferTransform = gfx::ThebesMatrix(aLayerBufferTransform);
  visibleRectScreen = layerBufferTransform.TransformBounds(crop);
  visibleRectScreen.Round();

  // Map from layer space to buffer space
  crop -= aBufferRect.TopLeft();
  if (aYFlipped) {
    // Mirror the crop vertically within the buffer.
    crop.y = aBufferRect.height - (crop.y + crop.height);
  }

  aSourceCrop->left = crop.x;
  aSourceCrop->top  = crop.y;
  aSourceCrop->right  = crop.x + crop.width;
  aSourceCrop->bottom = crop.y + crop.height;

  aVisibleRegionScreen->left = visibleRectScreen.x;
  aVisibleRegionScreen->top  = visibleRectScreen.y;
  aVisibleRegionScreen->right  = visibleRectScreen.x + visibleRectScreen.width;
  aVisibleRegionScreen->bottom = visibleRectScreen.y + visibleRectScreen.height;

  return true;
}
// Forwards the first-paint viewport (scroll offset, zoom, page rect in both
// device and CSS pixels) to the Java-side layer client over JNI.
void
AndroidGeckoLayerClient::SetFirstPaintViewport(const nsIntPoint& aOffset,
                                               float aZoom,
                                               const nsIntRect& aPageRect,
                                               const gfx::Rect& aCssPageRect)
{
  NS_ASSERTION(!isNull(), "SetFirstPaintViewport called on null layer client!");
  JNIEnv *env = GetJNIForThread();  // this is called on the compositor thread
  if (!env) {
    return;
  }

  AutoLocalJNIFrame jniFrame(env, 0);
  env->CallVoidMethod(wrapped_obj, jSetFirstPaintViewport,
                      (float)aOffset.x, (float)aOffset.y, aZoom,
                      (float)aPageRect.x, (float)aPageRect.y,
                      (float)aPageRect.XMost(), (float)aPageRect.YMost(),
                      aCssPageRect.x, aCssPageRect.y,
                      aCssPageRect.XMost(), aCssPageRect.YMost());
}
static void ComputeLinearRGBLuminanceMask(uint8_t *aData, int32_t aStride, const nsIntRect &aRect, float aOpacity) { for (int32_t y = aRect.y; y < aRect.YMost(); y++) { for (int32_t x = aRect.x; x < aRect.XMost(); x++) { uint8_t *pixel = aData + aStride * y + 4 * x; uint8_t a = pixel[GFX_ARGB32_OFFSET_A]; uint8_t luminance; // unpremultiply if (a) { if (a != 255) { pixel[GFX_ARGB32_OFFSET_B] = (255 * pixel[GFX_ARGB32_OFFSET_B]) / a; pixel[GFX_ARGB32_OFFSET_G] = (255 * pixel[GFX_ARGB32_OFFSET_G]) / a; pixel[GFX_ARGB32_OFFSET_R] = (255 * pixel[GFX_ARGB32_OFFSET_R]) / a; } /* sRGB -> linearRGB -> intensity */ luminance = static_cast<uint8_t> ((gsRGBToLinearRGBMap[pixel[GFX_ARGB32_OFFSET_R]] * 0.2125 + gsRGBToLinearRGBMap[pixel[GFX_ARGB32_OFFSET_G]] * 0.7154 + gsRGBToLinearRGBMap[pixel[GFX_ARGB32_OFFSET_B]] * 0.0721) * (a / 255.0) * aOpacity); } else { luminance = 0; } memset(pixel, luminance, 4); } } }
// Premultiplies the B, G and R channels of every pixel inside `rect` by the
// pixel's alpha, in place. Fully-opaque pixels are skipped since
// premultiplying by 255/255 is a no-op.
void
nsSVGUtils::PremultiplyImageDataAlpha(PRUint8 *data, PRInt32 stride,
                                      const nsIntRect &rect)
{
  for (PRInt32 row = rect.y; row < rect.YMost(); row++) {
    PRUint8 *rowStart = data + stride * row;
    for (PRInt32 col = rect.x; col < rect.XMost(); col++) {
      PRUint8 *px = rowStart + 4 * col;
      PRUint8 alpha = px[GFX_ARGB32_OFFSET_A];
      if (alpha == 255) {
        continue;
      }
      FAST_DIVIDE_BY_255(px[GFX_ARGB32_OFFSET_B], px[GFX_ARGB32_OFFSET_B] * alpha);
      FAST_DIVIDE_BY_255(px[GFX_ARGB32_OFFSET_G], px[GFX_ARGB32_OFFSET_G] * alpha);
      FAST_DIVIDE_BY_255(px[GFX_ARGB32_OFFSET_R], px[GFX_ARGB32_OFFSET_R] * alpha);
    }
  }
}
// Invalidates a plugin widget, then additionally walks down the plugin's
// child-window chain and invalidates the deepest zero-origin child directly,
// because flash's sandboxed window doesn't receive the normal invalidate.
/* static */ void
WinUtils::InvalidatePluginAsWorkaround(nsIWidget *aWidget,
                                       const nsIntRect &aRect)
{
  aWidget->Invalidate(aRect);

  // XXX - Even more evil workaround!! See bug 762948, flash's bottom
  // level sandboxed window doesn't seem to get our invalidate. We send
  // an invalidate to it manually. This is totally specialized for this
  // bug, for other child window structures this will just be a more or
  // less bogus invalidate but since that should not have any bad
  // side-effects this will have to do for now.
  HWND current = (HWND)aWidget->GetNativeData(NS_NATIVE_WINDOW);

  RECT windowRect;
  RECT parentRect;

  ::GetWindowRect(current, &parentRect);

  // Descend the first-child chain as long as each child sits at its
  // parent's top-left corner.
  HWND next = current;
  do {
    current = next;
    ::EnumChildWindows(current, &EnumFirstChild, (LPARAM)&next);
    ::GetWindowRect(next, &windowRect);
    // This is relative to the screen, adjust it to be relative to the
    // window we're reconfiguring.
    windowRect.left -= parentRect.left;
    windowRect.top -= parentRect.top;
  } while (next != current && windowRect.top == 0 && windowRect.left == 0);

  if (windowRect.top == 0 && windowRect.left == 0) {
    RECT rect;
    rect.left = aRect.x;
    rect.top = aRect.y;
    rect.right = aRect.XMost();
    rect.bottom = aRect.YMost();

    ::InvalidateRect(next, &rect, FALSE);
  }
}
// Draws this frame into aContext, filling aFill. Takes the single-pixel
// fast path when possible; otherwise computes the source/subimage rects,
// decides whether tiling is needed, and hands off to
// SurfaceForDrawing/DrawPixelSnapped. Returns false only if the frame's
// surface is unavailable.
bool imgFrame::Draw(gfxContext *aContext, GraphicsFilter aFilter,
                    const gfxMatrix &aUserSpaceToImageSpace, const gfxRect& aFill,
                    const nsIntMargin &aPadding, const nsIntRect &aSubimage,
                    uint32_t aImageFlags)
{
  PROFILER_LABEL("image", "imgFrame::Draw");
  NS_ASSERTION(!aFill.IsEmpty(), "zero dest size --- fix caller");
  NS_ASSERTION(!aSubimage.IsEmpty(), "zero source size --- fix caller");
  NS_ASSERTION(!mPalettedImageData, "Directly drawing a paletted image!");

  bool doPadding = aPadding != nsIntMargin(0,0,0,0);
  bool doPartialDecode = !ImageComplete();

  // Solid-color frame with no padding and fully decoded: just fill.
  if (mSinglePixel && !doPadding && !doPartialDecode) {
    DoSingleColorFastPath(aContext, mSinglePixelColor, aFill);
    return true;
  }

  gfxMatrix userSpaceToImageSpace = aUserSpaceToImageSpace;
  // The source area of the image we will sample, in image space.
  gfxRect sourceRect = userSpaceToImageSpace.TransformBounds(aFill);
  // Image bounds, inflated by the padding.
  gfxRect imageRect(0, 0, mSize.width + aPadding.LeftRight(),
                    mSize.height + aPadding.TopBottom());
  gfxRect subimage(aSubimage.x, aSubimage.y, aSubimage.width, aSubimage.height);
  gfxRect fill = aFill;

  NS_ASSERTION(!sourceRect.Intersect(subimage).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");

  nsRefPtr<gfxASurface> surf;
  if (!mSinglePixel) {
    surf = ThebesSurface();
    if (!surf)
      return false;
  }

  // Tile when the sampled area extends outside the image, unless the caller
  // asked for edge clamping instead.
  bool doTile = !imageRect.Contains(sourceRect) &&
                !(aImageFlags & imgIContainer::FLAG_CLAMP);
  SurfaceWithFormat surfaceResult =
    SurfaceForDrawing(doPadding, doPartialDecode, doTile, aPadding,
                      userSpaceToImageSpace, fill, subimage, sourceRect,
                      imageRect, surf);

  if (surfaceResult.IsValid()) {
    gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
                               userSpaceToImageSpace,
                               subimage, sourceRect, imageRect, fill,
                               surfaceResult.mFormat, aFilter, aImageFlags);
  }
  return true;
}
/**
 * Sets hwc layer rectangles required for hwc composition
 *
 * @param aVisible Input. Layer's unclipped visible rectangle
 *        The origin is the top-left corner of the layer
 * @param aTransform Input. Layer's transformation matrix
 *        It transforms from layer space to screen space
 * @param aClip Input. A clipping rectangle.
 *        The origin is the top-left corner of the screen
 * @param aBufferRect Input. The layer's buffer bounds
 *        The origin is the top-left corner of the layer
 * @param aSourceCrop Output. Area of the source to consider,
 *        the origin is the top-left corner of the buffer
 * @param aVisibleRegionScreen Output. Visible region in screen space.
 *        The origin is the top-left corner of the screen
 * @return true if the layer should be rendered.
 *         false if the layer can be skipped
 */
static bool
PrepareLayerRects(nsIntRect aVisible, const gfxMatrix& aTransform,
                  nsIntRect aClip, nsIntRect aBufferRect,
                  hwc_rect_t* aSourceCrop, hwc_rect_t* aVisibleRegionScreen)
{
  gfxRect visibleRect(aVisible);
  gfxRect clip(aClip);
  gfxRect visibleRectScreen = aTransform.TransformBounds(visibleRect);
  // |clip| is guaranteed to be integer
  visibleRectScreen.IntersectRect(visibleRectScreen, clip);

  if (visibleRectScreen.IsEmpty()) {
    LOGD("Skip layer");
    return false;
  }

  // Map the clipped screen rect back into layer/buffer space for the crop.
  gfxMatrix inverse(aTransform);
  inverse.Invert();
  gfxRect crop = inverse.TransformBounds(visibleRectScreen);
  //clip to buffer size
  crop.IntersectRect(crop, aBufferRect);
  crop.RoundOut();

  if (crop.IsEmpty()) {
    LOGD("Skip layer");
    return false;
  }

  //propagate buffer clipping back to visible rect
  visibleRectScreen = aTransform.TransformBounds(crop);
  visibleRectScreen.RoundOut();

  // Map from layer space to buffer space
  crop -= aBufferRect.TopLeft();

  aSourceCrop->left = crop.x;
  aSourceCrop->top  = crop.y;
  aSourceCrop->right  = crop.x + crop.width;
  aSourceCrop->bottom = crop.y + crop.height;

  aVisibleRegionScreen->left = visibleRectScreen.x;
  aVisibleRegionScreen->top  = visibleRectScreen.y;
  aVisibleRegionScreen->right  = visibleRectScreen.x + visibleRectScreen.width;
  aVisibleRegionScreen->bottom = visibleRectScreen.y + visibleRectScreen.height;

  return true;
}
// Expands an invalid (dirty) rect to account for SVG filter effects applied
// to aFrame. The rect is converted from reference-frame pixels into the
// frame's "user space" app units, run through the filter's dirty-area
// computation, and converted back. Frames without an active filter get the
// rect back unchanged.
nsIntRect
nsSVGIntegrationUtils::AdjustInvalidAreaForSVGEffects(nsIFrame* aFrame,
                                                      const nsPoint& aToReferenceFrame,
                                                      const nsIntRect& aInvalidRect)
{
  // Don't bother calling GetEffectProperties; the filter property should
  // already have been set up during reflow/ComputeFrameEffectsRect
  nsIFrame* firstFrame =
    nsLayoutUtils::GetFirstContinuationOrSpecialSibling(aFrame);
  nsSVGEffects::EffectProperties effectProperties =
    nsSVGEffects::GetEffectProperties(firstFrame);
  if (!effectProperties.mFilter)
    return aInvalidRect;
  nsSVGFilterProperty *prop = nsSVGEffects::GetFilterProperty(firstFrame);
  if (!prop || !prop->IsInObserverList()) {
    return aInvalidRect;
  }

  int32_t appUnitsPerDevPixel = aFrame->PresContext()->AppUnitsPerDevPixel();

  nsSVGFilterFrame* filterFrame = prop->GetFilterFrame();
  if (!filterFrame) {
    // The frame is either not there or not currently available,
    // perhaps because we're in the middle of tearing stuff down.
    // Be conservative, return our visual overflow rect relative
    // to the reference frame.
    nsRect overflow = aFrame->GetVisualOverflowRect() + aToReferenceFrame;
    return overflow.ToOutsidePixels(appUnitsPerDevPixel);
  }

  // Convert aInvalidRect into "user space" in app units:
  nsPoint toUserSpace =
    aFrame->GetOffsetTo(firstFrame) + GetOffsetToUserSpace(firstFrame);
  // The initial rect was relative to the reference frame, so we need to
  // remove that offset to get a rect relative to the current frame.
  toUserSpace -= aToReferenceFrame;
  nsRect preEffectsRect =
    aInvalidRect.ToAppUnits(appUnitsPerDevPixel) + toUserSpace;

  // Adjust the dirty area for effects, and shift it back to being relative to
  // the reference frame.
  nsRect result =
    filterFrame->GetPostFilterDirtyArea(firstFrame, preEffectsRect) - toUserSpace;
  // Return the result, in pixels relative to the reference frame.
  return result.ToOutsidePixels(appUnitsPerDevPixel);
}
// Maps aRect through aTransform and returns the smallest integer rect
// containing the result. Returns an empty rect for empty input, or when the
// transformed bounds cannot be represented as an nsIntRect.
static nsIntRect
TransformRect(const nsIntRect& aRect, const gfx3DMatrix& aTransform)
{
  nsIntRect result;
  if (aRect.IsEmpty()) {
    return result;
  }

  gfxRect bounds =
    aTransform.TransformBounds(gfxRect(aRect.x, aRect.y,
                                       aRect.width, aRect.height));
  bounds.RoundOut();
  if (!gfxUtils::GfxRectToIntRect(bounds, &result)) {
    return nsIntRect();
  }
  return result;
}
//****************************************************************************** void FrameAnimator::ClearFrame(uint8_t* aFrameData, const nsIntRect& aFrameRect, const nsIntRect& aRectToClear) { if (!aFrameData || aFrameRect.width <= 0 || aFrameRect.height <= 0 || aRectToClear.width <= 0 || aRectToClear.height <= 0) { return; } nsIntRect toClear = aFrameRect.Intersect(aRectToClear); if (toClear.IsEmpty()) { return; } uint32_t bytesPerRow = aFrameRect.width * 4; for (int row = toClear.y; row < toClear.y + toClear.height; ++row) { memset(aFrameData + toClear.x * 4 + row * bytesPerRow, 0, toClear.width * 4); } }
// Draws this frame into aContext, filling aFill. Takes the single-pixel
// fast path when possible; otherwise computes the source/subimage rects,
// decides whether tiling is needed, and hands off to
// SurfaceForDrawing/DrawPixelSnapped.
void imgFrame::Draw(gfxContext *aContext, gfxPattern::GraphicsFilter aFilter,
                    const gfxMatrix &aUserSpaceToImageSpace, const gfxRect& aFill,
                    const nsIntMargin &aPadding, const nsIntRect &aSubimage)
{
  NS_ASSERTION(!aFill.IsEmpty(), "zero dest size --- fix caller");
  NS_ASSERTION(!aSubimage.IsEmpty(), "zero source size --- fix caller");
  NS_ASSERTION(!mPalettedImageData, "Directly drawing a paletted image!");

  bool doPadding = aPadding != nsIntMargin(0,0,0,0);
  bool doPartialDecode = !ImageComplete();

  // Solid-color frame with no padding and fully decoded: just fill.
  if (mSinglePixel && !doPadding && !doPartialDecode) {
    DoSingleColorFastPath(aContext, mSinglePixelColor, aFill);
    return;
  }

  gfxMatrix userSpaceToImageSpace = aUserSpaceToImageSpace;
  // The source area of the image we will sample, in image space.
  gfxRect sourceRect = userSpaceToImageSpace.Transform(aFill);
  // Image bounds, inflated by the padding.
  gfxRect imageRect(0, 0, mSize.width + aPadding.LeftRight(),
                    mSize.height + aPadding.TopBottom());
  gfxRect subimage(aSubimage.x, aSubimage.y, aSubimage.width, aSubimage.height);
  gfxRect fill = aFill;

  NS_ASSERTION(!sourceRect.Intersect(subimage).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");

  // Tile when the sampled area extends outside the image bounds.
  bool doTile = !imageRect.Contains(sourceRect);
  SurfaceWithFormat surfaceResult =
    SurfaceForDrawing(doPadding, doPartialDecode, doTile, aPadding,
                      userSpaceToImageSpace, fill, subimage, sourceRect,
                      imageRect);

  if (surfaceResult.IsValid()) {
    gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
                               userSpaceToImageSpace,
                               subimage, sourceRect, imageRect, fill,
                               surfaceResult.mFormat, aFilter);
  }
}
// Applies a Gaussian blur to aDataRect by running three successive box blurs
// in each dimension (a standard approximation of a Gaussian). aDX/aDY are
// the blur diameters; a value of 0 skips blurring along that axis and just
// copies the data through. `tmp` ping-pongs with targetData so that after
// the three horizontal passes the intermediate lives in `tmp`, and after the
// three vertical passes the final result lives in targetData.
void
SVGFEGaussianBlurElement::GaussianBlur(const Image* aSource,
                                       const Image* aTarget,
                                       const nsIntRect& aDataRect,
                                       uint32_t aDX, uint32_t aDY)
{
  NS_ASSERTION(nsIntRect(0, 0, aTarget->mImage->Width(),
                         aTarget->mImage->Height()).Contains(aDataRect),
               "aDataRect out of bounds");

  nsAutoArrayPtr<uint8_t> tmp(new uint8_t[aTarget->mImage->GetDataSize()]);
  if (!tmp)
    return;
  memset(tmp, 0, aTarget->mImage->GetDataSize());

  bool alphaOnly = AreAllColorChannelsZero(aTarget);

  const uint8_t* sourceData = aSource->mImage->Data();
  uint8_t* targetData = aTarget->mImage->Data();
  uint32_t stride = aTarget->mImage->Stride();

  if (aDX == 0) {
    CopyDataRect(tmp, sourceData, stride, aDataRect);
  } else {
    // Horizontal passes: blur each row with three box blurs whose lobes
    // depend on whether the diameter is odd or even.
    int32_t longLobe = aDX/2;
    int32_t shortLobe = (aDX & 1) ? longLobe : longLobe - 1;
    for (int32_t major = aDataRect.y; major < aDataRect.YMost(); ++major) {
      int32_t ms = major*stride;
      BoxBlur(sourceData + ms, tmp + ms, 4, aDataRect.x, aDataRect.XMost(),
              longLobe, shortLobe, alphaOnly);
      BoxBlur(tmp + ms, targetData + ms, 4, aDataRect.x, aDataRect.XMost(),
              shortLobe, longLobe, alphaOnly);
      BoxBlur(targetData + ms, tmp + ms, 4, aDataRect.x, aDataRect.XMost(),
              longLobe, longLobe, alphaOnly);
    }
  }

  if (aDY == 0) {
    CopyDataRect(targetData, tmp, stride, aDataRect);
  } else {
    // Vertical passes: same scheme, walking columns with the stride as the
    // element step.
    int32_t longLobe = aDY/2;
    int32_t shortLobe = (aDY & 1) ? longLobe : longLobe - 1;
    for (int32_t major = aDataRect.x; major < aDataRect.XMost(); ++major) {
      int32_t ms = major*4;
      BoxBlur(tmp + ms, targetData + ms, stride, aDataRect.y, aDataRect.YMost(),
              longLobe, shortLobe, alphaOnly);
      BoxBlur(targetData + ms, tmp + ms, stride, aDataRect.y, aDataRect.YMost(),
              shortLobe, longLobe, alphaOnly);
      BoxBlur(tmp + ms, targetData + ms, stride, aDataRect.y, aDataRect.YMost(),
              longLobe, longLobe, alphaOnly);
    }
  }
}
// Returns true if the visible, non-hidden children of this container exactly
// partition aInRect (intersected with this container's visible bounds): they
// cover it completely and no two children overlap. Bails out (false) if any
// transform involved isn't an integer 2D translation, or any child is
// translucent, since then coverage can't be reasoned about per-pixel.
bool
BasicContainerLayer::ChildrenPartitionVisibleRegion(const nsIntRect& aInRect)
{
  Matrix transform;
  if (!GetEffectiveTransform().CanDraw2D(&transform) ||
      ThebesMatrix(transform).HasNonIntegerTranslation())
    return false;

  nsIntPoint offset(int32_t(transform._31), int32_t(transform._32));
  nsIntRect rect = aInRect.Intersect(GetEffectiveVisibleRegion().GetBounds() + offset);
  nsIntRegion covered;

  for (Layer* l = mFirstChild; l; l = l->GetNextSibling()) {
    if (ToData(l)->IsHidden())
      continue;

    Matrix childTransform;
    if (!l->GetEffectiveTransform().CanDraw2D(&childTransform) ||
        ThebesMatrix(childTransform).HasNonIntegerTranslation() ||
        l->GetEffectiveOpacity() != 1.0)
      return false;
    // Child's visible region, shifted into this container's coordinate space
    // and clipped to the area we care about.
    nsIntRegion childRegion = l->GetEffectiveVisibleRegion();
    childRegion.MoveBy(int32_t(childTransform._31), int32_t(childTransform._32));
    childRegion.And(childRegion, rect);
    if (l->GetClipRect()) {
      childRegion.And(childRegion, *l->GetClipRect() + offset);
    }

    // Any overlap with previously-covered area breaks the partition.
    nsIntRegion intersection;
    intersection.And(covered, childRegion);
    if (!intersection.IsEmpty())
      return false;

    covered.Or(covered, childRegion);
  }

  return covered.Contains(rect);
}
// Composites every tile of aLayerBuffer that intersects aVisibleRect,
// restricted to aValidRegion minus aMaskRegion. Compensates for a frame
// resolution that differs from the local buffer's, and folds the buffer
// resolution into the transform so tiles can be rendered in layer pixels.
void
TiledContentHost::RenderLayerBuffer(TiledLayerBufferComposite& aLayerBuffer,
                                    const nsIntRegion& aValidRegion,
                                    EffectChain& aEffectChain,
                                    float aOpacity,
                                    const gfx::Point& aOffset,
                                    const gfx::Filter& aFilter,
                                    const gfx::Rect& aClipRect,
                                    const nsIntRegion& aMaskRegion,
                                    nsIntRect aVisibleRect,
                                    gfx::Matrix4x4 aTransform)
{
  float resolution = aLayerBuffer.GetResolution();
  gfxSize layerScale(1, 1);
  // We assume that the current frame resolution is the one used in our primary
  // layer buffer. Compensate for a changing frame resolution.
  if (aLayerBuffer.GetFrameResolution() != mVideoMemoryTiledBuffer.GetFrameResolution()) {
    const gfxSize& layerResolution = aLayerBuffer.GetFrameResolution();
    const gfxSize& localResolution = mVideoMemoryTiledBuffer.GetFrameResolution();
    layerScale.width = layerResolution.width / localResolution.width;
    layerScale.height = layerResolution.height / localResolution.height;
    aVisibleRect.ScaleRoundOut(layerScale.width, layerScale.height);
  }
  // Undo the buffer resolution (and any frame-resolution scale) in the
  // transform so tile geometry below can be expressed in buffer pixels.
  aTransform.Scale(1/(resolution * layerScale.width),
                   1/(resolution * layerScale.height), 1);

  uint32_t rowCount = 0;
  uint32_t tileX = 0;
  // Walk the visible rect tile by tile; w/h are clamped at the rect edges so
  // partial tiles are handled.
  for (int32_t x = aVisibleRect.x; x < aVisibleRect.x + aVisibleRect.width;) {
    rowCount++;
    int32_t tileStartX = aLayerBuffer.GetTileStart(x);
    int32_t w = aLayerBuffer.GetScaledTileLength() - tileStartX;
    if (x + w > aVisibleRect.x + aVisibleRect.width) {
      w = aVisibleRect.x + aVisibleRect.width - x;
    }
    int tileY = 0;
    for (int32_t y = aVisibleRect.y; y < aVisibleRect.y + aVisibleRect.height;) {
      int32_t tileStartY = aLayerBuffer.GetTileStart(y);
      int32_t h = aLayerBuffer.GetScaledTileLength() - tileStartY;
      if (y + h > aVisibleRect.y + aVisibleRect.height) {
        h = aVisibleRect.y + aVisibleRect.height - y;
      }

      TiledTexture tileTexture = aLayerBuffer.
        GetTile(nsIntPoint(aLayerBuffer.RoundDownToTileEdge(x),
                           aLayerBuffer.RoundDownToTileEdge(y)));
      if (tileTexture != aLayerBuffer.GetPlaceholderTile()) {
        // Restrict drawing to the valid, unmasked part of this tile.
        nsIntRegion tileDrawRegion;
        tileDrawRegion.And(aValidRegion,
                           nsIntRect(x * layerScale.width,
                                     y * layerScale.height,
                                     w * layerScale.width,
                                     h * layerScale.height));
        tileDrawRegion.Sub(tileDrawRegion, aMaskRegion);

        if (!tileDrawRegion.IsEmpty()) {
          tileDrawRegion.ScaleRoundOut(resolution / layerScale.width,
                                       resolution / layerScale.height);

          nsIntPoint tileOffset((x - tileStartX) * resolution,
                                (y - tileStartY) * resolution);
          uint32_t tileSize = aLayerBuffer.GetTileLength();
          RenderTile(tileTexture, aEffectChain, aOpacity, aTransform, aOffset,
                     aFilter, aClipRect, tileDrawRegion, tileOffset,
                     nsIntSize(tileSize, tileSize));
        }
      }
      tileY++;
      y += h;
    }
    tileX++;
    x += w;
  }
}
// Extracts the current cairo clip as a list of integer rectangles.
// On success returns true and either sets *need_clip = false (the clip
// covers `bounds` entirely) or fills `rectangles`/`num_rectangles` with up
// to max_rectangles clip rects and sets *need_clip = true. Returns false
// when the clip is non-rectangular, non-integer, or has too many rects —
// callers then fall back to a slower drawing path.
static bool
_get_rectangular_clip (cairo_t *cr,
                       const nsIntRect& bounds,
                       bool *need_clip,
                       nsIntRect *rectangles, int max_rectangles,
                       int *num_rectangles)
{
    cairo_rectangle_list_t *cliplist;
    cairo_rectangle_t *clips;
    int i;
    bool retval = true;

    cliplist = cairo_copy_clip_rectangle_list (cr);
    if (cliplist->status != CAIRO_STATUS_SUCCESS) {
        retval = false;
        NATIVE_DRAWING_NOTE("FALLBACK: non-rectangular clip");
        goto FINISH;
    }

    /* the clip is always in surface backend coordinates (i.e. native backend coords) */
    clips = cliplist->rectangles;

    for (i = 0; i < cliplist->num_rectangles; ++i) {
        nsIntRect rect;
        // Reject any clip rect with non-integer coordinates.
        if (!_convert_coord_to_int (clips[i].x, &rect.x) ||
            !_convert_coord_to_int (clips[i].y, &rect.y) ||
            !_convert_coord_to_int (clips[i].width, &rect.width) ||
            !_convert_coord_to_int (clips[i].height, &rect.height))
        {
            retval = false;
            NATIVE_DRAWING_NOTE("FALLBACK: non-integer clip");
            goto FINISH;
        }

        if (rect.IsEqualInterior(bounds)) {
            /* the bounds are entirely inside the clip region so we don't need to clip. */
            *need_clip = false;
            goto FINISH;
        }

        NS_ASSERTION(bounds.Contains(rect),
                     "Was expecting to be clipped to bounds");

        if (i >= max_rectangles) {
            retval = false;
            NATIVE_DRAWING_NOTE("FALLBACK: unsupported clip rectangle count");
            goto FINISH;
        }

        rectangles[i] = rect;
    }

    *need_clip = true;
    *num_rectangles = cliplist->num_rectangles;

FINISH:
    // cliplist is always destroyed, whichever path reached FINISH.
    cairo_rectangle_list_destroy (cliplist);

    return retval;
}
void GLBlitTextureImageHelper::BlitTextureImage(TextureImage *aSrc, const nsIntRect& aSrcRect, TextureImage *aDst, const nsIntRect& aDstRect) { GLContext *gl = mCompositor->gl(); NS_ASSERTION(!aSrc->InUpdate(), "Source texture is in update!"); NS_ASSERTION(!aDst->InUpdate(), "Destination texture is in update!"); if (!aSrc || !aDst || aSrcRect.IsEmpty() || aDstRect.IsEmpty()) return; int savedFb = 0; gl->fGetIntegerv(LOCAL_GL_FRAMEBUFFER_BINDING, &savedFb); ScopedGLState scopedScissorTestState(gl, LOCAL_GL_SCISSOR_TEST, false); ScopedGLState scopedBlendState(gl, LOCAL_GL_BLEND, false); // 2.0 means scale up by two float blitScaleX = float(aDstRect.width) / float(aSrcRect.width); float blitScaleY = float(aDstRect.height) / float(aSrcRect.height); // We start iterating over all destination tiles aDst->BeginBigImageIteration(); do { // calculate portion of the tile that is going to be painted to nsIntRect dstSubRect; nsIntRect dstTextureRect = ThebesIntRect(aDst->GetTileRect()); dstSubRect.IntersectRect(aDstRect, dstTextureRect); // this tile is not part of the destination rectangle aDstRect if (dstSubRect.IsEmpty()) continue; // (*) transform the rect of this tile into the rectangle defined by aSrcRect... nsIntRect dstInSrcRect(dstSubRect); dstInSrcRect.MoveBy(-aDstRect.TopLeft()); // ...which might be of different size, hence scale accordingly dstInSrcRect.ScaleRoundOut(1.0f / blitScaleX, 1.0f / blitScaleY); dstInSrcRect.MoveBy(aSrcRect.TopLeft()); SetBlitFramebufferForDestTexture(aDst->GetTextureID()); UseBlitProgram(); aSrc->BeginBigImageIteration(); // now iterate over all tiles in the source Image... 
do { // calculate portion of the source tile that is in the source rect nsIntRect srcSubRect; nsIntRect srcTextureRect = ThebesIntRect(aSrc->GetTileRect()); srcSubRect.IntersectRect(aSrcRect, srcTextureRect); // this tile is not part of the source rect if (srcSubRect.IsEmpty()) { continue; } // calculate intersection of source rect with destination rect srcSubRect.IntersectRect(srcSubRect, dstInSrcRect); // this tile does not overlap the current destination tile if (srcSubRect.IsEmpty()) { continue; } // We now have the intersection of // the current source tile // and the desired source rectangle // and the destination tile // and the desired destination rectange // in destination space. // We need to transform this back into destination space, inverting the transform from (*) nsIntRect srcSubInDstRect(srcSubRect); srcSubInDstRect.MoveBy(-aSrcRect.TopLeft()); srcSubInDstRect.ScaleRoundOut(blitScaleX, blitScaleY); srcSubInDstRect.MoveBy(aDstRect.TopLeft()); // we transform these rectangles to be relative to the current src and dst tiles, respectively nsIntSize srcSize = srcTextureRect.Size(); nsIntSize dstSize = dstTextureRect.Size(); srcSubRect.MoveBy(-srcTextureRect.x, -srcTextureRect.y); srcSubInDstRect.MoveBy(-dstTextureRect.x, -dstTextureRect.y); float dx0 = 2.0f * float(srcSubInDstRect.x) / float(dstSize.width) - 1.0f; float dy0 = 2.0f * float(srcSubInDstRect.y) / float(dstSize.height) - 1.0f; float dx1 = 2.0f * float(srcSubInDstRect.x + srcSubInDstRect.width) / float(dstSize.width) - 1.0f; float dy1 = 2.0f * float(srcSubInDstRect.y + srcSubInDstRect.height) / float(dstSize.height) - 1.0f; ScopedViewportRect autoViewportRect(gl, 0, 0, dstSize.width, dstSize.height); RectTriangles rects; nsIntSize realTexSize = srcSize; if (!CanUploadNonPowerOfTwo(gl)) { realTexSize = nsIntSize(gfx::NextPowerOfTwo(srcSize.width), gfx::NextPowerOfTwo(srcSize.height)); } if (aSrc->GetWrapMode() == LOCAL_GL_REPEAT) { rects.addRect(/* dest rectangle */ dx0, dy0, dx1, dy1, /* tex 
coords */ srcSubRect.x / float(realTexSize.width), srcSubRect.y / float(realTexSize.height), srcSubRect.XMost() / float(realTexSize.width), srcSubRect.YMost() / float(realTexSize.height)); } else { DecomposeIntoNoRepeatTriangles(srcSubRect, realTexSize, rects); // now put the coords into the d[xy]0 .. d[xy]1 coordinate space // from the 0..1 that it comes out of decompose InfallibleTArray<RectTriangles::coord>& coords = rects.vertCoords(); for (unsigned int i = 0; i < coords.Length(); ++i) { coords[i].x = (coords[i].x * (dx1 - dx0)) + dx0; coords[i].y = (coords[i].y * (dy1 - dy0)) + dy0; } } ScopedBindTextureUnit autoTexUnit(gl, LOCAL_GL_TEXTURE0); ScopedBindTexture autoTex(gl, aSrc->GetTextureID()); ScopedVertexAttribPointer autoAttrib0(gl, 0, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0, rects.vertCoords().Elements()); ScopedVertexAttribPointer autoAttrib1(gl, 1, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0, rects.texCoords().Elements()); gl->fDrawArrays(LOCAL_GL_TRIANGLES, 0, rects.elements()); } while (aSrc->NextTile()); } while (aDst->NextTile()); // unbind the previous texture from the framebuffer SetBlitFramebufferForDestTexture(0); gl->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, savedFb); }
/**
 * Box blur involves looking at one pixel, and setting its value to the average
 * of its neighbouring pixels.
 * @param aInput The input buffer.
 * @param aOutput The output buffer.
 * @param aLeftLobe The number of pixels to blend on the left.
 * @param aRightLobe The number of pixels to blend on the right.
 * @param aWidth The number of columns in the buffers.
 * @param aRows The number of rows in the buffers.
 * @param aSkipRect An area to skip blurring in.
 * XXX shouldn't we pass stride in separately here?
 */
static void
BoxBlurHorizontal(unsigned char* aInput,
                  unsigned char* aOutput,
                  PRInt32 aLeftLobe,
                  PRInt32 aRightLobe,
                  PRInt32 aWidth,
                  PRInt32 aRows,
                  const nsIntRect& aSkipRect)
{
    NS_ASSERTION(aWidth > 0, "Can't handle zero width here");

    // Window covers [x - aLeftLobe, x + aRightLobe], i.e. boxSize pixels.
    PRInt32 boxSize = aLeftLobe + aRightLobe + 1;
    // True when the skip rect spans every column, so whole rows inside it
    // can be skipped outright.
    PRBool skipRectCoversWholeRow = 0 >= aSkipRect.x &&
                                    aWidth <= aSkipRect.XMost();

    for (PRInt32 y = 0; y < aRows; y++) {
        // Check whether the skip rect intersects this row. If the skip
        // rect covers the whole surface in this row, we can avoid
        // this row entirely (and any others along the skip rect).
        PRBool inSkipRectY = y >= aSkipRect.y &&
                             y < aSkipRect.YMost();
        if (inSkipRectY && skipRectCoversWholeRow) {
            // Jump y to the last skipped row; the loop's y++ moves past it.
            y = aSkipRect.YMost() - 1;
            continue;
        }

        // Prime the sliding-window sum for x == 0. Out-of-range positions
        // are clamped to the edge pixel, so edge values are counted
        // multiple times.
        PRInt32 alphaSum = 0;
        for (PRInt32 i = 0; i < boxSize; i++) {
            PRInt32 pos = i - aLeftLobe;
            // See assertion above; if aWidth is zero, then we would have no
            // valid position to clamp to.
            pos = NS_MAX(pos, 0);
            pos = NS_MIN(pos, aWidth - 1);
            alphaSum += aInput[aWidth * y + pos];
        }
        for (PRInt32 x = 0; x < aWidth; x++) {
            // Check whether we are within the skip rect. If so, go
            // to the next point outside the skip rect.
            if (inSkipRectY && x >= aSkipRect.x &&
                x < aSkipRect.XMost()) {
                x = aSkipRect.XMost();
                if (x >= aWidth)
                    break;

                // Recalculate the neighbouring alpha values for
                // our new point on the surface.
                alphaSum = 0;
                for (PRInt32 i = 0; i < boxSize; i++) {
                    PRInt32 pos = x + i - aLeftLobe;
                    // See assertion above; if aWidth is zero, then we would have no
                    // valid position to clamp to.
                    pos = NS_MAX(pos, 0);
                    pos = NS_MIN(pos, aWidth - 1);
                    alphaSum += aInput[aWidth * y + pos];
                }
            }
            // last/next are the clamped pixels leaving/entering the window
            // when it slides one column to the right.
            PRInt32 tmp = x - aLeftLobe;
            PRInt32 last = NS_MAX(tmp, 0);
            PRInt32 next = NS_MIN(tmp + boxSize, aWidth - 1);

            // Write the average for the current window first...
            aOutput[aWidth * y + x] = alphaSum/boxSize;

            // ...then slide the window: add the incoming pixel, drop the
            // outgoing one. Order matters — alphaSum must describe x's
            // window when the output above is written.
            alphaSum += aInput[aWidth * y + next] -
                        aInput[aWidth * y + last];
        }
    }
}
/**
 * Return this widget's bounds in screen space: origin from the widget's
 * screen offset, size copied from mBounds. Always returns NS_OK.
 */
NS_METHOD
PuppetWidget::GetScreenBounds(nsIntRect &aRect)
{
  // Size first, then position — the two updates are independent.
  aRect.SizeTo(mBounds.Size());
  aRect.MoveTo(LayoutDeviceIntPoint::ToUntyped(WidgetToScreenOffset()));
  return NS_OK;
}