// Computes the bounds of a text decoration line (e.g. an underline) starting at
// |point| with the given |width|. The line's thickness comes from the current
// stroke thickness (clamped to at least half a pixel). When not printing, the
// origin is snapped to device pixels via the current CTM, and |color| (in/out)
// is faded at small scales so the decoration does not overwhelm tiny text.
FloatRect GraphicsContext::computeLineBoundsAndAntialiasingModeForText(const FloatPoint& point, float width, bool printing, Color& color)
{
    FloatPoint origin = point;
    float thickness = std::max(strokeThickness(), 0.5f);
    // Printing: no device-pixel snapping or alpha adjustment is needed.
    if (printing)
        return FloatRect(origin, FloatSize(width, thickness));

    AffineTransform transform = getCTM(GraphicsContext::DefinitelyIncludeDeviceScale);
    // Just compute scale in x dimension, assuming x and y scales are equal.
    // When b() is non-zero the CTM contains rotation/skew, so the column length
    // sqrt(a^2 + b^2) is used instead of a() alone.
    float scale = transform.b() ? sqrtf(transform.a() * transform.a() + transform.b() * transform.b()) : transform.a();
    if (scale < 1.0) {
        // This code always draws a line that is at least one-pixel line high,
        // which tends to visually overwhelm text at small scales. To counter this
        // effect, an alpha is applied to the underline color when text is at small scales.
        static const float minimumUnderlineAlpha = 0.4f;
        float shade = scale > minimumUnderlineAlpha ? scale : minimumUnderlineAlpha;
        int alpha = color.alpha() * shade;
        color = Color(color.red(), color.green(), color.blue(), alpha);
    }

    // Snap the origin to device pixels: round x, ceil y, then map back to user
    // space (if the CTM is invertible) so the drawn line lands on pixel edges.
    FloatPoint devicePoint = transform.mapPoint(point);
    // Visual overflow might occur here due to integral roundf/ceilf. visualOverflowForDecorations adjusts the overflow value for underline decoration.
    FloatPoint deviceOrigin = FloatPoint(roundf(devicePoint.x()), ceilf(devicePoint.y()));
    if (auto inverse = transform.inverse())
        origin = inverse.value().mapPoint(deviceOrigin);
    return FloatRect(origin, FloatSize(width, thickness));
}
// Variant that also reports whether the decoration line should be antialiased.
// |shouldAntialias| (out) is set to false only when the CTM preserves axis
// alignment, in which case the snapped line lies exactly on pixel boundaries
// and antialiasing would only blur it. |color| (in/out) is faded at small
// scales; see the alpha adjustment below. When |printing|, the point is used
// unmodified and antialiasing stays enabled.
FloatRect GraphicsContext::computeLineBoundsAndAntialiasingModeForText(const FloatPoint& point, float width, bool printing, bool& shouldAntialias, Color& color)
{
    FloatPoint origin = point;
    float thickness = std::max(strokeThickness(), 0.5f);
    shouldAntialias = true;
    if (!printing) {
        AffineTransform transform = getCTM(GraphicsContext::DefinitelyIncludeDeviceScale);
        // Axis-aligned CTM: the device-snapped line needs no antialiasing.
        if (transform.preservesAxisAlignment())
            shouldAntialias = false;

        // This code always draws a line that is at least one-pixel line high,
        // which tends to visually overwhelm text at small scales. To counter this
        // effect, an alpha is applied to the underline color when text is at small scales.

        // Just compute scale in x dimension, assuming x and y scales are equal.
        float scale = transform.b() ? sqrtf(transform.a() * transform.a() + transform.b() * transform.b()) : transform.a();
        if (scale < 1.0) {
            static const float minimumUnderlineAlpha = 0.4f;
            float shade = scale > minimumUnderlineAlpha ? scale : minimumUnderlineAlpha;
            int alpha = color.alpha() * shade;
            color = Color(color.red(), color.green(), color.blue(), alpha);
        }

        // Snap the origin to device pixels (round x, ceil y) and map back to
        // user space if the CTM is invertible.
        FloatPoint devicePoint = transform.mapPoint(point);
        FloatPoint deviceOrigin = FloatPoint(roundf(devicePoint.x()), ceilf(devicePoint.y()));
        if (auto inverse = transform.inverse())
            origin = inverse.value().mapPoint(deviceOrigin);
    }
    return FloatRect(origin.x(), origin.y(), width, thickness);
}
void SVGAnimateMotionElement::applyResultsToTarget() { // We accumulate to the target element transform list so there is not much to do here. SVGElement* targetElement = this->targetElement(); if (!targetElement) return; if (RenderObject* renderer = targetElement->renderer()) RenderSVGResource::markForLayoutAndParentResourceInvalidation(renderer); AffineTransform* t = targetElement->supplementalTransform(); if (!t) return; // ...except in case where we have additional instances in <use> trees. const HashSet<SVGElementInstance*>& instances = targetElement->instancesForElement(); const HashSet<SVGElementInstance*>::const_iterator end = instances.end(); for (HashSet<SVGElementInstance*>::const_iterator it = instances.begin(); it != end; ++it) { SVGElement* shadowTreeElement = (*it)->shadowTreeElement(); ASSERT(shadowTreeElement); AffineTransform* transform = shadowTreeElement->supplementalTransform(); if (!transform) continue; transform->setMatrix(t->a(), t->b(), t->c(), t->d(), t->e(), t->f()); if (RenderObject* renderer = shadowTreeElement->renderer()) { renderer->setNeedsTransformUpdate(); RenderSVGResource::markForLayoutAndParentResourceInvalidation(renderer); } } }
void GraphicsContext::concatCTM(const AffineTransform& affine) { if (paintingDisabled()) return; //platformContext()->canvas()->concat(affine); nvgTransform(platformContext()->canvas(), affine.a(), affine.b(), affine.c(), affine.d(), affine.e(), affine.f()); }
// V8 accessor for the 'a' component of a wrapped AffineTransform: unwrap the
// POD-type wrapper held by the holder object and return a() as a JS number.
static v8::Handle<v8::Value> aAttrGetter(v8::Local<v8::String> name, const v8::AccessorInfo& info)
{
    INC_STATS("DOM.AffineTransform.a._get");
    V8SVGPODTypeWrapper<AffineTransform>* wrapper = V8SVGPODTypeWrapper<AffineTransform>::toNative(info.Holder());
    AffineTransform transform = *wrapper;
    return v8::Number::New(transform.a());
}
String SVGTransformList::valueAsString() const
{
    // TODO: We may want to build a real transform string, instead of concatting to a matrix(...).
    SVGTransform transform = concatenate();
    if (transform.type() != SVGTransform::SVG_TRANSFORM_MATRIX)
        return String();

    // Serialize the concatenated result as a single matrix(...) entry.
    AffineTransform m = transform.matrix();
    return String::format("matrix(%f %f %f %f %f %f)", m.a(), m.b(), m.c(), m.d(), m.e(), m.f());
}
// static void Shader::affineTo3x3(const AffineTransform& transform, float mat[9]) { mat[0] = transform.a(); mat[1] = transform.b(); mat[2] = 0.0f; mat[3] = transform.c(); mat[4] = transform.d(); mat[5] = 0.0f; mat[6] = transform.e(); mat[7] = transform.f(); mat[8] = 1.0f; }
static inline void normalizeTransform(AffineTransform& transform)
{
    // Snap near-±1 scale components and near-zero translation components to
    // their exact values, so 32-bit and 64-bit platforms produce identical
    // numerical results for the AffineTransform.
    // (Tested with SnowLeopard on Core Duo vs. Core 2 Duo.)
    static const float epsilon = std::numeric_limits<float>::epsilon();

    const double a = transform.a();
    if (fabs(a - 1) <= epsilon)
        transform.setA(1);
    else if (fabs(a + 1) <= epsilon)
        transform.setA(-1);

    const double d = transform.d();
    if (fabs(d - 1) <= epsilon)
        transform.setD(1);
    else if (fabs(d + 1) <= epsilon)
        transform.setD(-1);

    if (fabs(transform.e()) <= epsilon)
        transform.setE(0);
    if (fabs(transform.f()) <= epsilon)
        transform.setF(0);
}
// Tiles this image (Skia/Android path) over |destRect| using a repeating
// bitmap pattern. |srcRect| selects the tile within the image; the pattern is
// positioned per WebKit's convention via |patternTransform| and |phase|.
// Handles images that were decoded subsampled (to save RAM) by scaling the
// source rect and pattern matrix back up to the original image dimensions.
void Image::drawPattern(GraphicsContext* gc, const FloatRect& srcRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace, CompositeOperator compositeOp, const FloatRect& destRect)
{
    SkBitmapRef* image = this->nativeImageForCurrentFrame();
    if (!image || destRect.isEmpty())
        return; // in case we get called with an incomplete bitmap

    const SkBitmap& origBitmap = image->bitmap();
    // Bitmap has neither pixels nor a pixel ref yet: nothing decodable to draw.
    if (origBitmap.getPixels() == NULL && origBitmap.pixelRef() == NULL)
        return;

    SkIRect srcR;
    // we may have to scale if the image has been subsampled (so save RAM)
    bool imageIsSubSampled = image->origWidth() != origBitmap.width() || image->origHeight() != origBitmap.height();
    float scaleX = 1;
    float scaleY = 1;
    if (imageIsSubSampled) {
        scaleX = (float)image->origWidth() / origBitmap.width();
        scaleY = (float)image->origHeight() / origBitmap.height();
        // Map srcRect (in original-image coordinates) down to the subsampled
        // bitmap's coordinates before extracting.
        round_scaled(&srcR, srcRect, 1 / scaleX, 1 / scaleY);
    } else
        round(&srcR, srcRect);

    // now extract the proper subset of the src image
    SkBitmap bitmap;
    if (!origBitmap.extractSubset(&bitmap, srcR)) {
        SkDebugf("--- Image::drawPattern calling extractSubset failed\n");
        return;
    }

    SkMatrix matrix(patternTransform);
    if (imageIsSubSampled) {
        // Pre-scale the pattern matrix so the subsampled tile draws at the
        // size the original (non-subsampled) image would have.
        matrix.preScale(SkFloatToScalar(scaleX), SkFloatToScalar(scaleY));
    }

    // We also need to translate it such that the origin of the pattern is the
    // origin of the destination rect, which is what WebKit expects. Skia uses
    // the coordinate system origin as the base for the patter. If WebKit wants
    // a shifted image, it will shift it from there using the patternTransform.
    float tx = phase.x() + srcRect.x() * patternTransform.a();
    float ty = phase.y() + srcRect.y() * patternTransform.d();
    matrix.postTranslate(SkFloatToScalar(tx), SkFloatToScalar(ty));

    gc->platformContext()->drawBitmapPattern(bitmap, matrix, compositeOp, destRect);
}
// Text-query callback: computes the rotation (in degrees) of the character at
// data->position if it lies within |fragment|. Returns false when the queried
// position does not map into this fragment (so iteration continues).
static bool rotationOfCharacterCallback(QueryData* queryData, const SVGTextFragment& fragment)
{
    RotationOfCharacterData* data = static_cast<RotationOfCharacterData*>(queryData);

    // Query covers exactly one character: [position, position + 1).
    int startPosition = data->position;
    int endPosition = startPosition + 1;
    if (!mapStartEndPositionsIntoFragmentCoordinates(queryData, fragment, startPosition, endPosition))
        return false;

    if (!fragment.isTransformed()) {
        data->rotation = 0;
    } else {
        AffineTransform fragmentTransform = fragment.buildFragmentTransform(SVGTextFragment::TransformIgnoringTextLength);
        // Remove the scale component so only the rotation remains, then read
        // the rotation angle from atan2(b, a).
        fragmentTransform.scale(1 / fragmentTransform.xScale(), 1 / fragmentTransform.yScale());
        data->rotation = narrowPrecisionToFloat(rad2deg(atan2(fragmentTransform.b(), fragmentTransform.a())));
    }
    return true;
}
// Converts a 2x3 WebCore AffineTransform into a 3x3 SkMatrix. The affine
// components map onto the scale/skew/translate slots; the perspective row is
// left as the identity (0, 0, 1).
SkMatrix affineTransformToSkMatrix(const AffineTransform& source)
{
    SkMatrix result;

    result.set(SkMatrix::kMScaleX, WebCoreDoubleToSkScalar(source.a()));
    result.set(SkMatrix::kMSkewY, WebCoreDoubleToSkScalar(source.b()));
    result.set(SkMatrix::kMSkewX, WebCoreDoubleToSkScalar(source.c()));
    result.set(SkMatrix::kMScaleY, WebCoreDoubleToSkScalar(source.d()));
    result.set(SkMatrix::kMTransX, WebCoreDoubleToSkScalar(source.e()));
    result.set(SkMatrix::kMTransY, WebCoreDoubleToSkScalar(source.f()));

    // FIXME: Set perspective properly.
    result.set(SkMatrix::kMPersp0, 0);
    result.set(SkMatrix::kMPersp1, 0);
    result.set(SkMatrix::kMPersp2, SK_Scalar1);

    return result;
}
// Text-query callback: computes the rotation (in degrees) of the character at
// data->position if it lies within |fragment|. Returns false when the queried
// position does not map into this fragment (so iteration continues).
bool SVGTextQuery::rotationOfCharacterCallback(Data* queryData, const SVGTextFragment& fragment) const
{
    RotationOfCharacterData* data = static_cast<RotationOfCharacterData*>(queryData);

    // Query covers exactly one character: [position, position + 1).
    unsigned startPosition = data->position;
    unsigned endPosition = startPosition + 1;
    if (!mapStartEndPositionsIntoFragmentCoordinates(queryData, fragment, startPosition, endPosition))
        return false;

    AffineTransform fragmentTransform;
    fragment.buildFragmentTransform(fragmentTransform, SVGTextFragment::TransformIgnoringTextLength);
    if (fragmentTransform.isIdentity())
        data->rotation = 0;
    else {
        // Remove the scale component so only the rotation remains, then read
        // the rotation angle from atan2(b, a).
        fragmentTransform.scale(1 / fragmentTransform.xScale(), 1 / fragmentTransform.yScale());
        data->rotation = narrowPrecisionToFloat(rad2deg(atan2(fragmentTransform.b(), fragmentTransform.a())));
    }
    return true;
}
void drawPatternToCairoContext(cairo_t* cr, cairo_surface_t* image, const IntSize& imageSize, const FloatRect& tileRect, const AffineTransform& patternTransform, const FloatPoint& phase, cairo_operator_t op, const FloatRect& destRect) { // Avoid NaN if (!isfinite(phase.x()) || !isfinite(phase.y())) return; cairo_save(cr); RefPtr<cairo_surface_t> clippedImageSurface = 0; if (tileRect.size() != imageSize) { IntRect imageRect = enclosingIntRect(tileRect); clippedImageSurface = adoptRef(cairo_image_surface_create(CAIRO_FORMAT_ARGB32, imageRect.width(), imageRect.height())); RefPtr<cairo_t> clippedImageContext = adoptRef(cairo_create(clippedImageSurface.get())); cairo_set_source_surface(clippedImageContext.get(), image, -tileRect.x(), -tileRect.y()); cairo_paint(clippedImageContext.get()); image = clippedImageSurface.get(); } cairo_pattern_t* pattern = cairo_pattern_create_for_surface(image); cairo_pattern_set_extend(pattern, CAIRO_EXTEND_REPEAT); cairo_matrix_t patternMatrix = cairo_matrix_t(patternTransform); cairo_matrix_t phaseMatrix = {1, 0, 0, 1, phase.x() + tileRect.x() * patternTransform.a(), phase.y() + tileRect.y() * patternTransform.d()}; cairo_matrix_t combined; cairo_matrix_multiply(&combined, &patternMatrix, &phaseMatrix); cairo_matrix_invert(&combined); cairo_pattern_set_matrix(pattern, &combined); cairo_set_operator(cr, op); cairo_set_source(cr, pattern); cairo_pattern_destroy(pattern); cairo_rectangle(cr, destRect.x(), destRect.y(), destRect.width(), destRect.height()); cairo_fill(cr); cairo_restore(cr); }
// Tiles this image (CoreGraphics path) over |destRect| with the given
// compositing operator and blend mode. |tileRect| selects the tile;
// |patternTransform| and |phase| position/scale it per WebKit's convention.
// The context is flipped into Cartesian space before drawing, so the phase
// math below compensates for the bottom-left origin.
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& tileRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator op, const FloatRect& destRect, BlendMode blendMode)
{
    if (!nativeImageForCurrentFrame())
        return;

    // A non-invertible pattern transform cannot be tiled.
    if (!patternTransform.isInvertible())
        return;

    CGContextRef context = ctxt->platformContext();
    GraphicsContextStateSaver stateSaver(*ctxt);
    CGContextClipToRect(context, destRect);
    ctxt->setCompositeOperation(op, blendMode);
    // Flip to Cartesian space with the bottom-left corner of destRect at the origin.
    CGContextTranslateCTM(context, destRect.x(), destRect.y() + destRect.height());
    CGContextScaleCTM(context, 1, -1);

    // Compute the scaled tile size.
    float scaledTileHeight = tileRect.height() * narrowPrecisionToFloat(patternTransform.d());

    // We have to adjust the phase to deal with the fact we're in Cartesian space now (with the bottom left corner of destRect being
    // the origin).
    float adjustedX = phase.x() - destRect.x() + tileRect.x() * narrowPrecisionToFloat(patternTransform.a()); // We translated the context so that destRect.x() is the origin, so subtract it out.
    float adjustedY = destRect.height() - (phase.y() - destRect.y() + tileRect.y() * narrowPrecisionToFloat(patternTransform.d()) + scaledTileHeight);

    CGImageRef tileImage = nativeImageForCurrentFrame();
    float h = CGImageGetHeight(tileImage);

    RetainPtr<CGImageRef> subImage;
    if (tileRect.size() == size())
        subImage = tileImage;
    else {
        // Copying a sub-image out of a partially-decoded image stops the decoding of the original image. It should never happen
        // because sub-images are only used for border-image, which only renders when the image is fully decoded.
        ASSERT(h == height());
        subImage = adoptCF(CGImageCreateWithImageInRect(tileImage, tileRect));
    }

    // Adjust the color space.
    subImage = Image::imageWithColorSpace(subImage.get(), styleColorSpace);

    // Leopard has an optimized call for the tiling of image patterns, but we can only use it if the image has been decoded enough that
    // its buffer is the same size as the overall image. Because a partially decoded CGImageRef with a smaller width or height than the
    // overall image buffer needs to tile with "gaps", we can't use the optimized tiling call in that case.
    // FIXME: We cannot use CGContextDrawTiledImage with scaled tiles on Leopard, because it suffers from rounding errors. Snow Leopard is ok.
    float scaledTileWidth = tileRect.width() * narrowPrecisionToFloat(patternTransform.a());
    float w = CGImageGetWidth(tileImage);
    if (w == size().width() && h == size().height() && !spaceSize().width() && !spaceSize().height())
        CGContextDrawTiledImage(context, FloatRect(adjustedX, adjustedY, scaledTileWidth, scaledTileHeight), subImage.get());
    else {
        // On Leopard and newer, this code now only runs for partially decoded images whose buffers do not yet match the overall size of the image.
        static const CGPatternCallbacks patternCallbacks = { 0, drawPatternCallback, patternReleaseCallback };
        CGAffineTransform matrix = CGAffineTransformMake(narrowPrecisionToCGFloat(patternTransform.a()), 0, 0, narrowPrecisionToCGFloat(patternTransform.d()), adjustedX, adjustedY);
        matrix = CGAffineTransformConcat(matrix, CGContextGetCTM(context));
        // The top of a partially-decoded image is drawn at the bottom of the tile. Map it to the top.
        matrix = CGAffineTransformTranslate(matrix, 0, size().height() - h);
#if PLATFORM(IOS)
        matrix = CGAffineTransformScale(matrix, 1, -1);
        matrix = CGAffineTransformTranslate(matrix, 0, -h);
#endif
        // The pattern (via patternReleaseCallback) takes over this reference.
        CGImageRef platformImage = CGImageRetain(subImage.get());
        RetainPtr<CGPatternRef> pattern = adoptCF(CGPatternCreate(platformImage, CGRectMake(0, 0, tileRect.width(), tileRect.height()), matrix, tileRect.width() + spaceSize().width() * (1 / narrowPrecisionToFloat(patternTransform.a())), tileRect.height() + spaceSize().height() * (1 / narrowPrecisionToFloat(patternTransform.d())), kCGPatternTilingConstantSpacing, true, &patternCallbacks));
        if (!pattern)
            return;

        RetainPtr<CGColorSpaceRef> patternSpace = adoptCF(CGColorSpaceCreatePattern(0));
        CGFloat alpha = 1;
        RetainPtr<CGColorRef> color = adoptCF(CGColorCreateWithPattern(patternSpace.get(), pattern.get(), &alpha));
        CGContextSetFillColorSpace(context, patternSpace.get());

        // FIXME: Really want a public API for this. It is just CGContextSetBaseCTM(context, CGAffineTransformIdentiy).
        wkSetBaseCTM(context, CGAffineTransformIdentity);
        CGContextSetPatternPhase(context, CGSizeZero);

        CGContextSetFillColorWithColor(context, color.get());
        CGContextFillRect(context, CGContextGetClipBoundingBox(context));
    }

    stateSaver.restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
// Tiles this image (wxWidgets path) over |dstRect| by drawing the tile
// repeatedly in a nested loop until the destination rect (clamped to the
// visible client area) is covered. The WXGC build draws via wxGraphicsContext;
// the plain-wxDC build blits from a wxMemoryDC.
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& srcRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace, CompositeOperator, const FloatRect& dstRect)
{
#if USE(WXGC)
    wxGCDC* context = (wxGCDC*)ctxt->platformContext();
    wxGraphicsBitmap* bitmap = nativeImageForCurrentFrame();
#else
    wxDC* context = ctxt->platformContext();
    wxBitmap* bitmap = nativeImageForCurrentFrame();
#endif

    if (!bitmap) // If it's too early we won't have an image yet.
        return;

    ctxt->save();
    ctxt->clip(IntRect(dstRect.x(), dstRect.y(), dstRect.width(), dstRect.height()));

    // Running offsets of the tile currently being drawn, relative to the
    // adjusted phase origin.
    float currentW = 0;
    float currentH = 0;

#if USE(WXGC)
    wxGraphicsContext* gc = context->GetGraphicsContext();
    // Shift the phase so the pattern origin matches WebKit's convention.
    float adjustedX = phase.x() + srcRect.x() * narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + srcRect.y() * narrowPrecisionToFloat(patternTransform.d());
    gc->ConcatTransform(patternTransform);
#else
    float adjustedX = phase.x();
    float adjustedY = phase.y();
    wxMemoryDC mydc;
    mydc.SelectObject(*bitmap);
    ctxt->concatCTM(patternTransform);
#endif

    //wxPoint origin(context->GetDeviceOrigin());
    // Derive the device origin by mapping (0, 0) through the current CTM.
    AffineTransform mat(ctxt->getCTM());
    wxPoint origin(mat.mapPoint(IntPoint(0, 0)));
    wxSize clientSize(context->GetSize());

    wxCoord w = srcRect.width();
    wxCoord h = srcRect.height();
    wxCoord srcx = srcRect.x();
    wxCoord srcy = srcRect.y();

    // Tile column-by-column until both the destination rect and the visible
    // client area are covered.
    while (currentW < dstRect.right() - phase.x() && origin.x + adjustedX + currentW < clientSize.x) {
        while (currentH < dstRect.bottom() - phase.y() && origin.y + adjustedY + currentH < clientSize.y) {
#if USE(WXGC)
#if wxCHECK_VERSION(2,9,0)
            gc->DrawBitmap(*bitmap, adjustedX + currentW, adjustedY + currentH, (wxDouble)srcRect.width(), (wxDouble)srcRect.height());
#else
            gc->DrawGraphicsBitmap(*bitmap, adjustedX + currentW, adjustedY + currentH, (wxDouble)srcRect.width(), (wxDouble)srcRect.height());
#endif
#else
            context->Blit(adjustedX + currentW, adjustedY + currentH, w, h, &mydc, srcx, srcy, wxCOPY, true);
#endif
            currentH += srcRect.height();
        }
        currentW += srcRect.width();
        currentH = 0;
    }

    ctxt->restore();

#if !USE(WXGC)
    mydc.SelectObject(wxNullBitmap);
#endif

    // NB: delete is causing crashes during page load, but not during the deletion
    // itself. It occurs later on when a valid bitmap created in frameAtIndex
    // suddenly becomes invalid after returning. It's possible these errors deal
    // with reentrancy and threding problems.
    //delete bitmap;

    startAnimation();

    if (ImageObserver* observer = imageObserver())
        observer->didDraw(this);
}
// Tiles this image (Skia path) over |destRect| using a repeating bitmap
// shader. Picks a resampling mode based on the source/destination sizes;
// RESAMPLE_AWESOME pre-resizes the bitmap, otherwise the raw subset is used.
void Image::drawPattern(GraphicsContext* context, const FloatRect& floatSrcRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator compositeOp, const FloatRect& destRect)
{
    FloatRect normSrcRect = normalizeRect(floatSrcRect);
    if (destRect.isEmpty() || normSrcRect.isEmpty())
        return; // nothing to draw

    NativeImageSkia* bitmap = nativeImageForCurrentFrame();
    if (!bitmap)
        return;

    SkIRect srcRect = enclosingIntRect(normSrcRect);

    // Figure out what size the bitmap will be in the destination. The
    // destination rect is the bounds of the pattern, we need to use the
    // matrix to see how big it will be.
    float destBitmapWidth, destBitmapHeight;
    TransformDimensions(patternTransform, srcRect.width(), srcRect.height(), &destBitmapWidth, &destBitmapHeight);

    // Compute the resampling mode.
    ResamplingMode resampling;
    if (context->platformContext()->isAccelerated() || context->platformContext()->printing())
        resampling = RESAMPLE_LINEAR;
    else
        resampling = computeResamplingMode(context->platformContext(), *bitmap, srcRect.width(), srcRect.height(), destBitmapWidth, destBitmapHeight);

    // Load the transform WebKit requested.
    SkMatrix matrix(patternTransform);

    SkShader* shader;
    if (resampling == RESAMPLE_AWESOME) {
        // Do nice resampling.
        int width = static_cast<int>(destBitmapWidth);
        int height = static_cast<int>(destBitmapHeight);
        SkBitmap resampled = bitmap->resizedBitmap(srcRect, width, height);
        shader = SkShader::CreateBitmapShader(resampled, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);

        // Since we just resized the bitmap, we need to undo the scale set in
        // the image transform.
        matrix.setScaleX(SkIntToScalar(1));
        matrix.setScaleY(SkIntToScalar(1));
    } else {
        // No need to do nice resampling.
        SkBitmap srcSubset;
        bitmap->bitmap().extractSubset(&srcSubset, srcRect);
        shader = SkShader::CreateBitmapShader(srcSubset, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);
    }

    // We also need to translate it such that the origin of the pattern is the
    // origin of the destination rect, which is what WebKit expects. Skia uses
    // the coordinate system origin as the base for the patter. If WebKit wants
    // a shifted image, it will shift it from there using the patternTransform.
    float adjustedX = phase.x() + normSrcRect.x() * narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + normSrcRect.y() * narrowPrecisionToFloat(patternTransform.d());
    matrix.postTranslate(SkFloatToScalar(adjustedX), SkFloatToScalar(adjustedY));
    shader->setLocalMatrix(matrix);

    SkPaint paint;
    // The paint takes over the shader reference (unref after setShader).
    paint.setShader(shader)->unref();
    paint.setXfermodeMode(WebCoreCompositeToSkiaComposite(compositeOp));
    paint.setFilterBitmap(resampling == RESAMPLE_LINEAR);

    context->platformContext()->paintSkPaint(destRect, paint);
}
// Tiles this image (CoreGraphics path, Tiger/Leopard-era) over |destRect|.
// Uses the optimized CGContextDrawTiledImage when the decoded buffer matches
// the full image size; otherwise falls back to a CGPattern fill. The context
// is flipped into Cartesian space first, hence the phase adjustments below.
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& tileRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator op, const FloatRect& destRect)
{
    if (!nativeImageForCurrentFrame())
        return;

    ASSERT(patternTransform.isInvertible());
    if (!patternTransform.isInvertible())
        // Avoid a hang under CGContextDrawTiledImage on release builds.
        return;

    CGContextRef context = ctxt->platformContext();
    ctxt->save();
    CGContextClipToRect(context, destRect);
    ctxt->setCompositeOperation(op);
    // Flip to Cartesian space with the bottom-left corner of destRect at the origin.
    CGContextTranslateCTM(context, destRect.x(), destRect.y() + destRect.height());
    CGContextScaleCTM(context, 1, -1);

    // Compute the scaled tile size.
    float scaledTileHeight = tileRect.height() * narrowPrecisionToFloat(patternTransform.d());

    // We have to adjust the phase to deal with the fact we're in Cartesian space now (with the bottom left corner of destRect being
    // the origin).
    float adjustedX = phase.x() - destRect.x() + tileRect.x() * narrowPrecisionToFloat(patternTransform.a()); // We translated the context so that destRect.x() is the origin, so subtract it out.
    float adjustedY = destRect.height() - (phase.y() - destRect.y() + tileRect.y() * narrowPrecisionToFloat(patternTransform.d()) + scaledTileHeight);

    CGImageRef tileImage = nativeImageForCurrentFrame();
    float h = CGImageGetHeight(tileImage);

    RetainPtr<CGImageRef> subImage;
    if (tileRect.size() == size())
        subImage = tileImage;
    else {
        // Copying a sub-image out of a partially-decoded image stops the decoding of the original image. It should never happen
        // because sub-images are only used for border-image, which only renders when the image is fully decoded.
        ASSERT(h == height());
        subImage.adoptCF(CGImageCreateWithImageInRect(tileImage, tileRect));
    }

    // Adjust the color space.
    subImage = imageWithColorSpace(subImage.get(), styleColorSpace);

#ifndef BUILDING_ON_TIGER
    // Leopard has an optimized call for the tiling of image patterns, but we can only use it if the image has been decoded enough that
    // its buffer is the same size as the overall image. Because a partially decoded CGImageRef with a smaller width or height than the
    // overall image buffer needs to tile with "gaps", we can't use the optimized tiling call in that case.
    // FIXME: Could create WebKitSystemInterface SPI for CGCreatePatternWithImage2 and probably make Tiger tile faster as well.
    // FIXME: We cannot use CGContextDrawTiledImage with scaled tiles on Leopard, because it suffers from rounding errors. Snow Leopard is ok.
    float scaledTileWidth = tileRect.width() * narrowPrecisionToFloat(patternTransform.a());
    float w = CGImageGetWidth(tileImage);
#ifdef BUILDING_ON_LEOPARD
    if (w == size().width() && h == size().height() && scaledTileWidth == tileRect.width() && scaledTileHeight == tileRect.height())
#else
    if (w == size().width() && h == size().height())
#endif
        CGContextDrawTiledImage(context, FloatRect(adjustedX, adjustedY, scaledTileWidth, scaledTileHeight), subImage.get());
    else {
#endif
        // On Leopard, this code now only runs for partially decoded images whose buffers do not yet match the overall size of the image.
        // On Tiger this code runs all the time. This code is suboptimal because the pattern does not reference the image directly, and the
        // pattern is destroyed before exiting the function. This means any decoding the pattern does doesn't end up cached anywhere, so we
        // redecode every time we paint.
        static const CGPatternCallbacks patternCallbacks = { 0, drawPatternCallback, NULL };
        CGAffineTransform matrix = CGAffineTransformMake(narrowPrecisionToCGFloat(patternTransform.a()), 0, 0, narrowPrecisionToCGFloat(patternTransform.d()), adjustedX, adjustedY);
        matrix = CGAffineTransformConcat(matrix, CGContextGetCTM(context));
        // The top of a partially-decoded image is drawn at the bottom of the tile. Map it to the top.
        matrix = CGAffineTransformTranslate(matrix, 0, size().height() - h);
        RetainPtr<CGPatternRef> pattern(AdoptCF, CGPatternCreate(subImage.get(), CGRectMake(0, 0, tileRect.width(), tileRect.height()), matrix, tileRect.width(), tileRect.height(), kCGPatternTilingConstantSpacing, true, &patternCallbacks));
        if (!pattern) {
            ctxt->restore();
            return;
        }

        RetainPtr<CGColorSpaceRef> patternSpace(AdoptCF, CGColorSpaceCreatePattern(0));
        CGFloat alpha = 1;
        RetainPtr<CGColorRef> color(AdoptCF, CGColorCreateWithPattern(patternSpace.get(), pattern.get(), &alpha));
        CGContextSetFillColorSpace(context, patternSpace.get());

        // FIXME: Really want a public API for this. It is just CGContextSetBaseCTM(context, CGAffineTransformIdentiy).
        wkSetPatternBaseCTM(context, CGAffineTransformIdentity);
        CGContextSetPatternPhase(context, CGSizeZero);

        CGContextSetFillColorWithColor(context, color.get());
        CGContextFillRect(context, CGContextGetClipBoundingBox(context));
#ifndef BUILDING_ON_TIGER
    }
#endif

    ctxt->restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
// Tiles this image (Skia path, Chromium-era) over |destRect| using a repeating
// bitmap shader. The total matrix (canvas CTM * patternTransform) determines
// the on-screen tile size, which drives the resampling-mode decision.
void Image::drawPattern(GraphicsContext* context, const FloatRect& floatSrcRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator compositeOp, const FloatRect& destRect)
{
#if PLATFORM(CHROMIUM)
    TRACE_EVENT0("skia", "Image::drawPattern");
#endif
    FloatRect normSrcRect = normalizeRect(floatSrcRect);
    if (destRect.isEmpty() || normSrcRect.isEmpty())
        return; // nothing to draw

    NativeImageSkia* bitmap = nativeImageForCurrentFrame();
    if (!bitmap)
        return;

    SkMatrix ctm = context->platformContext()->canvas()->getTotalMatrix();
    SkMatrix totalMatrix;
    totalMatrix.setConcat(ctm, patternTransform);

    // Figure out what size the bitmap will be in the destination. The
    // destination rect is the bounds of the pattern, we need to use the
    // matrix to see how big it will be.
    SkRect destRectTarget;
    totalMatrix.mapRect(&destRectTarget, normSrcRect);

    float destBitmapWidth = SkScalarToFloat(destRectTarget.width());
    float destBitmapHeight = SkScalarToFloat(destRectTarget.height());

    // Compute the resampling mode.
    ResamplingMode resampling;
    if (context->platformContext()->isAccelerated() || context->platformContext()->printing())
        resampling = RESAMPLE_LINEAR;
    else
        resampling = computeResamplingMode(totalMatrix, *bitmap, normSrcRect.width(), normSrcRect.height(), destBitmapWidth, destBitmapHeight);
    resampling = limitResamplingMode(context->platformContext(), resampling);

    // Load the transform WebKit requested.
    SkMatrix matrix(patternTransform);

    SkShader* shader;
    if (resampling == RESAMPLE_AWESOME) {
        // Do nice resampling.
        float scaleX = destBitmapWidth / normSrcRect.width();
        float scaleY = destBitmapHeight / normSrcRect.height();
        SkRect scaledSrcRect;
        SkIRect enclosingScaledSrcRect;

        // The image fragment generated here is not exactly what is
        // requested. The scale factor used is approximated and image
        // fragment is slightly larger to align to integer
        // boundaries.
        SkBitmap resampled = extractScaledImageFragment(*bitmap, normSrcRect, scaleX, scaleY, &scaledSrcRect, &enclosingScaledSrcRect);
        shader = SkShader::CreateBitmapShader(resampled, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);

        // Since we just resized the bitmap, we need to remove the scale
        // applied to the pixels in the bitmap shader. This means we need
        // CTM * patternTransform to have identity scale. Since we
        // can't modify CTM (or the rectangle will be drawn in the wrong
        // place), we must set patternTransform's scale to the inverse of
        // CTM scale.
        matrix.setScaleX(ctm.getScaleX() ? 1 / ctm.getScaleX() : 1);
        matrix.setScaleY(ctm.getScaleY() ? 1 / ctm.getScaleY() : 1);
    } else {
        // No need to do nice resampling.
        SkBitmap srcSubset;
        bitmap->bitmap().extractSubset(&srcSubset, enclosingIntRect(normSrcRect));
        shader = SkShader::CreateBitmapShader(srcSubset, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);
    }

    // We also need to translate it such that the origin of the pattern is the
    // origin of the destination rect, which is what WebKit expects. Skia uses
    // the coordinate system origin as the base for the patter. If WebKit wants
    // a shifted image, it will shift it from there using the patternTransform.
    float adjustedX = phase.x() + normSrcRect.x() * narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + normSrcRect.y() * narrowPrecisionToFloat(patternTransform.d());
    matrix.postTranslate(SkFloatToScalar(adjustedX), SkFloatToScalar(adjustedY));
    shader->setLocalMatrix(matrix);

    SkPaint paint;
    // The paint takes over the shader reference (unref after setShader).
    paint.setShader(shader)->unref();
    paint.setXfermodeMode(WebCoreCompositeToSkiaComposite(compositeOp));
    paint.setFilterBitmap(resampling == RESAMPLE_LINEAR);

    context->platformContext()->paintSkPaint(destRect, paint);
}