Example #1
static FloatPoint reflectedPoint(const FloatPoint& reflectIn, const FloatPoint& pointToReflect)
{
    return FloatPoint(2 * reflectIn.x() - pointToReflect.x(), 2 * reflectIn.y() - pointToReflect.y());
}
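reflectedPoint() computes the mirror image of a point through another point (2 * reflectIn - pointToReflect), which is how the implied control point of an SVG or canvas smooth curve command is usually derived. A minimal self-contained sketch of the same arithmetic, using a plain Point struct and illustrative names rather than WebKit's FloatPoint API:

#include <cstdio>

struct Point { float x, y; };

// Reflects p through mirror: result = 2 * mirror - p, the same arithmetic as
// reflectedPoint() above.
static Point reflectThrough(const Point& mirror, const Point& p)
{
    return { 2 * mirror.x - p.x, 2 * mirror.y - p.y };
}

int main()
{
    Point lastControl = { 30, 40 };
    Point currentPoint = { 50, 50 };
    Point implied = reflectThrough(currentPoint, lastControl);
    std::printf("implied control point = (%g, %g)\n", implied.x, implied.y); // (70, 60)
    return 0;
}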
Example #2
void SVGPathSegListBuilder::arcTo(float r1, float r2, float angle, bool largeArcFlag, bool sweepFlag, const FloatPoint& targetPoint, PathCoordinateMode mode)
{
    ASSERT(m_pathElement);
    ASSERT(m_pathSegList);
    if (mode == AbsoluteCoordinates)
        m_pathSegList->appendWithoutByteStreamSync(SVGPathSegArcAbs::create(m_pathElement, targetPoint.x(), targetPoint.y(), r1, r2, angle, largeArcFlag, sweepFlag));
    else
        m_pathSegList->appendWithoutByteStreamSync(SVGPathSegArcRel::create(m_pathElement, targetPoint.x(), targetPoint.y(), r1, r2, angle, largeArcFlag, sweepFlag));
}
Example #3
bool PolygonShape::firstIncludedIntervalLogicalTop(LayoutUnit minLogicalIntervalTop, const LayoutSize& minLogicalIntervalSize, LayoutUnit& result) const
{
    float minIntervalTop = minLogicalIntervalTop;
    float minIntervalHeight = minLogicalIntervalSize.height();
    float minIntervalWidth = minLogicalIntervalSize.width();

    const FloatPolygon& polygon = shapePaddingBounds();
    const FloatRect boundingBox = polygon.boundingBox();
    if (minIntervalWidth > boundingBox.width())
        return false;

    float minY = std::max(boundingBox.y(), minIntervalTop);
    float maxY = minY + minIntervalHeight;

    if (maxY > boundingBox.maxY())
        return false;

    Vector<const FloatPolygonEdge*> edges;
    polygon.overlappingEdges(minIntervalTop, boundingBox.maxY(), edges);

    float dx = minIntervalWidth / 2;
    float dy = minIntervalHeight / 2;
    Vector<OffsetPolygonEdge> offsetEdges;

    for (unsigned i = 0; i < edges.size(); ++i) {
        const FloatPolygonEdge& edge = *(edges[i]);
        const FloatPoint& vertex0 = edge.previousEdge().vertex1();
        const FloatPoint& vertex1 = edge.vertex1();
        const FloatPoint& vertex2 = edge.vertex2();
        Vector<OffsetPolygonEdge> offsetEdgeBuffer;

        if (vertex2.y() > vertex1.y() ? vertex2.x() >= vertex1.x() : vertex1.x() >= vertex2.x()) {
            offsetEdgeBuffer.append(OffsetPolygonEdge(edge, FloatSize(dx, -dy)));
            offsetEdgeBuffer.append(OffsetPolygonEdge(edge, FloatSize(-dx, dy)));
        } else {
            offsetEdgeBuffer.append(OffsetPolygonEdge(edge, FloatSize(dx, dy)));
            offsetEdgeBuffer.append(OffsetPolygonEdge(edge, FloatSize(-dx, -dy)));
        }

        if (isReflexVertex(vertex0, vertex1, vertex2)) {
            if (vertex2.x() <= vertex1.x() && vertex0.x() <= vertex1.x())
                offsetEdgeBuffer.append(OffsetPolygonEdge(vertex1, FloatSize(dx, -dy), FloatSize(dx, dy)));
            else if (vertex2.x() >= vertex1.x() && vertex0.x() >= vertex1.x())
                offsetEdgeBuffer.append(OffsetPolygonEdge(vertex1, FloatSize(-dx, -dy), FloatSize(-dx, dy)));
            if (vertex2.y() <= vertex1.y() && vertex0.y() <= vertex1.y())
                offsetEdgeBuffer.append(OffsetPolygonEdge(vertex1, FloatSize(-dx, dy), FloatSize(dx, dy)));
            else if (vertex2.y() >= vertex1.y() && vertex0.y() >= vertex1.y())
                offsetEdgeBuffer.append(OffsetPolygonEdge(vertex1, FloatSize(-dx, -dy), FloatSize(dx, -dy)));
        }

        for (unsigned j = 0; j < offsetEdgeBuffer.size(); ++j)
            if (offsetEdgeBuffer[j].maxY() >= minY)
                offsetEdges.append(offsetEdgeBuffer[j]);
    }

    offsetEdges.append(OffsetPolygonEdge(polygon, minIntervalTop, FloatSize(0, dy)));

    FloatPoint offsetEdgesIntersection;
    FloatRect firstFitRect;
    bool firstFitFound = false;

    for (unsigned i = 0; i < offsetEdges.size() - 1; ++i) {
        for (unsigned j = i + 1; j < offsetEdges.size(); ++j) {
            if (offsetEdges[i].intersection(offsetEdges[j], offsetEdgesIntersection)) {
                FloatPoint potentialFirstFitLocation(offsetEdgesIntersection.x() - dx, offsetEdgesIntersection.y() - dy);
                FloatRect potentialFirstFitRect(potentialFirstFitLocation, minLogicalIntervalSize);
                if ((offsetEdges[i].basis() == OffsetPolygonEdge::LineTop
                    || offsetEdges[j].basis() == OffsetPolygonEdge::LineTop
                    || potentialFirstFitLocation.y() >= minIntervalTop)
                    && (!firstFitFound || aboveOrToTheLeft(potentialFirstFitRect, firstFitRect))
                    && polygon.contains(offsetEdgesIntersection)
                    && firstFitRectInPolygon(polygon, potentialFirstFitRect, offsetEdges[i].edgeIndex(), offsetEdges[j].edgeIndex())) {
                    firstFitFound = true;
                    firstFitRect = potentialFirstFitRect;
                }
            }
        }
    }

    if (firstFitFound)
        result = ceiledLayoutUnit(firstFitRect.y());
    return firstFitFound;
}
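The helper isReflexVertex() is not part of this excerpt. A reflex vertex is one whose interior angle exceeds 180 degrees, and a typical way to detect it is the sign of the cross product of the two edges meeting at the vertex. The sketch below assumes a counterclockwise winding and uses plain structs and illustrative names, so it shows the idea only and is not WebKit's actual helper:

struct Point { float x, y; };

// Cross product of the edges (v0 -> v1) and (v1 -> v2); its sign says whether
// the polygon turns left or right at v1.
static float turnAtVertex(const Point& v0, const Point& v1, const Point& v2)
{
    return (v1.x - v0.x) * (v2.y - v1.y) - (v1.y - v0.y) * (v2.x - v1.x);
}

// For a polygon wound counterclockwise (in a y-up coordinate system), a reflex
// vertex turns the "wrong" way, i.e. the cross product is negative; a
// clockwise polygon flips the sign.
static bool isReflexVertexAssumingCCW(const Point& v0, const Point& v1, const Point& v2)
{
    return turnAtVertex(v0, v1, v2) < 0;
}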
Example #4
LayoutState::LayoutState(std::unique_ptr<LayoutState> next, RenderBox* renderer, const LayoutSize& offset, LayoutUnit pageLogicalHeight, bool pageLogicalHeightChanged)
    : m_lineGrid(0)
    , m_next(std::move(next))
#ifndef NDEBUG
    , m_renderer(renderer)
#endif
{
    ASSERT(m_next);

    bool fixed = renderer->isOutOfFlowPositioned() && renderer->style().position() == FixedPosition;
    if (fixed) {
        // FIXME: This doesn't work correctly with transforms.
        FloatPoint fixedOffset = renderer->view().localToAbsolute(FloatPoint(), IsFixed);
        m_paintOffset = LayoutSize(fixedOffset.x(), fixedOffset.y()) + offset;
    } else
        m_paintOffset = m_next->m_paintOffset + offset;

    if (renderer->isOutOfFlowPositioned() && !fixed) {
        if (RenderElement* container = renderer->container()) {
            if (container->isInFlowPositioned() && container->isRenderInline())
                m_paintOffset += toRenderInline(container)->offsetForInFlowPositionedInline(renderer);
        }
    }

    m_layoutOffset = m_paintOffset;

    if (renderer->isInFlowPositioned() && renderer->hasLayer())
        m_paintOffset += renderer->layer()->offsetForInFlowPosition();

    m_clipped = !fixed && m_next->m_clipped;
    if (m_clipped)
        m_clipRect = m_next->m_clipRect;

    if (renderer->hasOverflowClip()) {
        LayoutRect clipRect(toLayoutPoint(m_paintOffset) + renderer->view().layoutDelta(), renderer->cachedSizeForOverflowClip());
        if (m_clipped)
            m_clipRect.intersect(clipRect);
        else {
            m_clipRect = clipRect;
            m_clipped = true;
        }

        m_paintOffset -= renderer->scrolledContentOffset();
    }

    // If we establish a new page height, then cache the offset to the top of the first page.
    // We can compare this later on to figure out what part of the page we're actually on.
    if (pageLogicalHeight || renderer->isRenderFlowThread()) {
        m_pageLogicalHeight = pageLogicalHeight;
        bool isFlipped = renderer->style().isFlippedBlocksWritingMode();
        m_pageOffset = LayoutSize(m_layoutOffset.width() + (!isFlipped ? renderer->borderLeft() + renderer->paddingLeft() : renderer->borderRight() + renderer->paddingRight()),
                               m_layoutOffset.height() + (!isFlipped ? renderer->borderTop() + renderer->paddingTop() : renderer->borderBottom() + renderer->paddingBottom()));
        m_pageLogicalHeightChanged = pageLogicalHeightChanged;
        m_isPaginated = true;
    } else {
        // If we don't establish a new page height, then propagate the old page height and offset down.
        m_pageLogicalHeight = m_next->m_pageLogicalHeight;
        m_pageLogicalHeightChanged = m_next->m_pageLogicalHeightChanged;
        m_pageOffset = m_next->m_pageOffset;
        
        // Disable pagination for objects we don't support. For now this includes overflow:scroll/auto, inline blocks and
        // writing mode roots.
        if (renderer->isUnsplittableForPagination()) {
            m_pageLogicalHeight = 0;
            m_isPaginated = false;
        } else
            m_isPaginated = m_pageLogicalHeight || renderer->flowThreadContainingBlock();
    }
    
    // Propagate line grid information.
    propagateLineGridInfo(renderer);

    m_layoutDelta = m_next->m_layoutDelta;
#if !ASSERT_DISABLED && ENABLE(SATURATED_LAYOUT_ARITHMETIC)
    m_layoutDeltaXSaturated = m_next->m_layoutDeltaXSaturated;
    m_layoutDeltaYSaturated = m_next->m_layoutDeltaYSaturated;
#endif

    if (lineGrid() && (lineGrid()->style().writingMode() == renderer->style().writingMode()) && renderer->isRenderMultiColumnFlowThread())
        toRenderMultiColumnFlowThread(renderer)->computeLineGridPaginationOrigin(*this);

    // If we have a new grid to track, then add it to our set.
    if (renderer->style().lineGrid() != RenderStyle::initialLineGrid() && renderer->isRenderBlockFlow())
        establishLineGrid(toRenderBlockFlow(renderer));

    // FIXME: <http://bugs.webkit.org/show_bug.cgi?id=13443> Apply control clip if present.
}
Example #5
void SVGDocumentExtensions::updatePan(const FloatPoint& pos) const
{
    if (SVGSVGElement* svg = rootElement(*m_document))
        svg->setCurrentTranslate(FloatPoint(pos.x() - m_translate.x(), pos.y() - m_translate.y()));
}
Example #6
// RenderBox methods will expect coordinates w/o any transforms in coordinates
// relative to our borderBox origin.  This method gives us exactly that.
void RenderSVGRoot::buildLocalToBorderBoxTransform()
{
    SVGSVGElement* svg = static_cast<SVGSVGElement*>(node());
    float scale = style()->effectiveZoom();
    FloatPoint translate = svg->currentTranslate();
    LayoutSize borderAndPadding(borderLeft() + paddingLeft(), borderTop() + paddingTop());
    m_localToBorderBoxTransform = svg->viewBoxToViewTransform(contentWidth() / scale, contentHeight() / scale);
    if (borderAndPadding.isEmpty() && scale == 1 && translate == FloatPoint::zero())
        return;
    m_localToBorderBoxTransform = AffineTransform(scale, 0, 0, scale, borderAndPadding.width() + translate.x(), borderAndPadding.height() + translate.y()) * m_localToBorderBoxTransform;
}
Example #7
void ScrollAnimator::setCurrentPosition(const FloatPoint& position)
{
    m_currentPosX = position.x();
    m_currentPosY = position.y();
}
Example #8
void Path::addArcTo(const FloatPoint& p1, const FloatPoint& p2, float radius)
{
    // FIXME: Why do we return if the path is empty? Can't a path start with an arc?
    if (isEmpty())
        return;

    cairo_t* cr = platformPath()->context();

    double x0, y0;
    cairo_get_current_point(cr, &x0, &y0);
    FloatPoint p0(x0, y0);

    // Draw only a straight line to p1 if any of the points are equal or the radius is zero
    // or the points are collinear (triangle that the points form has area of zero value).
    if ((p1.x() == p0.x() && p1.y() == p0.y()) || (p1.x() == p2.x() && p1.y() == p2.y()) || !radius
        || !areaOfTriangleFormedByPoints(p0, p1, p2)) {
        cairo_line_to(cr, p1.x(), p1.y());
        return;
    }

    FloatPoint p1p0((p0.x() - p1.x()),(p0.y() - p1.y()));
    FloatPoint p1p2((p2.x() - p1.x()),(p2.y() - p1.y()));
    float p1p0_length = sqrtf(p1p0.x() * p1p0.x() + p1p0.y() * p1p0.y());
    float p1p2_length = sqrtf(p1p2.x() * p1p2.x() + p1p2.y() * p1p2.y());

    double cos_phi = (p1p0.x() * p1p2.x() + p1p0.y() * p1p2.y()) / (p1p0_length * p1p2_length);
    // all points on a line logic
    if (cos_phi == -1) {
        cairo_line_to(cr, p1.x(), p1.y());
        return;
    }
    if (cos_phi == 1) {
        // add infinite far away point
        unsigned int max_length = 65535;
        double factor_max = max_length / p1p0_length;
        FloatPoint ep((p0.x() + factor_max * p1p0.x()), (p0.y() + factor_max * p1p0.y()));
        cairo_line_to(cr, ep.x(), ep.y());
        return;
    }

    float tangent = radius / tan(acos(cos_phi) / 2);
    float factor_p1p0 = tangent / p1p0_length;
    FloatPoint t_p1p0((p1.x() + factor_p1p0 * p1p0.x()), (p1.y() + factor_p1p0 * p1p0.y()));

    FloatPoint orth_p1p0(p1p0.y(), -p1p0.x());
    float orth_p1p0_length = sqrt(orth_p1p0.x() * orth_p1p0.x() + orth_p1p0.y() * orth_p1p0.y());
    float factor_ra = radius / orth_p1p0_length;

    // angle between orth_p1p0 and p1p2 to get the right vector orthogonal to p1p0
    double cos_alpha = (orth_p1p0.x() * p1p2.x() + orth_p1p0.y() * p1p2.y()) / (orth_p1p0_length * p1p2_length);
    if (cos_alpha < 0.f)
        orth_p1p0 = FloatPoint(-orth_p1p0.x(), -orth_p1p0.y());

    FloatPoint p((t_p1p0.x() + factor_ra * orth_p1p0.x()), (t_p1p0.y() + factor_ra * orth_p1p0.y()));

    // calculate angles for addArc
    orth_p1p0 = FloatPoint(-orth_p1p0.x(), -orth_p1p0.y());
    float sa = acos(orth_p1p0.x() / orth_p1p0_length);
    if (orth_p1p0.y() < 0.f)
        sa = 2 * piDouble - sa;

    // anticlockwise logic
    bool anticlockwise = false;

    float factor_p1p2 = tangent / p1p2_length;
    FloatPoint t_p1p2((p1.x() + factor_p1p2 * p1p2.x()), (p1.y() + factor_p1p2 * p1p2.y()));
    FloatPoint orth_p1p2((t_p1p2.x() - p.x()),(t_p1p2.y() - p.y()));
    float orth_p1p2_length = sqrtf(orth_p1p2.x() * orth_p1p2.x() + orth_p1p2.y() * orth_p1p2.y());
    float ea = acos(orth_p1p2.x() / orth_p1p2_length);
    if (orth_p1p2.y() < 0)
        ea = 2 * piDouble - ea;
    if ((sa > ea) && ((sa - ea) < piDouble))
        anticlockwise = true;
    if ((sa < ea) && ((ea - sa) > piDouble))
        anticlockwise = true;

    cairo_line_to(cr, t_p1p0.x(), t_p1p0.y());

    addArc(p, radius, sa, ea, anticlockwise);
}
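The core of addArcTo() is finding where the arc touches the two segments: with phi the angle at p1 between the rays toward p0 and p2, each tangent point lies radius / tan(phi / 2) away from p1, which is exactly what the `tangent`, `factor_p1p0` and `factor_p1p2` variables compute. A standalone sketch of just that geometry, with plain structs and illustrative names, assuming the degenerate cases the real function rejects first (coincident points, zero radius, collinear points) are already filtered out:

#include <cmath>

struct Point { float x, y; };

struct ArcToGeometry {
    Point tangent0;      // where the arc leaves the p0 -> p1 segment
    Point tangent2;      // where the arc rejoins toward p2
    float tangentLength; // distance from p1 to either tangent point
};

static ArcToGeometry arcToGeometry(const Point& p0, const Point& p1, const Point& p2, float radius)
{
    float ax = p0.x - p1.x, ay = p0.y - p1.y; // p1 -> p0
    float bx = p2.x - p1.x, by = p2.y - p1.y; // p1 -> p2
    float aLength = std::sqrt(ax * ax + ay * ay);
    float bLength = std::sqrt(bx * bx + by * by);

    // phi is the angle at p1; the tangent points sit radius / tan(phi / 2)
    // away from p1 along each ray.
    float cosPhi = (ax * bx + ay * by) / (aLength * bLength);
    float tangentLength = radius / std::tan(std::acos(cosPhi) / 2);

    ArcToGeometry g;
    g.tangentLength = tangentLength;
    g.tangent0 = { p1.x + ax * (tangentLength / aLength), p1.y + ay * (tangentLength / aLength) };
    g.tangent2 = { p1.x + bx * (tangentLength / bLength), p1.y + by * (tangentLength / bLength) };
    return g;
}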
Example #9
void Path::addArc(const FloatPoint& center, float radius, float startAngle, float endAngle, bool anticlockwise)
{
    // The OpenVG spec says nothing about inf as radius or start/end angle.
    // WebKit seems to pass those (e.g. https://bugs.webkit.org/show_bug.cgi?id=16449),
    // so abort instead of risking undefined behavior.
    if (!isfinite(radius) || !isfinite(startAngle) || !isfinite(endAngle))
        return;

    // For some reason, the HTML 5 spec defines the angle as going clockwise
    // from the positive X axis instead of going standard anticlockwise.
    // So let's make it a proper angle in order to keep sanity.
    startAngle = fmod((2.0 * piDouble) - startAngle, 2.0 * piDouble);
    endAngle = fmod((2.0 * piDouble) - endAngle, 2.0 * piDouble);

    // Make it so that endAngle > startAngle. fmod() above takes care of
    // keeping the difference below 360 degrees.
    if (endAngle <= startAngle)
        endAngle += 2.0 * piDouble;

    const VGfloat angleDelta = anticlockwise
        ? (endAngle - startAngle)
        : (startAngle - endAngle + (2.0 * piDouble));

    // OpenVG uses endpoint parameterization while this method receives its
    // values in center parameterization. It lacks an ellipse rotation
    // parameter so we use 0 for that, and also the radius is only a single
    // value which makes for rh == rv. In order to convert from endpoint to
    // center parameterization, we use the formulas from the OpenVG/SVG specs:

    // (x,y) = (cos rot, -sin rot; sin rot, -cos rot) * (rh * cos angle, rv * sin angle) + (center.x, center.y)
    // rot is 0, which simplifies this a bit:
    // (x,y) = (1, 0; 0, -1) * (rh * cos angle, rv * sin angle) + (center.x, center.y)
    //       = (1 * rh * cos angle + 0 * rv * sin angle, 0 * rh * cos angle + -1 * rv * sin angle) + (center.x, center.y)
    //       = (rh * cos angle, -rv * sin angle) + (center.x, center.y)
    // (Set angle = {startAngle, endAngle} to retrieve the respective endpoints.)

    const VGfloat startX = radius * cos(startAngle) + center.x();
    const VGfloat startY = -radius * sin(startAngle) + center.y();
    const VGfloat endX = radius * cos(endAngle) + center.x();
    const VGfloat endY = -radius * sin(endAngle) + center.y();

    // Fa: large arc flag, makes the difference between SCWARC_TO and LCWARC_TO
    //     respectively SCCWARC_TO and LCCWARC_TO arcs.
    const bool largeArc = (angleDelta > piDouble);

    // Fs: sweep flag, specifying whether the arc is drawn in increasing (true)
    //     or decreasing (false) direction. No need to calculate this value, as
    //     we already get it passed as a parameter (Fs == !anticlockwise).

    // Translate the large arc and sweep flags into an OpenVG segment command.
    // As OpenVG thinks of everything upside down, we need to reverse the
    // anticlockwise parameter in order to get the specified rotation.
    const VGubyte segmentCommand = !anticlockwise
        ? (largeArc ? VG_LCCWARC_TO_ABS : VG_SCCWARC_TO_ABS)
        : (largeArc ? VG_LCWARC_TO_ABS : VG_SCWARC_TO_ABS);

    // So now, we've got all the parameters in endpoint parameterization format
    // as OpenVG requires it. Which means we can just pass it like this.
    const VGubyte pathSegments[] = {
        hasCurrentPoint() ? VG_LINE_TO_ABS : VG_MOVE_TO_ABS,
        segmentCommand
    };
    const VGfloat pathData[] = {
        startX, startY,
        radius, radius, 0, endX, endY
    };

    m_path->makeCompatibleContextCurrent();
    vgAppendPathData(m_path->vgPath(), 2, pathSegments, pathData);
    ASSERT_VG_NO_ERROR();

    m_path->m_currentPoint.setX(endX);
    m_path->m_currentPoint.setY(endY);
}
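The endpoint computation above flips the clockwise canvas angle to the anticlockwise convention and then evaluates (r * cos a, -r * sin a) + center. A small standalone sketch of that mapping, with a plain Point struct and an illustrative function name:

#include <cmath>

struct Point { float x, y; };

// Maps one angle of a center-parameterized arc (center, radius, angle measured
// clockwise from +X, as the canvas API does) to the corresponding endpoint,
// mirroring the startX/startY/endX/endY computation above.
static Point arcEndpoint(const Point& center, float radius, double clockwiseAngle)
{
    const double twoPi = 6.283185307179586;
    double a = std::fmod(twoPi - clockwiseAngle, twoPi); // flip to anticlockwise
    return { static_cast<float>(radius * std::cos(a) + center.x),
             static_cast<float>(-radius * std::sin(a) + center.y) };
}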
Example #10
void Path::addLineTo(const FloatPoint& p)
{
    cairo_t* cr = ensurePlatformPath()->context();
    cairo_line_to(cr, p.x(), p.y());
}
Example #11
static inline float areaOfTriangleFormedByPoints(const FloatPoint& p1, const FloatPoint& p2, const FloatPoint& p3)
{
    return p1.x() * (p2.y() - p3.y()) + p2.x() * (p3.y() - p1.y()) + p3.x() * (p1.y() - p2.y());
}
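Strictly speaking this expression is twice the signed area of the triangle (the 2D cross product of two of its edge vectors), but since addArcTo() above only compares it with zero to detect collinear points, the factor of two is harmless. A sketch of that use, with plain structs and illustrative names:

struct Point { float x, y; };

// Same expression as above: twice the signed area of the triangle (p1, p2, p3).
// Zero means the points are collinear; the sign encodes the winding direction.
static float twiceSignedArea(const Point& p1, const Point& p2, const Point& p3)
{
    return p1.x * (p2.y - p3.y) + p2.x * (p3.y - p1.y) + p3.x * (p1.y - p2.y);
}

// This is the collinearity test addArcTo() relies on.
static bool pointsAreCollinear(const Point& p1, const Point& p2, const Point& p3)
{
    return twiceSignedArea(p1, p2, p3) == 0;
}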
Example #12
void GraphicsContext::drawLineForDocumentMarker(const FloatPoint& point, float width, DocumentMarkerLineStyle style)
{
    if (paintingDisabled())
        return;

    if (style != DocumentMarkerSpellingLineStyle && style != DocumentMarkerGrammarLineStyle)
        return;

    // These are the same for misspelling or bad grammar
    const int patternHeight = 3; // 3 rows
    ASSERT(cMisspellingLineThickness == patternHeight);
    const int patternWidth = 4; // 4 pixels
    ASSERT(patternWidth == cMisspellingLinePatternWidth);

    // Make sure to draw only complete dots.
    // NOTE: Code here used to shift the underline to the left and increase the width
    // to make sure everything gets underlined, but that results in drawing out of
    // bounds (e.g. when at the edge of a view) and could make it appear that the
    // space between adjacent misspelled words was underlined.
    // Allow slightly more, considering that the pattern ends with a transparent pixel.
    float widthMod = fmodf(width, patternWidth);
    if (patternWidth - widthMod > cMisspellingLinePatternGapWidth)
        width -= widthMod;

    // Draw the underline
    CGContextRef context = platformContext();
    CGContextSaveGState(context);

    const Color& patternColor = style == DocumentMarkerGrammarLineStyle ? grammarPatternColor() : spellingPatternColor();
    setCGStrokeColor(context, patternColor);

    wkSetPatternPhaseInUserSpace(context, point);
    CGContextSetBlendMode(context, kCGBlendModeNormal);

    // 3 rows, each offset by half a pixel for blending purposes
    const CGPoint upperPoints [] = {{point.x(), point.y() + patternHeight - 2.5 }, {point.x() + width, point.y() + patternHeight - 2.5}};
    const CGPoint middlePoints [] = {{point.x(), point.y() + patternHeight - 1.5 }, {point.x() + width, point.y() + patternHeight - 1.5}};
    const CGPoint lowerPoints [] = {{point.x(), point.y() + patternHeight - 0.5 }, {point.x() + width, point.y() + patternHeight - 0.5 }};

    // Dash lengths for the top and bottom of the error underline are the same.
    // These are magic.
    static const CGFloat edge_dash_lengths[] = {2.0f, 2.0f};
    static const CGFloat middle_dash_lengths[] = { 2.76f, 1.24f };
    static const CGFloat edge_offset = -(edge_dash_lengths[1] - 1.0f) / 2.0f;
    static const CGFloat middle_offset = -(middle_dash_lengths[1] - 1.0f) / 2.0f;

    // Line opacities.  Once again, these are magic.
    const float upperOpacity = 0.33f;
    const float middleOpacity = 0.75f;
    const float lowerOpacity = 0.88f;

    // Top line
    CGContextSetLineDash(context, edge_offset, edge_dash_lengths, WTF_ARRAY_LENGTH(edge_dash_lengths));
    CGContextSetAlpha(context, upperOpacity);
    CGContextStrokeLineSegments(context, upperPoints, 2);

    // Middle line
    CGContextSetLineDash(context, middle_offset, middle_dash_lengths, WTF_ARRAY_LENGTH(middle_dash_lengths));
    CGContextSetAlpha(context, middleOpacity);
    CGContextStrokeLineSegments(context, middlePoints, 2);

    // Bottom line
    CGContextSetLineDash(context, edge_offset, edge_dash_lengths, WTF_ARRAY_LENGTH(edge_dash_lengths));
    CGContextSetAlpha(context, lowerOpacity);
    CGContextStrokeLineSegments(context, lowerPoints, 2);

    CGContextRestoreGState(context);
}
Example #13
static void drawTextCommon(GraphicsContext* ctx, const TextRun& run, const FloatPoint& point, int from, int to, const QFont& font, bool isComplexText)
{
    if (to < 0)
        to = run.length();

    QPainter *p = ctx->platformContext();

    QPen textFillPen;
    if (ctx->textDrawingMode() & TextModeFill) {
        if (ctx->fillGradient()) {
            QBrush brush(*ctx->fillGradient()->platformGradient());
            brush.setTransform(ctx->fillGradient()->gradientSpaceTransform());
            textFillPen = QPen(brush, 0);
        } else if (ctx->fillPattern()) {
            AffineTransform affine;
            textFillPen = QPen(QBrush(ctx->fillPattern()->createPlatformPattern(affine)), 0);
        } else
            textFillPen = QPen(QColor(ctx->fillColor()));
    }

    QPen textStrokePen;
    if (ctx->textDrawingMode() & TextModeStroke) {
        if (ctx->strokeGradient()) {
            QBrush brush(*ctx->strokeGradient()->platformGradient());
            brush.setTransform(ctx->strokeGradient()->gradientSpaceTransform());
            textStrokePen = QPen(brush, ctx->strokeThickness());
        } else if (ctx->strokePattern()) {
            AffineTransform affine;
            QBrush brush(ctx->strokePattern()->createPlatformPattern(affine));
            textStrokePen = QPen(brush, ctx->strokeThickness());
        } else
            textStrokePen = QPen(QColor(ctx->strokeColor()), ctx->strokeThickness());
    }

    String sanitized = Font::normalizeSpaces(String(run.characters(), run.length()));
    QString string = fromRawDataWithoutRef(sanitized);
    QPointF pt(point.x(), point.y());

    if (from > 0 || to < run.length()) {
        if (isComplexText) {
            QTextLayout layout(string, font);
            QTextLine line = setupLayout(&layout, run);
            float x1 = line.cursorToX(from);
            float x2 = line.cursorToX(to);
            if (x2 < x1)
                qSwap(x1, x2);

            QFontMetrics fm(font);
            int ascent = fm.ascent();
            QRectF boundingRect(point.x() + x1, point.y() - ascent, x2 - x1, fm.height());
            QRectF clip = boundingRect;

            ContextShadow* ctxShadow = ctx->contextShadow();

            if (ctxShadow->m_type != ContextShadow::NoShadow) {
                qreal dx1 = 0, dx2 = 0, dy1 = 0, dy2 = 0;
                if (ctxShadow->offset().x() > 0)
                    dx2 = ctxShadow->offset().x();
                else
                    dx1 = -ctxShadow->offset().x();
                if (ctxShadow->offset().y() > 0)
                    dy2 = ctxShadow->offset().y();
                else
                    dy1 = -ctxShadow->offset().y();
                // expand the clip rect to include the text shadow as well
                clip.adjust(dx1, dx2, dy1, dy2);
                clip.adjust(-ctxShadow->m_blurDistance, -ctxShadow->m_blurDistance, ctxShadow->m_blurDistance, ctxShadow->m_blurDistance);
            }
            p->save();
            p->setClipRect(clip.toRect(), Qt::IntersectClip);
            pt.setY(pt.y() - ascent);

            if (ctxShadow->m_type != ContextShadow::NoShadow) {
                ContextShadow* ctxShadow = ctx->contextShadow();
                if (!ctxShadow->mustUseContextShadow(p)) {
                    p->save();
                    p->setPen(ctxShadow->m_color);
                    p->translate(ctxShadow->offset());
                    line.draw(p, pt);
                    p->restore();
                } else {
                    QPainter* shadowPainter = ctxShadow->beginShadowLayer(p, boundingRect);
                    if (shadowPainter) {
                        // Since it will be blurred anyway, we don't care about render hints.
                        shadowPainter->setFont(p->font());
                        shadowPainter->setPen(ctxShadow->m_color);
                        line.draw(shadowPainter, pt);
                        ctxShadow->endShadowLayer(p);
                    }
                }
            }
            p->setPen(textFillPen);
            line.draw(p, pt);
            p->restore();
            return;
        }
#if QT_VERSION >= QT_VERSION_CHECK(4, 7, 0)
        int skipWidth = QFontMetrics(font).width(string, from, Qt::TextBypassShaping);
        pt.setX(pt.x() + skipWidth);
        string = fromRawDataWithoutRef(sanitized, from, to - from);
#endif
    }

    p->setFont(font);

    int flags = run.rtl() ? Qt::TextForceRightToLeft : Qt::TextForceLeftToRight;
#if QT_VERSION >= QT_VERSION_CHECK(4, 7, 0)
    // See QWebPagePrivate::QWebPagePrivate() where the default path is set to Complex for Qt 4.6 and earlier.
    if (!isComplexText && !(ctx->textDrawingMode() & TextModeStroke))
        flags |= Qt::TextBypassShaping;
#endif

    QPainterPath textStrokePath;
    if (ctx->textDrawingMode() & TextModeStroke)
        textStrokePath.addText(pt, font, string);

    ContextShadow* ctxShadow = ctx->contextShadow();
    if (ctxShadow->m_type != ContextShadow::NoShadow) {
        if (ctx->textDrawingMode() & TextModeFill) {
            if (ctxShadow->m_type != ContextShadow::BlurShadow) {
                p->save();
                p->setPen(ctxShadow->m_color);
                p->translate(ctxShadow->offset());
                p->drawText(pt, string, flags, run.padding());
                p->restore();
            } else {
                QFontMetrics fm(font);
#if QT_VERSION >= QT_VERSION_CHECK(4, 7, 0)
                QRectF boundingRect(pt.x(), point.y() - fm.ascent(), fm.width(string, -1, flags), fm.height());
#else
                QRectF boundingRect(pt.x(), point.y() - fm.ascent(), fm.width(string), fm.height());
#endif
                QPainter* shadowPainter = ctxShadow->beginShadowLayer(p, boundingRect);
                if (shadowPainter) {
                    // Since it will be blurred anyway, we don't care about render hints.
                    shadowPainter->setFont(p->font());
                    shadowPainter->setPen(ctxShadow->m_color);
                    shadowPainter->drawText(pt, string, flags, run.padding());
                    ctxShadow->endShadowLayer(p);
                }
            }
        } else if (ctx->textDrawingMode() & TextModeStroke) {
            if (ctxShadow->m_type != ContextShadow::BlurShadow) {
                p->translate(ctxShadow->offset());
                p->strokePath(textStrokePath, QPen(ctxShadow->m_color));
                p->translate(-ctxShadow->offset());
            } else {
                QFontMetrics fm(font);
#if QT_VERSION >= QT_VERSION_CHECK(4, 7, 0)
                QRectF boundingRect(pt.x(), point.y() - fm.ascent(), fm.width(string, -1, flags), fm.height());
#else
                QRectF boundingRect(pt.x(), point.y() - fm.ascent(), fm.width(string), fm.height());
#endif
                QPainter* shadowPainter = ctxShadow->beginShadowLayer(p, boundingRect);
                if (shadowPainter) {
                    // Since it will be blurred anyway, we don't care about render hints.
                    shadowPainter->setFont(p->font());
                    shadowPainter->strokePath(textStrokePath, QPen(ctxShadow->m_color));
                    ctxShadow->endShadowLayer(p);
                }
            }
        }
    }

    if (ctx->textDrawingMode() & TextModeStroke)
        p->strokePath(textStrokePath, textStrokePen);

    if (ctx->textDrawingMode() & TextModeFill) {
        QPen previousPen = p->pen();
        p->setPen(textFillPen);
        p->drawText(pt, string, flags, run.padding());
        p->setPen(previousPen);
    }
}
Example #14
static inline float leftSide(const FloatPoint& vertex1, const FloatPoint& vertex2, const FloatPoint& point)
{
    return ((point.x() - vertex1.x()) * (vertex2.y() - vertex1.y())) - ((vertex2.x() - vertex1.x()) * (point.y() - vertex1.y()));
}
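leftSide() is the usual side-of-line test: the cross product of (point - vertex1) with (vertex2 - vertex1) is zero when the point sits on the line through the two vertices and changes sign from one half-plane to the other; which sign counts as "left" depends on whether the y axis grows upward or downward. A standalone sketch with plain structs and illustrative names:

struct Point { float x, y; };

// Cross product of (point - vertex1) with (vertex2 - vertex1), identical to
// leftSide() above. Zero means the point lies on the infinite line through the
// vertices; the sign selects one of the two half-planes.
static float sideOfLine(const Point& vertex1, const Point& vertex2, const Point& point)
{
    return (point.x - vertex1.x) * (vertex2.y - vertex1.y)
        - (vertex2.x - vertex1.x) * (point.y - vertex1.y);
}

static bool isOnLine(const Point& vertex1, const Point& vertex2, const Point& point)
{
    return sideOfLine(vertex1, vertex2, point) == 0;
}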
Example #15
// Blend the points with a ratio (1/3):(2/3).
static FloatPoint blendPoints(const FloatPoint& p1, const FloatPoint& p2)
{
    const float oneOverThree = 1 / 3.f;
    return FloatPoint((p1.x() + 2 * p2.x()) * oneOverThree, (p1.y() + 2 * p2.y()) * oneOverThree);
}
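This 1/3 : 2/3 blend is the standard degree elevation used to express a quadratic Bézier as an equivalent cubic: each cubic control point is the matching endpoint moved two thirds of the way toward the quadratic control point. A standalone sketch with plain structs and illustrative names:

struct Point { float x, y; };

static Point blend(const Point& a, const Point& b) // (1/3) * a + (2/3) * b
{
    const float oneOverThree = 1 / 3.f;
    return { (a.x + 2 * b.x) * oneOverThree, (a.y + 2 * b.y) * oneOverThree };
}

struct CubicControls { Point control1, control2; };

// Degree elevation: the quadratic Bezier (start, control, end) traces exactly
// the same curve as the cubic whose control points are blend(start, control)
// and blend(end, control).
static CubicControls quadraticToCubic(const Point& start, const Point& control, const Point& end)
{
    return { blend(start, control), blend(end, control) };
}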
Example #16
void drawTextWithSpacing(GraphicsContext* graphicsContext, const SimpleFontData* font, const wxColour& color, const GlyphBuffer& glyphBuffer, int from, int numGlyphs, const FloatPoint& point)
{
    graphicsContext->save();
    
    wxGCDC* dc = static_cast<wxGCDC*>(graphicsContext->platformContext());

    wxFont* wxfont = font->getWxFont();
    graphicsContext->setFillColor(graphicsContext->fillColor(), DeviceColorSpace);

    CGContextRef cgContext = static_cast<CGContextRef>(dc->GetGraphicsContext()->GetNativeContext());

    CGFontRef cgFont;

#if defined(wxOSX_USE_CORE_TEXT) && wxOSX_USE_CORE_TEXT
    cgFont = CTFontCopyGraphicsFont((CTFontRef)wxfont->OSXGetCTFont(), NULL);
#else
    ATSFontRef fontRef;
    
    fontRef = FMGetATSFontRefFromFont(wxfont->MacGetATSUFontID());
    
    if (fontRef)
        cgFont = CGFontCreateWithPlatformFont((void*)&fontRef);
#endif
    
    CGContextSetFont(cgContext, cgFont);

    CGContextSetFontSize(cgContext, wxfont->GetPointSize());

    CGFloat red, green, blue, alpha;
    graphicsContext->fillColor().getRGBA(red, green, blue, alpha);
    CGContextSetRGBFillColor(cgContext, red, green, blue, alpha);

    CGAffineTransform matrix = CGAffineTransformIdentity;
    matrix.b = -matrix.b;
    matrix.d = -matrix.d;
    
    CGContextSetTextMatrix(cgContext, matrix);

    CGContextSetTextPosition(cgContext, point.x(), point.y());
    
    const FloatSize* advanceSizes = static_cast<const FloatSize*>(glyphBuffer.advances(from));
    int size = glyphBuffer.size() - from;
    CGSize sizes[size];
    CGGlyph glyphs[numGlyphs];
    
    // if the function doesn't exist, we're probably on tiger and need to grab the
    // function under its old name, CGFontGetGlyphsForUnicodes
    if (!CGFontGetGlyphsForUnichars)
        CGFontGetGlyphsForUnichars = (CGFontGetGlyphsForUnicharsPtr)dlsym(RTLD_DEFAULT, "CGFontGetGlyphsForUnicodes");
    
    // Let's make sure we got the function under one name or another!
    ASSERT(CGFontGetGlyphsForUnichars);
    CGFontGetGlyphsForUnichars(cgFont, glyphBuffer.glyphs(from), glyphs, numGlyphs);
    
    for (int i = 0; i < size; i++) {
        FloatSize fsize = advanceSizes[i];
        sizes[i] = CGSizeMake(fsize.width(), fsize.height());
    }
    
    CGContextShowGlyphsWithAdvances(cgContext, glyphs, sizes, numGlyphs);
    
    if (cgFont)
        CGFontRelease(cgFont);
    graphicsContext->restore();
}
Example #17
// RenderBox methods will expect coordinates w/o any transforms in coordinates
// relative to our borderBox origin.  This method gives us exactly that.
void RenderSVGRoot::buildLocalToBorderBoxTransform()
{
    SVGSVGElement* svg = toSVGSVGElement(node());
    ASSERT(svg);
    float scale = style()->effectiveZoom();
    FloatPoint translate = svg->currentTranslate();
    LayoutSize borderAndPadding(borderLeft() + paddingLeft(), borderTop() + paddingTop());
    m_localToBorderBoxTransform = svg->viewBoxToViewTransform(contentWidth() / scale, contentHeight() / scale);
    AffineTransform viewToBorderBoxTransform(scale, 0, 0, scale, borderAndPadding.width() + translate.x(), borderAndPadding.height() + translate.y());
    if (viewToBorderBoxTransform.isIdentity())
        return;
    m_localToBorderBoxTransform = viewToBorderBoxTransform * m_localToBorderBoxTransform;
}
Example #18
void Font::drawGlyphs(GraphicsContext* gc, const SimpleFontData* font,
    const GlyphBuffer& glyphBuffer, unsigned from, unsigned numGlyphs,
    const FloatPoint& point, const FloatRect& textRect) const
{
    SkScalar x = SkFloatToScalar(point.x());
    SkScalar y = SkFloatToScalar(point.y());

// ENABLE_OPENTYPE_VERTICAL is not enabled on MACOSX
#if !OS(MACOSX)
    const OpenTypeVerticalData* verticalData = font->verticalData();
    if (font->platformData().orientation() == Vertical && verticalData) {
        SkAutoSTMalloc<32, SkPoint> storage(numGlyphs);
        SkPoint* pos = storage.get();

        AffineTransform savedMatrix = gc->getCTM();
        gc->concatCTM(AffineTransform(0, -1, 1, 0, point.x(), point.y()));
        gc->concatCTM(AffineTransform(1, 0, 0, 1, -point.x(), -point.y()));

        const unsigned kMaxBufferLength = 256;
        Vector<FloatPoint, kMaxBufferLength> translations;

        const FontMetrics& metrics = font->fontMetrics();
        SkScalar verticalOriginX = SkFloatToScalar(point.x() + metrics.floatAscent() - metrics.floatAscent(IdeographicBaseline));
        float horizontalOffset = point.x();

        unsigned glyphIndex = 0;
        while (glyphIndex < numGlyphs) {
            unsigned chunkLength = std::min(kMaxBufferLength, numGlyphs - glyphIndex);

            const Glyph* glyphs = glyphBuffer.glyphs(from + glyphIndex);
            translations.resize(chunkLength);
            verticalData->getVerticalTranslationsForGlyphs(font, &glyphs[0], chunkLength, reinterpret_cast<float*>(&translations[0]));

            x = verticalOriginX;
            y = SkFloatToScalar(point.y() + horizontalOffset - point.x());

            float currentWidth = 0;
            for (unsigned i = 0; i < chunkLength; ++i, ++glyphIndex) {
                pos[i].set(
                    x + SkIntToScalar(lroundf(translations[i].x())),
                    y + -SkIntToScalar(-lroundf(currentWidth - translations[i].y())));
                currentWidth += glyphBuffer.advanceAt(from + glyphIndex);
            }
            horizontalOffset += currentWidth;
            paintGlyphs(gc, font, glyphs, chunkLength, pos, textRect);
        }

        gc->setCTM(savedMatrix);
        return;
    }
#endif

    if (!glyphBuffer.hasOffsets()) {
        SkAutoSTMalloc<64, SkScalar> storage(numGlyphs);
        SkScalar* xpos = storage.get();
        const float* adv = glyphBuffer.advances(from);
        for (unsigned i = 0; i < numGlyphs; i++) {
            xpos[i] = x;
            x += SkFloatToScalar(adv[i]);
        }
        const Glyph* glyphs = glyphBuffer.glyphs(from);
        paintGlyphsHorizontal(gc, font, glyphs, numGlyphs, xpos, SkFloatToScalar(y), textRect);
        return;
    }

    // FIXME: text rendering speed:
    // Android has code in their WebCore fork to special case when the
    // GlyphBuffer has no advances other than the defaults. In that case the
    // text drawing can proceed faster. However, it's unclear when those
    // patches may be upstreamed to WebKit so we always use the slower path
    // here.
    SkAutoSTMalloc<32, SkPoint> storage(numGlyphs);
    SkPoint* pos = storage.get();
    const FloatSize* offsets = glyphBuffer.offsets(from);
    const float* advances = glyphBuffer.advances(from);
    SkScalar advanceSoFar = SkFloatToScalar(0);
    for (unsigned i = 0; i < numGlyphs; i++) {
        pos[i].set(
            x + SkFloatToScalar(offsets[i].width()) + advanceSoFar,
            y + SkFloatToScalar(offsets[i].height()));
        advanceSoFar += SkFloatToScalar(advances[i]);
    }

    const Glyph* glyphs = glyphBuffer.glyphs(from);
    paintGlyphs(gc, font, glyphs, numGlyphs, pos, textRect);
}
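In the horizontal fast path above, each glyph's x position is simply the running sum of the preceding advances. A standalone sketch of that accumulation using std::vector<float> instead of Skia's storage types (illustrative name, not the WebKit/Skia API):

#include <vector>

// Accumulates per-glyph x positions from advances starting at originX, the
// same running sum the horizontal fast path builds into xpos[].
static std::vector<float> glyphXPositions(const std::vector<float>& advances, float originX)
{
    std::vector<float> xpos(advances.size());
    float x = originX;
    for (size_t i = 0; i < advances.size(); ++i) {
        xpos[i] = x;       // glyph i is drawn at the current pen position
        x += advances[i];  // the pen then moves by the glyph's advance width
    }
    return xpos;
}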
Example #19
void drawTextWithSpacing(GraphicsContext* graphicsContext, const SimpleFontData* font, const wxColour& color, const GlyphBuffer& glyphBuffer, int from, int numGlyphs, const FloatPoint& point)
{
#if USE(WXGC)
    wxGCDC* dc = static_cast<wxGCDC*>(graphicsContext->platformContext());
    wxGraphicsContext* gc = dc->GetGraphicsContext();
    gc->PushState();
    cairo_t* cr = (cairo_t*)gc->GetNativeContext();

    wxFont* wxfont = font->getWxFont();
    cairo_scaled_font_t* scaled_font = 0;
#if wxUSE_PANGO
    PangoFont* pangoFont = createPangoFontForFont(wxfont);
    PangoFontMap* fontMap = pangoFontMap();
    PangoContext* pangoContext = pango_cairo_font_map_create_context(PANGO_CAIRO_FONT_MAP(fontMap));
    scaled_font = createScaledFontForFont(wxfont); 
#elif __WXMSW__
    cairo_matrix_t sizeMatrix, ctm;
    cairo_matrix_init_identity(&ctm);
    int size = font->platformData().size();
    cairo_matrix_init_scale(&sizeMatrix, size, size);

    cairo_font_options_t* fontOptions = cairo_font_options_create();
    cairo_font_options_set_antialias(fontOptions, CAIRO_ANTIALIAS_SUBPIXEL);
    
    cairo_font_face_t* win_face = cairo_win32_font_face_create_for_hfont((HFONT)wxfont->GetHFONT());
    scaled_font = cairo_scaled_font_create(win_face, &sizeMatrix, &ctm, fontOptions); 
#endif
    ASSERT(scaled_font);

    GlyphBufferGlyph* glyphs = const_cast<GlyphBufferGlyph*>(glyphBuffer.glyphs(from));
    float offset = point.x();

    for (int i = 0; i < numGlyphs; i++) {
#if wxUSE_PANGO
        glyphs[i].index = pango_font_get_glyph(pangoFont, pangoContext, glyphBuffer.glyphAt(from + i));
#endif
        glyphs[i].x = offset;
        glyphs[i].y = point.y();
        offset += glyphBuffer.advanceAt(from + i);
    }

    cairo_set_source_rgba(cr, color.Red()/255.0, color.Green()/255.0, color.Blue()/255.0, color.Alpha()/255.0);
    cairo_set_scaled_font(cr, scaled_font);
    
    cairo_show_glyphs(cr, glyphs, numGlyphs);

    cairo_scaled_font_destroy(scaled_font);
    gc->PopState();
#else
    wxDC* dc = graphicsContext->platformContext();

    wxFont* wxfont = font->getWxFont();
    if (wxfont && wxfont->IsOk())
        dc->SetFont(*wxfont);
    dc->SetTextForeground(color);

    // convert glyphs to wxString
    GlyphBufferGlyph* glyphs = const_cast<GlyphBufferGlyph*>(glyphBuffer.glyphs(from));
    int offset = point.x();
    wxString text = wxEmptyString;
    for (unsigned i = 0; i < numGlyphs; i++) {
        text = text.Append((wxChar)glyphs[i]);
        offset += glyphBuffer.advanceAt(from + i);
    }
    
    // the y point is actually the bottom point of the text, turn it into the top
    float height = font->ascent() - font->descent();
    wxCoord ypoint = (wxCoord) (point.y() - height);
     
    dc->DrawText(text, (wxCoord)point.x(), ypoint);
#endif
}
Example #20
void Image::drawPattern(GraphicsContext* context,
                        const FloatRect& floatSrcRect,
                        const AffineTransform& patternTransform,
                        const FloatPoint& phase,
                        ColorSpace styleColorSpace,
                        CompositeOperator compositeOp,
                        const FloatRect& destRect)
{
    FloatRect normSrcRect = normalizeRect(floatSrcRect);
    if (destRect.isEmpty() || normSrcRect.isEmpty())
        return;  // nothing to draw

    NativeImageSkia* bitmap = nativeImageForCurrentFrame();
    if (!bitmap)
        return;

    // This is a very inexpensive operation. It will generate a new bitmap but
    // it will internally reference the old bitmap's pixels, adjusting the row
    // stride so the extra pixels appear as padding to the subsetted bitmap.
    SkBitmap srcSubset;
    SkIRect srcRect = enclosingIntRect(normSrcRect);
    bitmap->extractSubset(&srcSubset, srcRect);

    SkBitmap resampled;
    SkShader* shader;

    // Figure out what size the bitmap will be in the destination. The
    // destination rect is the bounds of the pattern, we need to use the
    // matrix to see how big it will be.
    float destBitmapWidth, destBitmapHeight;
    TransformDimensions(patternTransform, srcRect.width(), srcRect.height(),
                        &destBitmapWidth, &destBitmapHeight);

    // Compute the resampling mode.
    ResamplingMode resampling;
    if (context->platformContext()->isPrinting())
      resampling = RESAMPLE_LINEAR;
    else {
      resampling = computeResamplingMode(context->platformContext(), *bitmap,
                                         srcRect.width(), srcRect.height(),
                                         destBitmapWidth, destBitmapHeight);
    }

    // Load the transform WebKit requested.
    SkMatrix matrix(patternTransform);

    if (resampling == RESAMPLE_AWESOME) {
        // Do nice resampling.
        SkBitmap resampled;
        int width = static_cast<int>(destBitmapWidth);
        int height = static_cast<int>(destBitmapHeight);
        if (!srcRect.fLeft && !srcRect.fTop
            && srcRect.fRight == bitmap->width() && srcRect.fBottom == bitmap->height()
            && (bitmap->hasResizedBitmap(width, height)
                || bitmap->shouldCacheResampling(width, height, width, height))) {
            // resizedBitmap() caches resized image.
            resampled = bitmap->resizedBitmap(width, height);
        } else {
            resampled = skia::ImageOperations::Resize(srcSubset,
                skia::ImageOperations::RESIZE_LANCZOS3, width, height);
        }
        shader = SkShader::CreateBitmapShader(resampled, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);

        // Since we just resized the bitmap, we need to undo the scale set in
        // the image transform.
        matrix.setScaleX(SkIntToScalar(1));
        matrix.setScaleY(SkIntToScalar(1));
    } else {
        // No need to do nice resampling.
        shader = SkShader::CreateBitmapShader(srcSubset, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);
    }

    // We also need to translate it such that the origin of the pattern is the
    // origin of the destination rect, which is what WebKit expects. Skia uses
    // the coordinate system origin as the base for the pattern. If WebKit wants
    // a shifted image, it will shift it from there using the patternTransform.
    float adjustedX = phase.x() + normSrcRect.x() *
                      narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + normSrcRect.y() *
                      narrowPrecisionToFloat(patternTransform.d());
    matrix.postTranslate(SkFloatToScalar(adjustedX),
                         SkFloatToScalar(adjustedY));
    shader->setLocalMatrix(matrix);

    SkPaint paint;
    paint.setShader(shader)->unref();
    paint.setXfermodeMode(WebCoreCompositeToSkiaComposite(compositeOp));
    paint.setFilterBitmap(resampling == RESAMPLE_LINEAR);

    context->platformContext()->paintSkPaint(destRect, paint);
}
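The adjustedX/adjustedY computation translates the shader so the pattern's origin lands on the source rect's origin after scaling: the requested phase plus the source offset multiplied by the transform's diagonal entries (a() and d()). A tiny standalone sketch of that arithmetic, with illustrative names rather than Skia's API:

struct PatternPhase { float x, y; };

// Same arithmetic as adjustedX/adjustedY above: phase plus the source rect's
// origin scaled by the pattern transform's diagonal.
static PatternPhase adjustedPatternPhase(float phaseX, float phaseY,
                                         float srcX, float srcY,
                                         float scaleX, float scaleY)
{
    return { phaseX + srcX * scaleX, phaseY + srcY * scaleY };
}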
Example #21
void ScrollAnimator::scrollToOffsetWithoutAnimation(const FloatPoint& offset)
{
    m_currentPosX = offset.x();
    m_currentPosY = offset.y();
    notifyPositionChanged();
}
Example #22
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& tileRect, const AffineTransform& patternTransform,
                        const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator op, const FloatRect& destRect)
{
    if (!nativeImageForCurrentFrame())
        return;

    ASSERT(patternTransform.isInvertible());
    if (!patternTransform.isInvertible())
        // Avoid a hang under CGContextDrawTiledImage on release builds.
        return;

    CGContextRef context = ctxt->platformContext();
    GraphicsContextStateSaver stateSaver(*ctxt);
    CGContextClipToRect(context, destRect);
    ctxt->setCompositeOperation(op);
    CGContextTranslateCTM(context, destRect.x(), destRect.y() + destRect.height());
    CGContextScaleCTM(context, 1, -1);

    // Compute the scaled tile size.
    float scaledTileHeight = tileRect.height() * narrowPrecisionToFloat(patternTransform.d());

    // We have to adjust the phase to deal with the fact we're in Cartesian space now (with the bottom left corner of destRect being
    // the origin).
    float adjustedX = phase.x() - destRect.x() + tileRect.x() * narrowPrecisionToFloat(patternTransform.a()); // We translated the context so that destRect.x() is the origin, so subtract it out.
    float adjustedY = destRect.height() - (phase.y() - destRect.y() + tileRect.y() * narrowPrecisionToFloat(patternTransform.d()) + scaledTileHeight);

    CGImageRef tileImage = nativeImageForCurrentFrame();
    float h = CGImageGetHeight(tileImage);

    RetainPtr<CGImageRef> subImage;
    if (tileRect.size() == size())
        subImage = tileImage;
    else {
        // Copying a sub-image out of a partially-decoded image stops the decoding of the original image. It should never happen
        // because sub-images are only used for border-image, which only renders when the image is fully decoded.
        ASSERT(h == height());
        subImage.adoptCF(CGImageCreateWithImageInRect(tileImage, tileRect));
    }

    // Adjust the color space.
    subImage = imageWithColorSpace(subImage.get(), styleColorSpace);

    // Leopard has an optimized call for the tiling of image patterns, but we can only use it if the image has been decoded enough that
    // its buffer is the same size as the overall image.  Because a partially decoded CGImageRef with a smaller width or height than the
    // overall image buffer needs to tile with "gaps", we can't use the optimized tiling call in that case.
    // FIXME: We cannot use CGContextDrawTiledImage with scaled tiles on Leopard, because it suffers from rounding errors.  Snow Leopard is ok.
    float scaledTileWidth = tileRect.width() * narrowPrecisionToFloat(patternTransform.a());
    float w = CGImageGetWidth(tileImage);
#ifdef BUILDING_ON_LEOPARD
    if (w == size().width() && h == size().height() && scaledTileWidth == tileRect.width() && scaledTileHeight == tileRect.height())
#else
    if (w == size().width() && h == size().height())
#endif
        CGContextDrawTiledImage(context, FloatRect(adjustedX, adjustedY, scaledTileWidth, scaledTileHeight), subImage.get());
    else {

        // On Leopard and newer, this code now only runs for partially decoded images whose buffers do not yet match the overall size of the image.
        static const CGPatternCallbacks patternCallbacks = { 0, drawPatternCallback, NULL };
        CGAffineTransform matrix = CGAffineTransformMake(narrowPrecisionToCGFloat(patternTransform.a()), 0, 0, narrowPrecisionToCGFloat(patternTransform.d()), adjustedX, adjustedY);
        matrix = CGAffineTransformConcat(matrix, CGContextGetCTM(context));
        // The top of a partially-decoded image is drawn at the bottom of the tile. Map it to the top.
        matrix = CGAffineTransformTranslate(matrix, 0, size().height() - h);
        RetainPtr<CGPatternRef> pattern(AdoptCF, CGPatternCreate(subImage.get(), CGRectMake(0, 0, tileRect.width(), tileRect.height()),
                                        matrix, tileRect.width(), tileRect.height(),
                                        kCGPatternTilingConstantSpacing, true, &patternCallbacks));
        if (!pattern)
            return;

        RetainPtr<CGColorSpaceRef> patternSpace(AdoptCF, CGColorSpaceCreatePattern(0));

        CGFloat alpha = 1;
        RetainPtr<CGColorRef> color(AdoptCF, CGColorCreateWithPattern(patternSpace.get(), pattern.get(), &alpha));
        CGContextSetFillColorSpace(context, patternSpace.get());

        // FIXME: Really want a public API for this.  It is just CGContextSetBaseCTM(context, CGAffineTransformIdentity).
        //wkSetPatternBaseCTM(context, CGAffineTransformIdentity);
        CGContextSetPatternPhase(context, CGSizeZero);

        CGContextSetFillColorWithColor(context, color.get());
        CGContextFillRect(context, CGContextGetClipBoundingBox(context));

    }

    stateSaver.restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
Example #23
void SVGDocumentExtensions::startPan(const FloatPoint& start)
{
    if (SVGSVGElement* svg = rootElement(*m_document))
        m_translate = FloatPoint(start.x() - svg->currentTranslate().x(), start.y() - svg->currentTranslate().y());
}
Example #24
void Image::drawPattern(GraphicsContext* context,
                        const FloatRect& floatSrcRect,
                        const AffineTransform& patternTransform,
                        const FloatPoint& phase,
                        ColorSpace styleColorSpace,
                        CompositeOperator compositeOp,
                        const FloatRect& destRect,
                        BlendMode)
{
    TRACE_EVENT0("skia", "Image::drawPattern");
    RefPtr<NativeImageSkia> bitmap = nativeImageForCurrentFrame();
    if (!bitmap)
        return;

    FloatRect normSrcRect = normalizeRect(floatSrcRect);
    normSrcRect.intersect(FloatRect(0, 0, bitmap->bitmap().width(), bitmap->bitmap().height()));
    if (destRect.isEmpty() || normSrcRect.isEmpty())
        return; // nothing to draw

    SkMatrix ctm = context->platformContext()->getTotalMatrix();
    SkMatrix totalMatrix;
    totalMatrix.setConcat(ctm, patternTransform);

    // Figure out what size the bitmap will be in the destination. The
    // destination rect is the bounds of the pattern, we need to use the
    // matrix to see how big it will be.
    SkRect destRectTarget;
    totalMatrix.mapRect(&destRectTarget, normSrcRect);

    float destBitmapWidth = SkScalarToFloat(destRectTarget.width());
    float destBitmapHeight = SkScalarToFloat(destRectTarget.height());

    // Compute the resampling mode.
    ResamplingMode resampling;
    if (context->isAccelerated() || context->printing())
        resampling = RESAMPLE_LINEAR;
    else
        resampling = computeResamplingMode(totalMatrix, *bitmap, normSrcRect.width(), normSrcRect.height(), destBitmapWidth, destBitmapHeight);
    resampling = limitResamplingMode(context->platformContext(), resampling);

    // Load the transform WebKit requested.
    SkMatrix matrix(patternTransform);

    SkShader* shader;
    if (resampling == RESAMPLE_AWESOME) {
        // Do nice resampling.
        float scaleX = destBitmapWidth / normSrcRect.width();
        float scaleY = destBitmapHeight / normSrcRect.height();
        SkRect scaledSrcRect;
        SkIRect enclosingScaledSrcRect;

        // The image fragment generated here is not exactly what is
        // requested. The scale factor used is approximated and image
        // fragment is slightly larger to align to integer
        // boundaries.
        SkBitmap resampled = extractScaledImageFragment(*bitmap, normSrcRect, scaleX, scaleY, &scaledSrcRect, &enclosingScaledSrcRect);
        shader = SkShader::CreateBitmapShader(resampled, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);

        // Since we just resized the bitmap, we need to remove the scale
        // applied to the pixels in the bitmap shader. This means we need
        // CTM * patternTransform to have identity scale. Since we
        // can't modify CTM (or the rectangle will be drawn in the wrong
        // place), we must set patternTransform's scale to the inverse of
        // CTM scale.
        matrix.setScaleX(ctm.getScaleX() ? 1 / ctm.getScaleX() : 1);
        matrix.setScaleY(ctm.getScaleY() ? 1 / ctm.getScaleY() : 1);
    } else {
        // No need to do nice resampling.
        SkBitmap srcSubset;
        bitmap->bitmap().extractSubset(&srcSubset, enclosingIntRect(normSrcRect));
        shader = SkShader::CreateBitmapShader(srcSubset, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);
    }

    // We also need to translate it such that the origin of the pattern is the
    // origin of the destination rect, which is what WebKit expects. Skia uses
    // the coordinate system origin as the base for the pattern. If WebKit wants
    // a shifted image, it will shift it from there using the patternTransform.
    float adjustedX = phase.x() + normSrcRect.x() *
                      narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + normSrcRect.y() *
                      narrowPrecisionToFloat(patternTransform.d());
    matrix.postTranslate(SkFloatToScalar(adjustedX),
                         SkFloatToScalar(adjustedY));
    shader->setLocalMatrix(matrix);

    SkPaint paint;
    paint.setShader(shader)->unref();
    paint.setXfermodeMode(WebCoreCompositeToSkiaComposite(compositeOp));
    paint.setFilterBitmap(resampling == RESAMPLE_LINEAR);

    context->drawRect(destRect, paint);
}
Example #25
void SVGPathSegListBuilder::curveToQuadraticSmooth(const FloatPoint& targetPoint, PathCoordinateMode mode)
{
    ASSERT(m_pathElement);
    ASSERT(m_pathSegList);
    if (mode == AbsoluteCoordinates)
        m_pathSegList->appendWithoutByteStreamSync(SVGPathSegCurvetoQuadraticSmoothAbs::create(m_pathElement, targetPoint.x(), targetPoint.y()));
    else
        m_pathSegList->appendWithoutByteStreamSync(SVGPathSegCurvetoQuadraticSmoothRel::create(m_pathElement, targetPoint.x(), targetPoint.y()));
}
Example #26
// This works by converting the SVG arc to "simple" beziers.
// Partly adapted from Niko's code in kdelibs/kdecore/svgicons.
// See also SVG implementation notes: http://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter
bool SVGPathNormalizer::decomposeArcToCubic(const FloatPoint& currentPoint, const PathSegmentData& arcSegment)
{
    // If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a "lineto") joining the endpoints.
    // http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters
    float rx = fabsf(arcSegment.arcRadii().x());
    float ry = fabsf(arcSegment.arcRadii().y());
    if (!rx || !ry)
        return false;

    // If the current point and target point for the arc are identical, it should be treated as a zero length
    // path. This ensures continuity in animations.
    if (arcSegment.targetPoint == currentPoint)
        return false;

    float angle = arcSegment.arcAngle();

    FloatSize midPointDistance = currentPoint - arcSegment.targetPoint;
    midPointDistance.scale(0.5f);

    AffineTransform pointTransform;
    pointTransform.rotate(-angle);

    FloatPoint transformedMidPoint = pointTransform.mapPoint(FloatPoint(midPointDistance.width(), midPointDistance.height()));
    float squareRx = rx * rx;
    float squareRy = ry * ry;
    float squareX = transformedMidPoint.x() * transformedMidPoint.x();
    float squareY = transformedMidPoint.y() * transformedMidPoint.y();

    // Check if the radii are big enough to draw the arc, scale radii if not.
    // http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii
    float radiiScale = squareX / squareRx + squareY / squareRy;
    if (radiiScale > 1) {
        rx *= sqrtf(radiiScale);
        ry *= sqrtf(radiiScale);
    }

    pointTransform.makeIdentity();
    pointTransform.scale(1 / rx, 1 / ry);
    pointTransform.rotate(-angle);

    FloatPoint point1 = pointTransform.mapPoint(currentPoint);
    FloatPoint point2 = pointTransform.mapPoint(arcSegment.targetPoint);
    FloatSize delta = point2 - point1;

    float d = delta.width() * delta.width() + delta.height() * delta.height();
    float scaleFactorSquared = std::max(1 / d - 0.25f, 0.f);

    float scaleFactor = sqrtf(scaleFactorSquared);
    if (arcSegment.arcSweep == arcSegment.arcLarge)
        scaleFactor = -scaleFactor;

    delta.scale(scaleFactor);
    FloatPoint centerPoint = point1 + point2;
    centerPoint.scale(0.5f, 0.5f);
    centerPoint.move(-delta.height(), delta.width());

    float theta1 = FloatPoint(point1 - centerPoint).slopeAngleRadians();
    float theta2 = FloatPoint(point2 - centerPoint).slopeAngleRadians();

    float thetaArc = theta2 - theta1;
    if (thetaArc < 0 && arcSegment.arcSweep)
        thetaArc += twoPiFloat;
    else if (thetaArc > 0 && !arcSegment.arcSweep)
        thetaArc -= twoPiFloat;

    pointTransform.makeIdentity();
    pointTransform.rotate(angle);
    pointTransform.scale(rx, ry);

    // Some results of atan2 on some platform implementations are not exact enough,
    // so we would get more cubic curves than expected here. Adding 0.001f reduces
    // the count of segments to the correct count.
    int segments = ceilf(fabsf(thetaArc / (piOverTwoFloat + 0.001f)));
    for (int i = 0; i < segments; ++i) {
        float startTheta = theta1 + i * thetaArc / segments;
        float endTheta = theta1 + (i + 1) * thetaArc / segments;

        float t = (8 / 6.f) * tanf(0.25f * (endTheta - startTheta));
        if (!std::isfinite(t))
            return false;
        float sinStartTheta = sinf(startTheta);
        float cosStartTheta = cosf(startTheta);
        float sinEndTheta = sinf(endTheta);
        float cosEndTheta = cosf(endTheta);

        point1 = FloatPoint(cosStartTheta - t * sinStartTheta, sinStartTheta + t * cosStartTheta);
        point1.move(centerPoint.x(), centerPoint.y());
        FloatPoint targetPoint = FloatPoint(cosEndTheta, sinEndTheta);
        targetPoint.move(centerPoint.x(), centerPoint.y());
        point2 = targetPoint;
        point2.move(t * sinEndTheta, -t * cosEndTheta);

        PathSegmentData cubicSegment;
        cubicSegment.command = PathSegCurveToCubicAbs;
        cubicSegment.point1 = pointTransform.mapPoint(point1);
        cubicSegment.point2 = pointTransform.mapPoint(point2);
        cubicSegment.targetPoint = pointTransform.mapPoint(targetPoint);

        m_consumer->emitSegment(cubicSegment);
    }
    return true;
}
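Each iteration of the loop above builds one cubic segment for an arc of at most roughly 90 degrees on the unit circle, using the tangent factor t = (8 / 6) * tan((endTheta - startTheta) / 4), and then maps it through pointTransform to apply the radii, rotation and center. A standalone sketch of the unit-circle step only, with plain structs and illustrative names (the scale/rotate/translate step is left out):

#include <cmath>

struct Point { double x, y; };

struct CubicSegment { Point start, control1, control2, end; };

// Approximates the unit-circle arc from angle a to angle b with one cubic
// Bezier using the same factor as above: t = (4/3) * tan((b - a) / 4)
// (the code writes it as 8 / 6.f). Keeping |b - a| <= pi/2 per segment, as
// decomposeArcToCubic() does, keeps the approximation error negligible.
static CubicSegment unitCircleArcToCubic(double a, double b)
{
    double t = (4.0 / 3.0) * std::tan((b - a) / 4.0);
    CubicSegment s;
    s.start    = { std::cos(a), std::sin(a) };
    s.end      = { std::cos(b), std::sin(b) };
    s.control1 = { std::cos(a) - t * std::sin(a), std::sin(a) + t * std::cos(a) };
    s.control2 = { std::cos(b) + t * std::sin(b), std::sin(b) - t * std::cos(b) };
    return s;
}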
Example #27
void SVGPathSegListBuilder::moveTo(const FloatPoint& targetPoint, bool, PathCoordinateMode mode)
{
    ASSERT(m_pathElement);
    ASSERT(m_pathSegList);
    if (mode == AbsoluteCoordinates)
        m_pathSegList->appendWithoutByteStreamSync(SVGPathSegMovetoAbs::create(m_pathElement, targetPoint.x(), targetPoint.y()));
    else
        m_pathSegList->appendWithoutByteStreamSync(SVGPathSegMovetoRel::create(m_pathElement, targetPoint.x(), targetPoint.y()));
}
Example #28
void HTMLAnchorElement::defaultEventHandler(Event* evt)
{
    // React on clicks and on keypresses.
    // Don't make this KEYUP_EVENT again, it makes khtml follow links it shouldn't,
    // when pressing Enter in the combo.
    if (isLink() && (evt->type() == eventNames().clickEvent || (evt->type() == eventNames().keydownEvent && focused()))) {
        MouseEvent* e = 0;
        if (evt->type() == eventNames().clickEvent && evt->isMouseEvent())
            e = static_cast<MouseEvent*>(evt);

        KeyboardEvent* k = 0;
        if (evt->type() == eventNames().keydownEvent && evt->isKeyboardEvent())
            k = static_cast<KeyboardEvent*>(evt);

        if (e && e->button() == RightButton) {
            HTMLElement::defaultEventHandler(evt);
            return;
        }

        // If the link is editable, then we need to check the settings to see whether or not to follow the link
        if (isContentEditable()) {
            EditableLinkBehavior editableLinkBehavior = EditableLinkDefaultBehavior;
            if (Settings* settings = document()->settings())
                editableLinkBehavior = settings->editableLinkBehavior();
                
            switch (editableLinkBehavior) {
                // Always follow the link (Safari 2.0 behavior)
                default:
                case EditableLinkDefaultBehavior:
                case EditableLinkAlwaysLive:
                    break;

                case EditableLinkNeverLive:
                    HTMLElement::defaultEventHandler(evt);
                    return;

                // If the selection prior to clicking on this link resided in the same editable block as this link,
                // and the shift key isn't pressed, we don't want to follow the link
                case EditableLinkLiveWhenNotFocused:
                    if (e && !e->shiftKey() && m_rootEditableElementForSelectionOnMouseDown == rootEditableElement()) {
                        HTMLElement::defaultEventHandler(evt);
                        return;
                    }
                    break;

                // Only follow the link if the shift key is down (WinIE/Firefox behavior)
                case EditableLinkOnlyLiveWithShiftKey:
                    if (e && !e->shiftKey()) {
                        HTMLElement::defaultEventHandler(evt);
                        return;
                    }
                    break;
            }
        }

        if (k) {
            if (k->keyIdentifier() != "Enter") {
                HTMLElement::defaultEventHandler(evt);
                return;
            }
            evt->setDefaultHandled();
            dispatchSimulatedClick(evt);
            return;
        }

        String url = deprecatedParseURL(getAttribute(hrefAttr));

        ASSERT(evt->target());
        ASSERT(evt->target()->toNode());
        if (evt->target()->toNode()->hasTagName(imgTag)) {
            HTMLImageElement* img = static_cast<HTMLImageElement*>(evt->target()->toNode());
            if (img && img->isServerMap()) {
                RenderImage* r = toRenderImage(img->renderer());
                if (r && e) {
                    // FIXME: broken with transforms
                    FloatPoint absPos = r->localToAbsolute();
                    int x = e->pageX() - absPos.x();
                    int y = e->pageY() - absPos.y();
                    url += "?";
                    url += String::number(x);
                    url += ",";
                    url += String::number(y);
                } else {
                    evt->setDefaultHandled();
                    HTMLElement::defaultEventHandler(evt);
                    return;
                }
            }
        }

        if (!evt->defaultPrevented() && document()->frame())
            document()->frame()->loader()->urlSelected(document()->completeURL(url), getAttribute(targetAttr), evt, false, false, true, hasRel(RelationNoReferrer) ? NoReferrer : SendReferrer);

        evt->setDefaultHandled();
    } else if (isLink() && isContentEditable()) {
        // This keeps track of the editable block that the selection was in (if it was in one) just before the link was clicked
        // for the LiveWhenNotFocused editable link behavior
        if (evt->type() == eventNames().mousedownEvent && evt->isMouseEvent() && static_cast<MouseEvent*>(evt)->button() != RightButton && document()->frame() && document()->frame()->selection()) {
            MouseEvent* e = static_cast<MouseEvent*>(evt);

            m_rootEditableElementForSelectionOnMouseDown = document()->frame()->selection()->rootEditableElement();
            m_wasShiftKeyDownOnMouseDown = e && e->shiftKey();
        } else if (evt->type() == eventNames().mouseoverEvent) {
            // These are cleared on mouseover and not mouseout because their values are needed for drag events, but these happen
            // after mouse out events.
            m_rootEditableElementForSelectionOnMouseDown = 0;
            m_wasShiftKeyDownOnMouseDown = false;
        }
    }

    HTMLElement::defaultEventHandler(evt);
}