Example no. 1
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& tileRect, const AffineTransform& patternTransform,
                        const FloatPoint& phase, ColorSpace styleColorSpace, CompositeOperator op, const FloatRect& destRect)
{
    if (!nativeImageForCurrentFrame())
        return;

    ASSERT(patternTransform.isInvertible());
    if (!patternTransform.isInvertible())
        // Avoid a hang under CGContextDrawTiledImage on release builds.
        return;

    CGContextRef context = ctxt->platformContext();
    GraphicsContextStateSaver stateSaver(*ctxt);
    CGContextClipToRect(context, destRect);
    ctxt->setCompositeOperation(op);
    CGContextTranslateCTM(context, destRect.x(), destRect.y() + destRect.height());
    CGContextScaleCTM(context, 1, -1);
    
    // Compute the scaled tile size.
    float scaledTileHeight = tileRect.height() * narrowPrecisionToFloat(patternTransform.d());
    
    // We have to adjust the phase to deal with the fact we're in Cartesian space now (with the bottom left corner of destRect being
    // the origin).
    float adjustedX = phase.x() - destRect.x() + tileRect.x() * narrowPrecisionToFloat(patternTransform.a()); // We translated the context so that destRect.x() is the origin, so subtract it out.
    float adjustedY = destRect.height() - (phase.y() - destRect.y() + tileRect.y() * narrowPrecisionToFloat(patternTransform.d()) + scaledTileHeight);

    CGImageRef tileImage = nativeImageForCurrentFrame();
    float h = CGImageGetHeight(tileImage);

    RetainPtr<CGImageRef> subImage;
    if (tileRect.size() == size())
        subImage = tileImage;
    else {
        // Copying a sub-image out of a partially-decoded image stops the decoding of the original image. It should never happen
        // because sub-images are only used for border-image, which only renders when the image is fully decoded.
        ASSERT(h == height());
        subImage.adoptCF(CGImageCreateWithImageInRect(tileImage, tileRect));
    }

    // Adjust the color space.
    subImage = Image::imageWithColorSpace(subImage.get(), styleColorSpace);
    
    // Leopard has an optimized call for the tiling of image patterns, but we can only use it if the image has been decoded enough that
    // its buffer is the same size as the overall image.  Because a partially decoded CGImageRef with a smaller width or height than the
    // overall image buffer needs to tile with "gaps", we can't use the optimized tiling call in that case.
    // FIXME: We cannot use CGContextDrawTiledImage with scaled tiles on Leopard, because it suffers from rounding errors.  Snow Leopard is ok.
    float scaledTileWidth = tileRect.width() * narrowPrecisionToFloat(patternTransform.a());
    float w = CGImageGetWidth(tileImage);
#ifdef BUILDING_ON_LEOPARD
    if (w == size().width() && h == size().height() && scaledTileWidth == tileRect.width() && scaledTileHeight == tileRect.height())
#else
    if (w == size().width() && h == size().height())
#endif
        CGContextDrawTiledImage(context, FloatRect(adjustedX, adjustedY, scaledTileWidth, scaledTileHeight), subImage.get());
    else {

    // On Leopard and newer, this code now only runs for partially decoded images whose buffers do not yet match the overall size of the image.
    static const CGPatternCallbacks patternCallbacks = { 0, drawPatternCallback, NULL };
    CGAffineTransform matrix = CGAffineTransformMake(narrowPrecisionToCGFloat(patternTransform.a()), 0, 0, narrowPrecisionToCGFloat(patternTransform.d()), adjustedX, adjustedY);
    matrix = CGAffineTransformConcat(matrix, CGContextGetCTM(context));
    // The top of a partially-decoded image is drawn at the bottom of the tile. Map it to the top.
    matrix = CGAffineTransformTranslate(matrix, 0, size().height() - h);
    RetainPtr<CGPatternRef> pattern(AdoptCF, CGPatternCreate(subImage.get(), CGRectMake(0, 0, tileRect.width(), tileRect.height()),
                                             matrix, tileRect.width(), tileRect.height(), 
                                             kCGPatternTilingConstantSpacing, true, &patternCallbacks));
    if (!pattern)
        return;

    RetainPtr<CGColorSpaceRef> patternSpace(AdoptCF, CGColorSpaceCreatePattern(0));
    
    CGFloat alpha = 1;
    RetainPtr<CGColorRef> color(AdoptCF, CGColorCreateWithPattern(patternSpace.get(), pattern.get(), &alpha));
    CGContextSetFillColorSpace(context, patternSpace.get());

    // FIXME: Really want a public API for this.  It is just CGContextSetBaseCTM(context, CGAffineTransformIdentity).
    wkSetBaseCTM(context, CGAffineTransformIdentity);
    CGContextSetPatternPhase(context, CGSizeZero);

    CGContextSetFillColorWithColor(context, color.get());
    CGContextFillRect(context, CGContextGetClipBoundingBox(context));

    }

    stateSaver.restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
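Both CoreGraphics versions of drawPattern in this collection fall back to a CGPattern whose callbacks table names a drawPatternCallback that is outside this excerpt. A plausible sketch, assuming the CGImageRef handed to CGPatternCreate as the info pointer is what the callback receives (the real callback may also snap the rect to device pixels):

static void drawPatternCallback(void* info, CGContextRef context)
{
    // Hypothetical callback: draw the tile image at its natural size into the pattern cell.
    CGImageRef image = static_cast<CGImageRef>(info);
    CGFloat width = CGImageGetWidth(image);
    CGFloat height = CGImageGetHeight(image);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
}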
Example no. 2
bool FilterEffectRenderer::build(Document* document, const FilterOperations& operations)
{
#if !ENABLE(CSS_SHADERS) || !ENABLE(WEBGL)
    UNUSED_PARAM(document);
#else
    CustomFilterProgramList cachedCustomFilterPrograms;
#endif

    m_hasFilterThatMovesPixels = operations.hasFilterThatMovesPixels();
    if (m_hasFilterThatMovesPixels)
        operations.getOutsets(m_topOutset, m_rightOutset, m_bottomOutset, m_leftOutset);
    m_effects.clear();

    RefPtr<FilterEffect> previousEffect;
    for (size_t i = 0; i < operations.operations().size(); ++i) {
        RefPtr<FilterEffect> effect;
        FilterOperation* filterOperation = operations.operations().at(i).get();
        switch (filterOperation->getOperationType()) {
        case FilterOperation::REFERENCE: {
            // FIXME: Not yet implemented.
            // https://bugs.webkit.org/show_bug.cgi?id=72443
            break;
        }
        case FilterOperation::GRAYSCALE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#grayscaleEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.2126 + 0.7874 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 + 0.2848 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 + 0.9278 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SEPIA: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#sepiaEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.393 + 0.607 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.769 - 0.769 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.189 - 0.189 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.349 - 0.349 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.686 + 0.314 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.168 - 0.168 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.272 - 0.272 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.534 - 0.534 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.131 + 0.869 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SATURATE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation->amount()));
            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_SATURATE, inputParameters);
            break;
        }
        case FilterOperation::HUE_ROTATE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation->amount()));
            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_HUEROTATE, inputParameters);
            break;
        }
        case FilterOperation::INVERT: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
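            // Two-entry transfer table: input 0 maps to amount and input 1 maps to 1 - amount, so amount == 1 gives full inversion.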
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation->amount()));
            transferParameters.append(narrowPrecisionToFloat(1 - componentTransferOperation->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::OPACITY: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
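            // Table [0, amount] applied to the alpha channel scales alpha linearly by amount.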
            transferParameters.append(0);
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, nullFunction, nullFunction, nullFunction, transferFunction);
            break;
        }
        case FilterOperation::BRIGHTNESS: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            transferFunction.slope = 1;
            transferFunction.intercept = narrowPrecisionToFloat(componentTransferOperation->amount());

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::CONTRAST: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            float amount = narrowPrecisionToFloat(componentTransferOperation->amount());
            transferFunction.slope = amount;
            transferFunction.intercept = -0.5 * amount + 0.5;
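            // The linear transfer y = amount * x + (0.5 - 0.5 * amount) pivots around 0.5: amount 1 is the identity, amount 0 flattens everything to mid-gray.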
            
            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::BLUR: {
            BlurFilterOperation* blurOperation = static_cast<BlurFilterOperation*>(filterOperation);
            float stdDeviation = floatValueForLength(blurOperation->stdDeviation(), 0);
            effect = FEGaussianBlur::create(this, stdDeviation, stdDeviation);
            break;
        }
        case FilterOperation::DROP_SHADOW: {
            DropShadowFilterOperation* dropShadowOperation = static_cast<DropShadowFilterOperation*>(filterOperation);
            effect = FEDropShadow::create(this, dropShadowOperation->stdDeviation(), dropShadowOperation->stdDeviation(),
                                                dropShadowOperation->x(), dropShadowOperation->y(), dropShadowOperation->color(), 1);
            break;
        }
#if ENABLE(CSS_SHADERS)
        case FilterOperation::CUSTOM: {
#if ENABLE(WEBGL)
            if (!isCSSCustomFilterEnabled(document))
                continue;
            
            CustomFilterOperation* customFilterOperation = static_cast<CustomFilterOperation*>(filterOperation);
            RefPtr<CustomFilterProgram> program = customFilterOperation->program();
            cachedCustomFilterPrograms.append(program);
            program->addClient(this);
            if (program->isLoaded()) {
                effect = FECustomFilter::create(this, document->view()->root()->hostWindow(), program, customFilterOperation->parameters(),
                                                customFilterOperation->meshRows(), customFilterOperation->meshColumns(),
                                                customFilterOperation->meshBoxType(), customFilterOperation->meshType());
            }
#endif
            break;
        }
#endif
        default:
            break;
        }

        if (effect) {
            // Unlike SVG, filters applied here should not clip to their primitive subregions.
            effect->setClipsToBounds(false);
            
            if (previousEffect)
                effect->inputEffects().append(previousEffect);
            m_effects.append(effect);
            previousEffect = effect.release();
        }
    }

#if ENABLE(CSS_SHADERS) && ENABLE(WEBGL)
    removeCustomFilterClients();
    m_cachedCustomFilterPrograms.swap(cachedCustomFilterPrograms);
#endif

    // If we didn't make any effects, tell our caller we are not valid
    if (!previousEffect)
        return false;

    m_effects.first()->inputEffects().append(m_sourceGraphic);
    setMaxEffectRects(m_sourceDrawingRegion);
    
    return true;
}
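The grayscale and sepia cases above assemble a 5x4 feColorMatrix (20 values) three coefficients at a time; at amount == 1 the first grayscale row reduces to the Rec. 709 luma weights (0.2126, 0.7152, 0.0722). The endMatrixRow and lastMatrixRow helpers are not part of this excerpt; a plausible sketch, assuming the usual color-matrix layout where each row ends with an alpha coefficient and a constant offset and the final row leaves alpha untouched:

// Hypothetical helpers completing each 5-entry color-matrix row.
static void endMatrixRow(Vector<float>& parameters)
{
    parameters.append(0); // alpha coefficient
    parameters.append(0); // constant offset
}

static void lastMatrixRow(Vector<float>& parameters)
{
    // Identity alpha row: 0 0 0 1 0.
    parameters.append(0);
    parameters.append(0);
    parameters.append(0);
    parameters.append(1);
    parameters.append(0);
}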
Example no. 3
float SVGSVGElement::getCurrentTime() const
{
    return narrowPrecisionToFloat(m_timeContainer->elapsed().value());
}
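Every one of these call sites funnels a double through narrowPrecisionToFloat before handing it to a float-typed API. The helper itself is not shown in this excerpt; a minimal sketch of such a narrowing function, assuming a WTF-style ASSERT macro and <limits> in scope (the real helper may differ), might look like:

// Hypothetical stand-in for the narrowing helper used throughout these examples:
// assert the double fits in single precision, then narrow with an explicit cast.
inline float narrowPrecisionToFloat(double value)
{
    ASSERT(value <= std::numeric_limits<float>::max());
    ASSERT(value >= -std::numeric_limits<float>::max());
    return static_cast<float>(value);
}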
Example no. 4
bool FilterEffectRenderer::build(RenderObject* renderer, const FilterOperations& operations, FilterConsumer consumer)
{
#if ENABLE(CSS_SHADERS)
    m_hasCustomShaderFilter = false;
#endif
    m_hasFilterThatMovesPixels = operations.hasFilterThatMovesPixels();
    if (m_hasFilterThatMovesPixels)
        m_outsets = operations.outsets();
    
    // Keep the old effects on the stack until we've created the new effects.
    // New FECustomFilters can reuse cached resources from old FECustomFilters.
    FilterEffectList oldEffects;
    m_effects.swap(oldEffects);

    RefPtr<FilterEffect> previousEffect = m_sourceGraphic;
    for (size_t i = 0; i < operations.operations().size(); ++i) {
        RefPtr<FilterEffect> effect;
        FilterOperation* filterOperation = operations.operations().at(i).get();
        switch (filterOperation->getOperationType()) {
        case FilterOperation::REFERENCE: {
            ReferenceFilterOperation* referenceOperation = static_cast<ReferenceFilterOperation*>(filterOperation);
            effect = buildReferenceFilter(renderer, previousEffect, referenceOperation);
            referenceOperation->setFilterEffect(effect);
            break;
        }
        case FilterOperation::GRAYSCALE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#grayscaleEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.2126 + 0.7874 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 + 0.2848 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 + 0.9278 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SEPIA: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#sepiaEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.393 + 0.607 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.769 - 0.769 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.189 - 0.189 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.349 - 0.349 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.686 + 0.314 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.168 - 0.168 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.272 - 0.272 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.534 - 0.534 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.131 + 0.869 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SATURATE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation->amount()));
            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_SATURATE, inputParameters);
            break;
        }
        case FilterOperation::HUE_ROTATE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation->amount()));
            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_HUEROTATE, inputParameters);
            break;
        }
        case FilterOperation::INVERT: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation->amount()));
            transferParameters.append(narrowPrecisionToFloat(1 - componentTransferOperation->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::OPACITY: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(0);
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, nullFunction, nullFunction, nullFunction, transferFunction);
            break;
        }
        case FilterOperation::BRIGHTNESS: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            transferFunction.slope = narrowPrecisionToFloat(componentTransferOperation->amount());
            transferFunction.intercept = 0;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::CONTRAST: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            float amount = narrowPrecisionToFloat(componentTransferOperation->amount());
            transferFunction.slope = amount;
            transferFunction.intercept = -0.5 * amount + 0.5;
            
            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::BLUR: {
            BlurFilterOperation* blurOperation = static_cast<BlurFilterOperation*>(filterOperation);
            float stdDeviation = floatValueForLength(blurOperation->stdDeviation(), 0);
            effect = FEGaussianBlur::create(this, stdDeviation, stdDeviation, consumer == FilterProperty ? EDGEMODE_NONE : EDGEMODE_DUPLICATE);
            break;
        }
        case FilterOperation::DROP_SHADOW: {
            DropShadowFilterOperation* dropShadowOperation = static_cast<DropShadowFilterOperation*>(filterOperation);
            effect = FEDropShadow::create(this, dropShadowOperation->stdDeviation(), dropShadowOperation->stdDeviation(),
                                                dropShadowOperation->x(), dropShadowOperation->y(), dropShadowOperation->color(), 1);
            break;
        }
#if ENABLE(CSS_SHADERS) && USE(3D_GRAPHICS)
        case FilterOperation::CUSTOM:
            // CUSTOM operations are always converted to VALIDATED_CUSTOM before getting here.
            // The conversion happens in RenderLayer::computeFilterOperations.
            ASSERT_NOT_REACHED();
            break;
        case FilterOperation::VALIDATED_CUSTOM: {
            ValidatedCustomFilterOperation* customFilterOperation = static_cast<ValidatedCustomFilterOperation*>(filterOperation);
            Document* document = renderer ? &renderer->document() : 0;
            effect = createCustomFilterEffect(this, document, customFilterOperation);
            if (effect)
                m_hasCustomShaderFilter = true;
            break;
        }
#endif
        default:
            break;
        }

        if (effect) {
            // Unlike SVG Filters and CSSFilterImages, filter functions on the filter
            // property applied here should not clip to their primitive subregions.
            effect->setClipsToBounds(consumer == FilterFunction);
            effect->setOperatingColorSpace(ColorSpaceDeviceRGB);
            
            if (filterOperation->getOperationType() != FilterOperation::REFERENCE) {
                effect->inputEffects().append(previousEffect);
                m_effects.append(effect);
            }
            previousEffect = effect.release();
        }
    }

    // If we didn't make any effects, tell our caller we are not valid
    if (!m_effects.size())
        return false;

    setMaxEffectRects(m_sourceDrawingRegion);
    
    return true;
}
Example no. 5
float SVGAnimationElement::getSimpleDuration(ExceptionCode&) const
{
    return narrowPrecisionToFloat(simpleDuration().value());
}    
Example no. 6
FloatSize FloatSize::narrowPrecision(double width, double height)
{
    return FloatSize(narrowPrecisionToFloat(width), narrowPrecisionToFloat(height));
}
Example no. 7
void AudioBufferSourceNode::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus();

    if (!isInitialized()) {
        outputBus->zero();
        return;
    }

    // The audio thread can't block on this lock, so we call tryLock() instead.
    // Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
    if (m_processLock.tryLock()) {
        // Check if it's time to start playing.
        float sampleRate = this->sampleRate();
        double quantumStartTime = context()->currentTime();
        double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;

        // If we know the end time and it's already passed, then don't bother doing any more rendering this cycle.
        if (m_endTime != UnknownTime && m_endTime <= quantumStartTime) {
            m_isPlaying = false;
            m_virtualReadIndex = 0;
            finish();
        }
        
        if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime >= quantumEndTime) {
            // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence.
            outputBus->zero();
            m_processLock.unlock();
            return;
        }

        double quantumTimeOffset = m_startTime > quantumStartTime ? m_startTime - quantumStartTime : 0;
        size_t quantumFrameOffset = static_cast<unsigned>(quantumTimeOffset * sampleRate);
        quantumFrameOffset = min(quantumFrameOffset, framesToProcess); // clamp to valid range
        size_t bufferFramesToProcess = framesToProcess - quantumFrameOffset;

        // Render by reading directly from the buffer.
        renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess);

        // Apply the gain (in-place) to the output bus.
        double totalGain = gain()->value() * m_buffer->gain();
        outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);

        // If the end time is somewhere in the middle of this time quantum, then simply zero out the
        // frames starting at the end time.
        if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) {
            size_t zeroStartFrame = narrowPrecisionToFloat((m_endTime - quantumStartTime) * sampleRate);
            size_t framesToZero = framesToProcess - zeroStartFrame;

            bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess;
            ASSERT(isSafe);
            
            if (isSafe) {
                for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
                    memset(outputBus->channel(i)->data() + zeroStartFrame, 0, sizeof(float) * framesToZero);
            }

            m_isPlaying = false;
            m_virtualReadIndex = 0;
            finish();
        }

        m_processLock.unlock();
    } else {
        // Too bad - the tryLock() failed.  We must be in the middle of changing buffers and were already outputting silence anyway.
        outputBus->zero();
    }
}
Example no. 8
float SVGAnimationElement::getStartTime() const
{
    return narrowPrecisionToFloat(intervalBegin().value());
}
Example no. 9
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& srcRect, const AffineTransform& patternTransform, const FloatPoint& phase, ColorSpace, CompositeOperator, const FloatRect& dstRect)
{
#if USE(WXGC)
    wxGCDC* context = (wxGCDC*)ctxt->platformContext();
    wxGraphicsBitmap* bitmap = nativeImageForCurrentFrame();
#else
    wxDC* context = ctxt->platformContext();
    wxBitmap* bitmap = nativeImageForCurrentFrame();
#endif

    if (!bitmap) // If it's too early we won't have an image yet.
        return;

    ctxt->save();
    ctxt->clip(IntRect(dstRect.x(), dstRect.y(), dstRect.width(), dstRect.height()));
    
    float currentW = 0;
    float currentH = 0;
    
#if USE(WXGC)
    wxGraphicsContext* gc = context->GetGraphicsContext();

    float adjustedX = phase.x() + srcRect.x() *
                      narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + srcRect.y() *
                      narrowPrecisionToFloat(patternTransform.d());
                      
    gc->ConcatTransform(patternTransform);
#else
    float adjustedX = phase.x();
    float adjustedY = phase.y();
    wxMemoryDC mydc;
    mydc.SelectObject(*bitmap);

    ctxt->concatCTM(patternTransform);
#endif

    //wxPoint origin(context->GetDeviceOrigin());
    AffineTransform mat(ctxt->getCTM());
    wxPoint origin(mat.mapPoint(IntPoint(0, 0)));
    wxSize clientSize(context->GetSize());

    wxCoord w = srcRect.width();
    wxCoord h = srcRect.height();
    wxCoord srcx = srcRect.x();
    wxCoord srcy = srcRect.y();

    while (currentW < dstRect.right() - phase.x() && origin.x + adjustedX + currentW < clientSize.x) {
        while (currentH < dstRect.bottom() - phase.y() && origin.y + adjustedY + currentH < clientSize.y) {
#if USE(WXGC)
#if wxCHECK_VERSION(2,9,0)
            gc->DrawBitmap(*bitmap, adjustedX + currentW, adjustedY + currentH, (wxDouble)srcRect.width(), (wxDouble)srcRect.height());
#else
            gc->DrawGraphicsBitmap(*bitmap, adjustedX + currentW, adjustedY + currentH, (wxDouble)srcRect.width(), (wxDouble)srcRect.height());
#endif
#else
            context->Blit(adjustedX + currentW, adjustedY + currentH,
                          w, h, &mydc, srcx, srcy, wxCOPY, true);
#endif
            currentH += srcRect.height();
        }
        currentW += srcRect.width();
        currentH = 0;
    }
    ctxt->restore();

#if !USE(WXGC)
    mydc.SelectObject(wxNullBitmap);
#endif    
    
    // NB: delete is causing crashes during page load, but not during the deletion
    // itself. It occurs later on when a valid bitmap created in frameAtIndex
    // suddenly becomes invalid after returning. It's possible these errors deal
    // with reentrancy and threading problems.
    //delete bitmap;

    startAnimation();

    if (ImageObserver* observer = imageObserver())
        observer->didDraw(this);
}
Example no. 10
void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames)
{
    ASSERT(context()->isAudioThread());
    
    // Basic sanity checking
    ASSERT(bus);
    ASSERT(buffer());
    if (!bus || !buffer())
        return;

    unsigned numberOfChannels = this->numberOfChannels();
    unsigned busNumberOfChannels = bus->numberOfChannels();

    // FIXME: we can add support for sources with more than two channels, but this is not a common case.
    bool channelCountGood = numberOfChannels == busNumberOfChannels && (numberOfChannels == 1 || numberOfChannels == 2);
    ASSERT(channelCountGood);
    if (!channelCountGood)
        return;

    // Get the destination pointers.
    float* destinationL = bus->channel(0)->data();
    ASSERT(destinationL);
    if (!destinationL)
        return;
    float* destinationR = (numberOfChannels < 2) ? 0 : bus->channel(1)->data();
    
    bool isStereo = destinationR;
    
    // Sanity check destinationFrameOffset, numberOfFrames.
    size_t destinationLength = bus->length();

    bool isLengthGood = destinationLength <= 4096 && numberOfFrames <= 4096;
    ASSERT(isLengthGood);
    if (!isLengthGood)
        return;

    bool isOffsetGood = destinationFrameOffset <= destinationLength && destinationFrameOffset + numberOfFrames <= destinationLength;
    ASSERT(isOffsetGood);
    if (!isOffsetGood)
        return;
        
    // Potentially zero out initial frames leading up to the offset.
    if (destinationFrameOffset) {
        memset(destinationL, 0, sizeof(float) * destinationFrameOffset);
        if (destinationR)
            memset(destinationR, 0, sizeof(float) * destinationFrameOffset);
    }

    // Offset the pointers to the correct offset frame.
    destinationL += destinationFrameOffset;
    if (destinationR)
        destinationR += destinationFrameOffset;

    size_t bufferLength = buffer()->length();
    double bufferSampleRate = buffer()->sampleRate();

    // Calculate the start and end frames in our buffer that we want to play.
    // If m_isGrain is true, then we will be playing a portion of the total buffer.
    unsigned startFrame = m_isGrain ? static_cast<unsigned>(m_grainOffset * bufferSampleRate) : 0;
    unsigned endFrame = m_isGrain ? static_cast<unsigned>(startFrame + m_grainDuration * bufferSampleRate) : bufferLength;
    
    ASSERT(endFrame >= startFrame);
    if (endFrame < startFrame)
        return;
    
    unsigned deltaFrames = endFrame - startFrame;
    
    // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
    // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.
    if (m_isGrain)
        endFrame += 512;

    // Do some sanity checking.
    if (startFrame >= bufferLength)
        startFrame = !bufferLength ? 0 : bufferLength - 1;
    if (endFrame > bufferLength)
        endFrame = bufferLength;
    if (m_virtualReadIndex >= endFrame)
        m_virtualReadIndex = startFrame; // reset to start

    // Get pointers to the start of the sample buffer.
    float* sourceL = m_buffer->getChannelData(0)->data();
    float* sourceR = m_buffer->numberOfChannels() == 2 ? m_buffer->getChannelData(1)->data() : 0;
    
    double pitchRate = totalPitchRate();

    // Get local copy.
    double virtualReadIndex = m_virtualReadIndex;

    // Render loop - reading from the source buffer to the destination using linear interpolation.
    // FIXME: optimize for the very common case of playing back with pitchRate == 1.
    // We can avoid the linear interpolation.
    int framesToProcess = numberOfFrames;
    while (framesToProcess--) {
        unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
        double interpolationFactor = virtualReadIndex - readIndex;
        
        // For linear interpolation we need the next sample-frame too.
        unsigned readIndex2 = readIndex + 1;
        if (readIndex2 >= endFrame) {
            if (loop()) {
                // Make sure to wrap around at the end of the buffer.
                readIndex2 -= deltaFrames;
            } else
                readIndex2 = readIndex;
        }

        // Final sanity check on buffer access.
        // FIXME: as an optimization, try to get rid of this inner-loop check and put assertions and guards before the loop.
        if (readIndex >= bufferLength || readIndex2 >= bufferLength)
            break;

        // Linear interpolation.
        double sampleL1 = sourceL[readIndex];
        double sampleL2 = sourceL[readIndex2];
        double sampleL = (1.0 - interpolationFactor) * sampleL1 + interpolationFactor * sampleL2;
        *destinationL++ = narrowPrecisionToFloat(sampleL);

        if (isStereo) {
            double sampleR1 = sourceR[readIndex];
            double sampleR2 = sourceR[readIndex2];
            double sampleR = (1.0 - interpolationFactor) * sampleR1 + interpolationFactor * sampleR2;
            *destinationR++ = narrowPrecisionToFloat(sampleR);
        }

        virtualReadIndex += pitchRate;

        // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
        if (virtualReadIndex >= endFrame) {
            virtualReadIndex -= deltaFrames;

            if (!loop()) {
                // If we're not looping, then stop playing when we get to the end.
                m_isPlaying = false;

                if (framesToProcess > 0) {
                    // We're not looping and we've reached the end of the sample data, but we still need to provide more output,
                    // so generate silence for the remaining.
                    memset(destinationL, 0, sizeof(float) * framesToProcess);

                    if (isStereo)
                        memset(destinationR, 0, sizeof(float) * framesToProcess);
                }

                finish();
                break;
            }
        }
    }
    
    m_virtualReadIndex = virtualReadIndex;
}
Example no. 11
String SVGTransformValue::valueAsString() const
{
    const String& prefix = transformTypePrefixForParsing(m_type);
    switch (m_type) {
    case SVG_TRANSFORM_UNKNOWN:
        return prefix;
    case SVG_TRANSFORM_MATRIX: {
        StringBuilder builder;
        builder.append(prefix);
        builder.appendNumber(m_matrix.a());
        builder.append(' ');
        builder.appendNumber(m_matrix.b());
        builder.append(' ');
        builder.appendNumber(m_matrix.c());
        builder.append(' ');
        builder.appendNumber(m_matrix.d());
        builder.append(' ');
        builder.appendNumber(m_matrix.e());
        builder.append(' ');
        builder.appendNumber(m_matrix.f());
        builder.append(')');
        return builder.toString();
    }
    case SVG_TRANSFORM_TRANSLATE: {
        StringBuilder builder;
        builder.append(prefix);
        builder.appendNumber(m_matrix.e());
        builder.append(' ');
        builder.appendNumber(m_matrix.f());
        builder.append(')');
        return builder.toString();
    }
    case SVG_TRANSFORM_SCALE: {
        StringBuilder builder;
        builder.append(prefix);
        builder.appendNumber(m_matrix.xScale());
        builder.append(' ');
        builder.appendNumber(m_matrix.yScale());
        builder.append(')');
        return builder.toString();
    }
    case SVG_TRANSFORM_ROTATE: {
        double angleInRad = deg2rad(m_angle);
        double cosAngle = std::cos(angleInRad);
        double sinAngle = std::sin(angleInRad);
        float cx = narrowPrecisionToFloat(cosAngle != 1 ? (m_matrix.e() * (1 - cosAngle) - m_matrix.f() * sinAngle) / (1 - cosAngle) / 2 : 0);
        float cy = narrowPrecisionToFloat(cosAngle != 1 ? (m_matrix.e() * sinAngle / (1 - cosAngle) + m_matrix.f()) / 2 : 0);
        StringBuilder builder;
        builder.append(prefix);
        builder.appendNumber(m_angle);
        if (cx || cy) {
            builder.append(' ');
            builder.appendNumber(cx);
            builder.append(' ');
            builder.appendNumber(cy);
        }
        builder.append(')');
        return builder.toString();
    }
    case SVG_TRANSFORM_SKEWX:
    case SVG_TRANSFORM_SKEWY: {
        StringBuilder builder;
        builder.append(prefix);
        builder.appendNumber(m_angle);
        builder.append(')');
        return builder.toString();
    }
    }

    ASSERT_NOT_REACHED();
    return emptyString();
}
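The rotate branch recovers the rotation center by inverting the rotate-about-a-point composition translate(cx, cy) * rotate(A) * translate(-cx, -cy), whose translation components are e = cx(1 - cos A) + cy sin A and f = cy(1 - cos A) - cx sin A. Eliminating one unknown at a time gives

    cx = (e(1 - cos A) - f sin A) / (2(1 - cos A))
    cy = (e sin A / (1 - cos A) + f) / 2

which is exactly what the code computes, with the cos A == 1 (zero rotation) case special-cased to the origin.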
Example no. 12
PassRefPtr<AnimatableValue> AnimatableSVGLength::interpolateTo(const AnimatableValue* value, double fraction) const
{
    return create(toAnimatableSVGLength(value)->toSVGLength().blend(m_length, narrowPrecisionToFloat(fraction)));
}
Example no. 13
bool SVGTransformable::parseTransformAttribute(SVGTransformList* list, const AtomicString& transform)
{
    double x[] = {0, 0, 0, 0, 0, 0};
    int nr = 0, required = 0, optional = 0;
    
    const UChar* currTransform = transform.characters();
    const UChar* end = currTransform + transform.length();

    bool delimParsed = false;
    while (currTransform < end) {
        delimParsed = false;
        unsigned short type = SVGTransform::SVG_TRANSFORM_UNKNOWN;
        skipOptionalSpaces(currTransform, end);
        
        if (currTransform >= end)
            return false;
        
        if (*currTransform == 's') {
            if (skipString(currTransform, end, skewXDesc, sizeof(skewXDesc) / sizeof(UChar))) {
                required = 1;
                optional = 0;
                type = SVGTransform::SVG_TRANSFORM_SKEWX;
            } else if (skipString(currTransform, end, skewYDesc, sizeof(skewYDesc) / sizeof(UChar))) {
                required = 1;
                optional = 0;
                type = SVGTransform::SVG_TRANSFORM_SKEWY;
            } else if (skipString(currTransform, end, scaleDesc, sizeof(scaleDesc) / sizeof(UChar))) {
                required = 1;
                optional = 1;
                type = SVGTransform::SVG_TRANSFORM_SCALE;
            } else
                return false;
        } else if (skipString(currTransform, end, translateDesc, sizeof(translateDesc) / sizeof(UChar))) {
            required = 1;
            optional = 1;
            type = SVGTransform::SVG_TRANSFORM_TRANSLATE;
        } else if (skipString(currTransform, end, rotateDesc, sizeof(rotateDesc) / sizeof(UChar))) {
            required = 1;
            optional = 2;
            type = SVGTransform::SVG_TRANSFORM_ROTATE;
        } else if (skipString(currTransform, end, matrixDesc, sizeof(matrixDesc) / sizeof(UChar))) {
            required = 6;
            optional = 0;
            type = SVGTransform::SVG_TRANSFORM_MATRIX;
        } else 
            return false;

        if ((nr = parseTransformParamList(currTransform, end, x, required, optional)) < 0)
            return false;

        SVGTransform t;

        switch (type) {
            case SVGTransform::SVG_TRANSFORM_SKEWX:
                t.setSkewX(narrowPrecisionToFloat(x[0]));
                break;
            case SVGTransform::SVG_TRANSFORM_SKEWY:
                t.setSkewY(narrowPrecisionToFloat(x[0]));
                break;
            case SVGTransform::SVG_TRANSFORM_SCALE:
                if (nr == 1) // Spec: if only one param given, assume uniform scaling
                    t.setScale(narrowPrecisionToFloat(x[0]), narrowPrecisionToFloat(x[0]));
                else
                    t.setScale(narrowPrecisionToFloat(x[0]), narrowPrecisionToFloat(x[1]));
                break;
            case SVGTransform::SVG_TRANSFORM_TRANSLATE:
                if (nr == 1) // Spec: if only one param given, assume 2nd param to be 0
                    t.setTranslate(narrowPrecisionToFloat(x[0]), 0);
                else
                    t.setTranslate(narrowPrecisionToFloat(x[0]), narrowPrecisionToFloat(x[1]));
                break;
            case SVGTransform::SVG_TRANSFORM_ROTATE:
                if (nr == 1)
                    t.setRotate(narrowPrecisionToFloat(x[0]), 0, 0);
                else
                    t.setRotate(narrowPrecisionToFloat(x[0]), narrowPrecisionToFloat(x[1]), narrowPrecisionToFloat(x[2]));
                break;
            case SVGTransform::SVG_TRANSFORM_MATRIX:
                t.setMatrix(AffineTransform(x[0], x[1], x[2], x[3], x[4], x[5]));
                break;
        }

        ExceptionCode ec = 0;
        list->appendItem(t, ec);
        skipOptionalSpaces(currTransform, end);
        if (currTransform < end && *currTransform == ',') {
            delimParsed = true;
            currTransform++;
        }
        skipOptionalSpaces(currTransform, end);
    }

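    // A trailing comma after the last transform leaves delimParsed set, which makes the whole parse fail.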
    return !delimParsed;
}
Example no. 14
void Image::drawPattern(GraphicsContext* ctxt, const FloatRect& tileRect, const TransformationMatrix& patternTransform,
                        const FloatPoint& phase, CompositeOperator op, const FloatRect& destRect)
{
    if (!nativeImageForCurrentFrame())
        return;

    ASSERT(patternTransform.isInvertible());
    if (!patternTransform.isInvertible())
        // Avoid a hang under CGContextDrawTiledImage on release builds.
        return;

    CGContextRef context = ctxt->platformContext();
    ctxt->save();
    CGContextClipToRect(context, destRect);
    ctxt->setCompositeOperation(op);
    CGContextTranslateCTM(context, destRect.x(), destRect.y() + destRect.height());
    CGContextScaleCTM(context, 1, -1);
    
    // Compute the scaled tile size.
    float scaledTileHeight = tileRect.height() * narrowPrecisionToFloat(patternTransform.d());
    
    // We have to adjust the phase to deal with the fact we're in Cartesian space now (with the bottom left corner of destRect being
    // the origin).
    float adjustedX = phase.x() - destRect.x() + tileRect.x() * narrowPrecisionToFloat(patternTransform.a()); // We translated the context so that destRect.x() is the origin, so subtract it out.
    float adjustedY = destRect.height() - (phase.y() - destRect.y() + tileRect.y() * narrowPrecisionToFloat(patternTransform.d()) + scaledTileHeight);

    CGImageRef tileImage = nativeImageForCurrentFrame();
    float h = CGImageGetHeight(tileImage);

    CGImageRef subImage;
    if (tileRect.size() == size())
        subImage = tileImage;
    else {
        // Copying a sub-image out of a partially-decoded image stops the decoding of the original image. It should never happen
        // because sub-images are only used for border-image, which only renders when the image is fully decoded.
        ASSERT(h == height());
        subImage = CGImageCreateWithImageInRect(tileImage, tileRect);
    }
    
#ifndef BUILDING_ON_TIGER
    // Leopard has an optimized call for the tiling of image patterns, but we can only use it if the image has been decoded enough that
    // its buffer is the same size as the overall image.  Because a partially decoded CGImageRef with a smaller width or height than the
    // overall image buffer needs to tile with "gaps", we can't use the optimized tiling call in that case.
    // FIXME: Could create WebKitSystemInterface SPI for CGCreatePatternWithImage2 and probably make Tiger tile faster as well.
    // FIXME: We cannot use CGContextDrawTiledImage with scaled tiles on Leopard, because it suffers from rounding errors.  Snow Leopard is ok.
    float scaledTileWidth = tileRect.width() * narrowPrecisionToFloat(patternTransform.a());
    float w = CGImageGetWidth(tileImage);
#ifdef BUILDING_ON_LEOPARD
    if (w == size().width() && h == size().height() && scaledTileWidth == tileRect.width() && scaledTileHeight == tileRect.height())
#else
    if (w == size().width() && h == size().height())
#endif
        CGContextDrawTiledImage(context, FloatRect(adjustedX, adjustedY, scaledTileWidth, scaledTileHeight), subImage);
    else {
#endif

    // On Leopard, this code now only runs for partially decoded images whose buffers do not yet match the overall size of the image.
    // On Tiger this code runs all the time.  This code is suboptimal because the pattern does not reference the image directly, and the
    // pattern is destroyed before exiting the function.  This means any decoding the pattern does doesn't end up cached anywhere, so we
    // redecode every time we paint.
    static const CGPatternCallbacks patternCallbacks = { 0, drawPatternCallback, NULL };
    CGAffineTransform matrix = CGAffineTransformMake(narrowPrecisionToCGFloat(patternTransform.a()), 0, 0, narrowPrecisionToCGFloat(patternTransform.d()), adjustedX, adjustedY);
    matrix = CGAffineTransformConcat(matrix, CGContextGetCTM(context));
    // The top of a partially-decoded image is drawn at the bottom of the tile. Map it to the top.
    matrix = CGAffineTransformTranslate(matrix, 0, size().height() - h);
    CGPatternRef pattern = CGPatternCreate(subImage, CGRectMake(0, 0, tileRect.width(), tileRect.height()),
                                           matrix, tileRect.width(), tileRect.height(), 
                                           kCGPatternTilingConstantSpacing, true, &patternCallbacks);
    if (pattern == NULL) {
        if (subImage != tileImage)
            CGImageRelease(subImage);
        ctxt->restore();
        return;
    }

    CGColorSpaceRef patternSpace = CGColorSpaceCreatePattern(NULL);
    
    CGFloat alpha = 1;
    CGColorRef color = CGColorCreateWithPattern(patternSpace, pattern, &alpha);
    CGContextSetFillColorSpace(context, patternSpace);
    CGColorSpaceRelease(patternSpace);
    CGPatternRelease(pattern);

    // FIXME: Really want a public API for this.  It is just CGContextSetBaseCTM(context, CGAffineTransformIdentity).
    wkSetPatternBaseCTM(context, CGAffineTransformIdentity);
    CGContextSetPatternPhase(context, CGSizeZero);

    CGContextSetFillColorWithColor(context, color);
    CGContextFillRect(context, CGContextGetClipBoundingBox(context));
    
    CGColorRelease(color);
    
#ifndef BUILDING_ON_TIGER
    }
#endif

    if (subImage != tileImage)
        CGImageRelease(subImage);
    ctxt->restore();

    if (imageObserver())
        imageObserver()->didDraw(this);
}
Example no. 15
PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
    if (!m_extAudioFileRef)
        return nullptr;

    // Get file's data format
    UInt32 size = sizeof(m_fileDataFormat);
    OSStatus result = ExtAudioFileGetProperty(m_extAudioFileRef, kExtAudioFileProperty_FileDataFormat, &size, &m_fileDataFormat);
    if (result != noErr)
        return nullptr;

    // Number of channels
    size_t numberOfChannels = m_fileDataFormat.mChannelsPerFrame;

    // Number of frames
    SInt64 numberOfFrames64 = 0;
    size = sizeof(numberOfFrames64);
    result = ExtAudioFileGetProperty(m_extAudioFileRef, kExtAudioFileProperty_FileLengthFrames, &size, &numberOfFrames64);
    if (result != noErr)
        return nullptr;

    // Sample-rate
    double fileSampleRate = m_fileDataFormat.mSampleRate;

    // Make client format same number of channels as file format, but tweak a few things.
    // Client format will be linear PCM (canonical), and potentially change sample-rate.
    m_clientDataFormat = m_fileDataFormat;

    m_clientDataFormat.mFormatID = kAudioFormatLinearPCM;
    m_clientDataFormat.mFormatFlags = kAudioFormatFlagsCanonical;
    m_clientDataFormat.mBitsPerChannel = 8 * sizeof(AudioSampleType);
    m_clientDataFormat.mChannelsPerFrame = numberOfChannels;
    m_clientDataFormat.mFramesPerPacket = 1;
    m_clientDataFormat.mBytesPerPacket = sizeof(AudioSampleType);
    m_clientDataFormat.mBytesPerFrame = sizeof(AudioSampleType);
    m_clientDataFormat.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;

    if (sampleRate)
        m_clientDataFormat.mSampleRate = sampleRate;

    result = ExtAudioFileSetProperty(m_extAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &m_clientDataFormat);
    if (result != noErr)
        return nullptr;

    // Scale numberOfFrames64 to the destination sample-rate
    numberOfFrames64 = numberOfFrames64 * (m_clientDataFormat.mSampleRate / fileSampleRate);
    size_t numberOfFrames = static_cast<size_t>(numberOfFrames64);

    size_t busChannelCount = mixToMono ? 1 : numberOfChannels;

    // Create AudioBus where we'll put the PCM audio data
    OwnPtr<AudioBus> audioBus = adoptPtr(new AudioBus(busChannelCount, numberOfFrames));
    audioBus->setSampleRate(narrowPrecisionToFloat(m_clientDataFormat.mSampleRate)); // save for later

    // Only allocated in the mixToMono case
    AudioFloatArray bufL;
    AudioFloatArray bufR;
    float* bufferL = 0;
    float* bufferR = 0;
    
    // Set up the AudioBufferList in preparation for reading
    AudioBufferList* bufferList = createAudioBufferList(numberOfChannels);

    if (mixToMono && numberOfChannels == 2) {
        bufL.allocate(numberOfFrames);
        bufR.allocate(numberOfFrames);
        bufferL = bufL.data();
        bufferR = bufR.data();

        bufferList->mBuffers[0].mNumberChannels = 1;
        bufferList->mBuffers[0].mDataByteSize = numberOfFrames * sizeof(float);
        bufferList->mBuffers[0].mData = bufferL;

        bufferList->mBuffers[1].mNumberChannels = 1;
        bufferList->mBuffers[1].mDataByteSize = numberOfFrames * sizeof(float);
        bufferList->mBuffers[1].mData = bufferR;
    } else {
        ASSERT(!mixToMono || numberOfChannels == 1);

        // for True-stereo (numberOfChannels == 4)
        for (size_t i = 0; i < numberOfChannels; ++i) {
            bufferList->mBuffers[i].mNumberChannels = 1;
            bufferList->mBuffers[i].mDataByteSize = numberOfFrames * sizeof(float);
            bufferList->mBuffers[i].mData = audioBus->channel(i)->mutableData();
        }
    }

    // Read from the file (or in-memory version)
    UInt32 framesToRead = numberOfFrames;
    result = ExtAudioFileRead(m_extAudioFileRef, &framesToRead, bufferList);
    if (result != noErr) {
        destroyAudioBufferList(bufferList);
        return nullptr;
    }

    if (mixToMono && numberOfChannels == 2) {
        // Mix stereo down to mono
        float* destL = audioBus->channel(0)->mutableData();
        for (size_t i = 0; i < numberOfFrames; i++)
            destL[i] = 0.5f * (bufferL[i] + bufferR[i]);
    }

    // Cleanup
    destroyAudioBufferList(bufferList);

    return audioBus.release();
}
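createAudioBufferList and destroyAudioBufferList are not part of this excerpt. A minimal sketch of what such helpers typically look like for a non-interleaved read, assuming Core Audio's AudioBufferList declaration is in scope (plus <cstddef> for offsetof and <cstdlib> for calloc/free); the allocation strategy is an assumption, not the file's actual implementation:

static AudioBufferList* createAudioBufferList(size_t numberOfBuffers)
{
    // AudioBufferList declares a single inline AudioBuffer; allocate space for the rest.
    size_t byteCount = offsetof(AudioBufferList, mBuffers) + numberOfBuffers * sizeof(AudioBuffer);
    AudioBufferList* bufferList = static_cast<AudioBufferList*>(calloc(1, byteCount));
    if (bufferList)
        bufferList->mNumberBuffers = static_cast<UInt32>(numberOfBuffers);
    return bufferList;
}

static void destroyAudioBufferList(AudioBufferList* bufferList)
{
    free(bufferList);
}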
Example no. 16
FloatPoint FloatPoint::narrowPrecision(double x, double y)
{
    return FloatPoint(narrowPrecisionToFloat(x), narrowPrecisionToFloat(y));
}
Example no. 17
float charactersToFloat(const UChar* data, size_t length, bool* ok)
{
    // FIXME: This will return ok even when the string fits into a double but not a float.
    return narrowPrecisionToFloat(charactersToDouble(data, length, ok));
}
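The FIXME above notes that ok only reports whether the characters parsed as a double, not whether the result survives narrowing to float. A hedged sketch of a stricter variant (the name, and the explicit range check, are assumptions rather than existing API; requires <limits>):

float charactersToFloatStrict(const UChar* data, size_t length, bool* ok)
{
    bool parsedOk = false;
    double value = charactersToDouble(data, length, &parsedOk);
    // Also reject finite doubles that overflow the float range.
    bool fitsInFloat = value >= -std::numeric_limits<float>::max()
        && value <= std::numeric_limits<float>::max();
    if (ok)
        *ok = parsedOk && fitsInFloat;
    return static_cast<float>(value);
}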
Example no. 18
float SVGAnimationElement::getCurrentTime() const
{
    return narrowPrecisionToFloat(elapsed().value());
}
Example no. 19
float SVGSVGElement::getCurrentTime() const
{
    ASSERT(RuntimeEnabledFeatures::smilEnabled());
    return narrowPrecisionToFloat(m_timeContainer->elapsed().value());
}
Example no. 20
static inline void copyTransform(CATransform3D& toT3D, const TransformationMatrix& t)
{
    toT3D.m11 = narrowPrecisionToFloat(t.m11());
    toT3D.m12 = narrowPrecisionToFloat(t.m12());
    toT3D.m13 = narrowPrecisionToFloat(t.m13());
    toT3D.m14 = narrowPrecisionToFloat(t.m14());
    toT3D.m21 = narrowPrecisionToFloat(t.m21());
    toT3D.m22 = narrowPrecisionToFloat(t.m22());
    toT3D.m23 = narrowPrecisionToFloat(t.m23());
    toT3D.m24 = narrowPrecisionToFloat(t.m24());
    toT3D.m31 = narrowPrecisionToFloat(t.m31());
    toT3D.m32 = narrowPrecisionToFloat(t.m32());
    toT3D.m33 = narrowPrecisionToFloat(t.m33());
    toT3D.m34 = narrowPrecisionToFloat(t.m34());
    toT3D.m41 = narrowPrecisionToFloat(t.m41());
    toT3D.m42 = narrowPrecisionToFloat(t.m42());
    toT3D.m43 = narrowPrecisionToFloat(t.m43());
    toT3D.m44 = narrowPrecisionToFloat(t.m44());
}
Example no. 21
bool FilterEffectRenderer::build(RenderElement* renderer, const FilterOperations& operations, FilterConsumer consumer)
{
    m_hasFilterThatMovesPixels = operations.hasFilterThatMovesPixels();
    if (m_hasFilterThatMovesPixels)
        m_outsets = operations.outsets();

    m_effects.clear();

    RefPtr<FilterEffect> previousEffect = m_sourceGraphic;
    for (size_t i = 0; i < operations.operations().size(); ++i) {
        RefPtr<FilterEffect> effect;
        FilterOperation& filterOperation = *operations.operations().at(i);
        switch (filterOperation.type()) {
        case FilterOperation::REFERENCE: {
            ReferenceFilterOperation& referenceOperation = downcast<ReferenceFilterOperation>(filterOperation);
            effect = buildReferenceFilter(renderer, previousEffect, &referenceOperation);
            referenceOperation.setFilterEffect(effect);
            break;
        }
        case FilterOperation::GRAYSCALE: {
            BasicColorMatrixFilterOperation& colorMatrixOperation = downcast<BasicColorMatrixFilterOperation>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation.amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#grayscaleEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.2126 + 0.7874 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 + 0.2848 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 + 0.9278 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(*this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SEPIA: {
            BasicColorMatrixFilterOperation& colorMatrixOperation = downcast<BasicColorMatrixFilterOperation>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation.amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#sepiaEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.393 + 0.607 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.769 - 0.769 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.189 - 0.189 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.349 - 0.349 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.686 + 0.314 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.168 - 0.168 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.272 - 0.272 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.534 - 0.534 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.131 + 0.869 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(*this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SATURATE: {
            BasicColorMatrixFilterOperation& colorMatrixOperation = downcast<BasicColorMatrixFilterOperation>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation.amount()));
            effect = FEColorMatrix::create(*this, FECOLORMATRIX_TYPE_SATURATE, inputParameters);
            break;
        }
        case FilterOperation::HUE_ROTATE: {
            BasicColorMatrixFilterOperation& colorMatrixOperation = downcast<BasicColorMatrixFilterOperation>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation.amount()));
            effect = FEColorMatrix::create(*this, FECOLORMATRIX_TYPE_HUEROTATE, inputParameters);
            break;
        }
        case FilterOperation::INVERT: {
            BasicComponentTransferFilterOperation& componentTransferOperation = downcast<BasicComponentTransferFilterOperation>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation.amount()));
            transferParameters.append(narrowPrecisionToFloat(1 - componentTransferOperation.amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(*this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::OPACITY: {
            BasicComponentTransferFilterOperation& componentTransferOperation = downcast<BasicComponentTransferFilterOperation>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(0);
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation.amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(*this, nullFunction, nullFunction, nullFunction, transferFunction);
            break;
        }
        case FilterOperation::BRIGHTNESS: {
            BasicComponentTransferFilterOperation& componentTransferOperation = downcast<BasicComponentTransferFilterOperation>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            transferFunction.slope = narrowPrecisionToFloat(componentTransferOperation.amount());
            transferFunction.intercept = 0;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(*this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::CONTRAST: {
            BasicComponentTransferFilterOperation& componentTransferOperation = downcast<BasicComponentTransferFilterOperation>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            float amount = narrowPrecisionToFloat(componentTransferOperation.amount());
            transferFunction.slope = amount;
            transferFunction.intercept = -0.5 * amount + 0.5;
            
            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(*this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::BLUR: {
            BlurFilterOperation& blurOperation = downcast<BlurFilterOperation>(filterOperation);
            float stdDeviation = floatValueForLength(blurOperation.stdDeviation(), 0);
            effect = FEGaussianBlur::create(*this, stdDeviation, stdDeviation, consumer == FilterProperty ? EDGEMODE_NONE : EDGEMODE_DUPLICATE);
            break;
        }
        case FilterOperation::DROP_SHADOW: {
            DropShadowFilterOperation& dropShadowOperation = downcast<DropShadowFilterOperation>(filterOperation);
            effect = FEDropShadow::create(*this, dropShadowOperation.stdDeviation(), dropShadowOperation.stdDeviation(),
                dropShadowOperation.x(), dropShadowOperation.y(), dropShadowOperation.color(), 1);
            break;
        }
        default:
            break;
        }

        if (effect) {
            // Unlike SVG Filters and CSSFilterImages, filter functions on the filter
            // property applied here should not clip to their primitive subregions.
            effect->setClipsToBounds(consumer == FilterFunction);
            effect->setOperatingColorSpace(ColorSpaceDeviceRGB);
            
            if (filterOperation.type() != FilterOperation::REFERENCE) {
                effect->inputEffects().append(previousEffect);
                m_effects.append(effect);
            }
            previousEffect = effect.release();
        }
    }

    // If we didn't make any effects, tell our caller we are not valid
    if (!m_effects.size())
        return false;

    setMaxEffectRects(m_sourceDrawingRegion);
    
    return true;
}
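The GRAYSCALE and SEPIA cases above interpolate the 5x4 color-matrix rows from the formulas in the linked Filter Effects draft. As a quick sanity check of those formulas (a standalone sketch using only the coefficients already present in the code), at amount = 1 every grayscale row collapses to the Rec. 709 luma weights 0.2126, 0.7152, 0.0722:

#include <cstdio>

int main()
{
    double amount = 1.0;
    double k = 1.0 - amount; // oneMinusAmount in the code above
    double rows[3][3] = {
        { 0.2126 + 0.7874 * k, 0.7152 - 0.7152 * k, 0.0722 - 0.0722 * k },
        { 0.2126 - 0.2126 * k, 0.7152 + 0.2848 * k, 0.0722 - 0.0722 * k },
        { 0.2126 - 0.2126 * k, 0.7152 - 0.7152 * k, 0.0722 + 0.9278 * k },
    };
    for (int r = 0; r < 3; ++r)
        std::printf("%.4f %.4f %.4f\n", rows[r][0], rows[r][1], rows[r][2]); // prints 0.2126 0.7152 0.0722 for each row
    return 0;
}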
Example n. 22
float AudioParam::smoothedValue()
{
    return narrowPrecisionToFloat(m_smoothedValue);
}
bool FilterEffectBuilder::build(Element* element, const FilterOperations& operations, float zoom)
{
    // Create a parent filter for shorthand filters. These have already been scaled by the CSS code for page zoom, so scale is 1.0 here.
    RefPtrWillBeRawPtr<ReferenceFilter> parentFilter = ReferenceFilter::create(1.0f);
    RefPtrWillBeRawPtr<FilterEffect> previousEffect = SourceGraphic::create(parentFilter.get());
    for (size_t i = 0; i < operations.operations().size(); ++i) {
        RefPtrWillBeRawPtr<FilterEffect> effect = nullptr;
        FilterOperation* filterOperation = operations.operations().at(i).get();
        switch (filterOperation->type()) {
        case FilterOperation::REFERENCE: {
            RefPtrWillBeRawPtr<ReferenceFilter> referenceFilter = ReferenceFilterBuilder::build(zoom, element, previousEffect.get(), toReferenceFilterOperation(*filterOperation));
            if (referenceFilter) {
                effect = referenceFilter->lastEffect();
                m_referenceFilters.append(referenceFilter);
            }
            break;
        }
        case FilterOperation::GRAYSCALE: {
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - toBasicColorMatrixFilterOperation(filterOperation)->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#grayscaleEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.2126 + 0.7874 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 + 0.2848 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 + 0.9278 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(parentFilter.get(), FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SEPIA: {
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - toBasicColorMatrixFilterOperation(filterOperation)->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#sepiaEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.393 + 0.607 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.769 - 0.769 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.189 - 0.189 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.349 - 0.349 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.686 + 0.314 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.168 - 0.168 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.272 - 0.272 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.534 - 0.534 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.131 + 0.869 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(parentFilter.get(), FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SATURATE: {
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(toBasicColorMatrixFilterOperation(filterOperation)->amount()));
            effect = FEColorMatrix::create(parentFilter.get(), FECOLORMATRIX_TYPE_SATURATE, inputParameters);
            break;
        }
        case FilterOperation::HUE_ROTATE: {
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(toBasicColorMatrixFilterOperation(filterOperation)->amount()));
            effect = FEColorMatrix::create(parentFilter.get(), FECOLORMATRIX_TYPE_HUEROTATE, inputParameters);
            break;
        }
        case FilterOperation::INVERT: {
            BasicComponentTransferFilterOperation* componentTransferOperation = toBasicComponentTransferFilterOperation(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation->amount()));
            transferParameters.append(narrowPrecisionToFloat(1 - componentTransferOperation->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(parentFilter.get(), transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::OPACITY: {
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(0);
            transferParameters.append(narrowPrecisionToFloat(toBasicComponentTransferFilterOperation(filterOperation)->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(parentFilter.get(), nullFunction, nullFunction, nullFunction, transferFunction);
            break;
        }
        case FilterOperation::BRIGHTNESS: {
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            transferFunction.slope = narrowPrecisionToFloat(toBasicComponentTransferFilterOperation(filterOperation)->amount());
            transferFunction.intercept = 0;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(parentFilter.get(), transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::CONTRAST: {
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            float amount = narrowPrecisionToFloat(toBasicComponentTransferFilterOperation(filterOperation)->amount());
            transferFunction.slope = amount;
            transferFunction.intercept = -0.5 * amount + 0.5;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(parentFilter.get(), transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::BLUR: {
            float stdDeviation = floatValueForLength(toBlurFilterOperation(filterOperation)->stdDeviation(), 0);
            effect = FEGaussianBlur::create(parentFilter.get(), stdDeviation, stdDeviation);
            break;
        }
        case FilterOperation::DROP_SHADOW: {
            DropShadowFilterOperation* dropShadowOperation = toDropShadowFilterOperation(filterOperation);
            float stdDeviation = dropShadowOperation->stdDeviation();
            float x = dropShadowOperation->x();
            float y = dropShadowOperation->y();
            effect = FEDropShadow::create(parentFilter.get(), stdDeviation, stdDeviation, x, y, dropShadowOperation->color(), 1);
            break;
        }
        default:
            break;
        }

        if (effect) {
            if (filterOperation->type() != FilterOperation::REFERENCE) {
                // Unlike SVG, filters applied here should not clip to their primitive subregions.
                effect->setClipsToBounds(false);
                effect->setOperatingColorSpace(ColorSpaceDeviceRGB);
                effect->inputEffects().append(previousEffect);
            }
            previousEffect = effect.release();
        }
    }

    m_referenceFilters.append(parentFilter);

    // We need to keep the old effects alive until this point, so that SVG reference filters
    // can share cached resources across frames.
    m_lastEffect = previousEffect;

    // If we didn't make any effects, tell our caller we are not valid
    if (!m_lastEffect.get())
        return false;

    return true;
}
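The CONTRAST case in both builders uses a linear transfer with slope = amount and intercept = -0.5 * amount + 0.5, so the curve always pivots around mid-gray. A small illustrative sketch of that transfer (not WebKit code):

// output = slope * input + intercept; input 0.5 always maps back to 0.5.
static float applyContrastTransfer(float input, float amount)
{
    float slope = amount;
    float intercept = -0.5f * amount + 0.5f;
    return slope * input + intercept;
}
// applyContrastTransfer(0.5f, 2.0f) == 0.5f, applyContrastTransfer(0.75f, 2.0f) == 1.0f.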
Example n. 24
FloatRect FloatRect::narrowPrecision(double x, double y, double width, double height)
{
    return FloatRect(narrowPrecisionToFloat(x), narrowPrecisionToFloat(y), narrowPrecisionToFloat(width), narrowPrecisionToFloat(height));
}
bool FilterEffectRenderer::build(RenderObject* renderer, const FilterOperations& operations)
{
    m_hasCustomShaderFilter = false;
    m_hasFilterThatMovesPixels = operations.hasFilterThatMovesPixels();

    // Inverse zoom the pre-zoomed CSS shorthand filters, so that they are in the same zoom as the unzoomed reference filters.
    const RenderStyle* style = renderer->style();
    // FIXME: The effects now contain high dpi information, but the software path doesn't (yet) scale its backing.
    //        When the proper dpi-dependent backing size is allocated, we should remove deviceScaleFactor(...) here.
    float invZoom = 1.0f / ((style ? style->effectiveZoom() : 1.0f) * deviceScaleFactor(renderer->frame()));

    RefPtr<FilterEffect> previousEffect = m_sourceGraphic;
    for (size_t i = 0; i < operations.operations().size(); ++i) {
        RefPtr<FilterEffect> effect;
        FilterOperation* filterOperation = operations.operations().at(i).get();
        switch (filterOperation->getOperationType()) {
        case FilterOperation::REFERENCE: {
            ReferenceFilterOperation* referenceOperation = static_cast<ReferenceFilterOperation*>(filterOperation);
            effect = ReferenceFilterBuilder::build(this, renderer, previousEffect.get(), referenceOperation);
            break;
        }
        case FilterOperation::GRAYSCALE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#grayscaleEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.2126 + 0.7874 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 + 0.2848 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 - 0.0722 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.2126 - 0.2126 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.7152 - 0.7152 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.0722 + 0.9278 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SEPIA: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            double oneMinusAmount = clampTo(1 - colorMatrixOperation->amount(), 0.0, 1.0);

            // See https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html#sepiaEquivalent
            // for information on parameters.

            inputParameters.append(narrowPrecisionToFloat(0.393 + 0.607 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.769 - 0.769 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.189 - 0.189 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.349 - 0.349 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.686 + 0.314 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.168 - 0.168 * oneMinusAmount));
            endMatrixRow(inputParameters);

            inputParameters.append(narrowPrecisionToFloat(0.272 - 0.272 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.534 - 0.534 * oneMinusAmount));
            inputParameters.append(narrowPrecisionToFloat(0.131 + 0.869 * oneMinusAmount));
            endMatrixRow(inputParameters);

            lastMatrixRow(inputParameters);

            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_MATRIX, inputParameters);
            break;
        }
        case FilterOperation::SATURATE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation->amount()));
            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_SATURATE, inputParameters);
            break;
        }
        case FilterOperation::HUE_ROTATE: {
            BasicColorMatrixFilterOperation* colorMatrixOperation = static_cast<BasicColorMatrixFilterOperation*>(filterOperation);
            Vector<float> inputParameters;
            inputParameters.append(narrowPrecisionToFloat(colorMatrixOperation->amount()));
            effect = FEColorMatrix::create(this, FECOLORMATRIX_TYPE_HUEROTATE, inputParameters);
            break;
        }
        case FilterOperation::INVERT: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation->amount()));
            transferParameters.append(narrowPrecisionToFloat(1 - componentTransferOperation->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::OPACITY: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_TABLE;
            Vector<float> transferParameters;
            transferParameters.append(0);
            transferParameters.append(narrowPrecisionToFloat(componentTransferOperation->amount()));
            transferFunction.tableValues = transferParameters;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, nullFunction, nullFunction, nullFunction, transferFunction);
            break;
        }
        case FilterOperation::BRIGHTNESS: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            transferFunction.slope = narrowPrecisionToFloat(componentTransferOperation->amount());
            transferFunction.intercept = 0;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::CONTRAST: {
            BasicComponentTransferFilterOperation* componentTransferOperation = static_cast<BasicComponentTransferFilterOperation*>(filterOperation);
            ComponentTransferFunction transferFunction;
            transferFunction.type = FECOMPONENTTRANSFER_TYPE_LINEAR;
            float amount = narrowPrecisionToFloat(componentTransferOperation->amount());
            transferFunction.slope = amount;
            transferFunction.intercept = -0.5 * amount + 0.5;

            ComponentTransferFunction nullFunction;
            effect = FEComponentTransfer::create(this, transferFunction, transferFunction, transferFunction, nullFunction);
            break;
        }
        case FilterOperation::BLUR: {
            BlurFilterOperation* blurOperation = static_cast<BlurFilterOperation*>(filterOperation);
            float stdDeviation = floatValueForLength(blurOperation->stdDeviation(), 0) * invZoom;
            effect = FEGaussianBlur::create(this, stdDeviation, stdDeviation);
            break;
        }
        case FilterOperation::DROP_SHADOW: {
            DropShadowFilterOperation* dropShadowOperation = static_cast<DropShadowFilterOperation*>(filterOperation);
            float stdDeviation = dropShadowOperation->stdDeviation() * invZoom;
            float x = dropShadowOperation->x() * invZoom;
            float y = dropShadowOperation->y() * invZoom;
            effect = FEDropShadow::create(this, stdDeviation, stdDeviation, x, y, dropShadowOperation->color(), 1);
            break;
        }
        case FilterOperation::CUSTOM:
            // CUSTOM operations are always converted to VALIDATED_CUSTOM before getting here.
            // The conversion happens in RenderLayer::computeFilterOperations.
            ASSERT_NOT_REACHED();
            break;
        case FilterOperation::VALIDATED_CUSTOM: {
            ValidatedCustomFilterOperation* customFilterOperation = static_cast<ValidatedCustomFilterOperation*>(filterOperation);
            Document* document = renderer ? &renderer->document() : 0;
            effect = createCustomFilterEffect(this, document, customFilterOperation);
            if (effect)
                m_hasCustomShaderFilter = true;
            break;
        }
        default:
            break;
        }

        if (effect) {
            if (filterOperation->getOperationType() != FilterOperation::REFERENCE) {
                // Unlike SVG, filters applied here should not clip to their primitive subregions.
                effect->setClipsToBounds(false);
                effect->setOperatingColorSpace(ColorSpaceDeviceRGB);
                effect->inputEffects().append(previousEffect);
            }
            previousEffect = effect.release();
        }
    }

    // We need to keep the old effects alive until this point, so that filters like FECustomFilter
    // can share cached resources across frames.
    m_lastEffect = previousEffect;

    // If we didn't make any effects, tell our caller we are not valid
    if (!m_lastEffect.get())
        return false;

    return true;
}
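The INVERT case builds a two-entry table { amount, 1 - amount } for a TABLE-type component transfer, which linearly interpolates between its entries; at amount = 1 that is a full channel inversion. A minimal sketch of that sampling rule (assuming the standard feComponentTransfer table semantics):

// For a two-entry table { v0, v1 }, the transfer is v0 + input * (v1 - v0).
static float applyInvertTable(float input, float amount)
{
    float v0 = amount;
    float v1 = 1.0f - amount;
    return v0 + input * (v1 - v0);
}
// applyInvertTable(0.2f, 1.0f) == 0.8f; applyInvertTable(0.2f, 0.0f) == 0.2f.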
Example n. 26
void Image::drawPattern(GraphicsContext* context,
                        const FloatRect& floatSrcRect,
                        const AffineTransform& patternTransform,
                        const FloatPoint& phase,
                        ColorSpace styleColorSpace,
                        CompositeOperator compositeOp,
                        const FloatRect& destRect)
{
    FloatRect normSrcRect = normalizeRect(floatSrcRect);
    if (destRect.isEmpty() || normSrcRect.isEmpty())
        return; // nothing to draw

    NativeImageSkia* bitmap = nativeImageForCurrentFrame();
    if (!bitmap)
        return;

    SkIRect srcRect = enclosingIntRect(normSrcRect);

    // Figure out what size the bitmap will be in the destination. The
    // destination rect is the bounds of the pattern; we need to use the
    // matrix to see how big it will be.
    float destBitmapWidth, destBitmapHeight;
    TransformDimensions(patternTransform, srcRect.width(), srcRect.height(),
                        &destBitmapWidth, &destBitmapHeight);

    // Compute the resampling mode.
    ResamplingMode resampling;
    if (context->platformContext()->isAccelerated() || context->platformContext()->printing())
        resampling = RESAMPLE_LINEAR;
    else
        resampling = computeResamplingMode(context->platformContext(), *bitmap, srcRect.width(), srcRect.height(), destBitmapWidth, destBitmapHeight);

    // Load the transform WebKit requested.
    SkMatrix matrix(patternTransform);

    SkShader* shader;
    if (resampling == RESAMPLE_AWESOME) {
        // Do nice resampling.
        int width = static_cast<int>(destBitmapWidth);
        int height = static_cast<int>(destBitmapHeight);
        SkBitmap resampled = bitmap->resizedBitmap(srcRect, width, height);
        shader = SkShader::CreateBitmapShader(resampled, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);

        // Since we just resized the bitmap, we need to undo the scale set in
        // the image transform.
        matrix.setScaleX(SkIntToScalar(1));
        matrix.setScaleY(SkIntToScalar(1));
    } else {
        // No need to do nice resampling.
        SkBitmap srcSubset;
        bitmap->bitmap().extractSubset(&srcSubset, srcRect);
        shader = SkShader::CreateBitmapShader(srcSubset, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);
    }

    // We also need to translate it such that the origin of the pattern is the
    // origin of the destination rect, which is what WebKit expects. Skia uses
    // the coordinate system origin as the base for the pattern. If WebKit wants
    // a shifted image, it will shift it from there using the patternTransform.
    float adjustedX = phase.x() + normSrcRect.x() *
                      narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + normSrcRect.y() *
                      narrowPrecisionToFloat(patternTransform.d());
    matrix.postTranslate(SkFloatToScalar(adjustedX),
                         SkFloatToScalar(adjustedY));
    shader->setLocalMatrix(matrix);

    SkPaint paint;
    paint.setShader(shader)->unref();
    paint.setXfermodeMode(WebCoreCompositeToSkiaComposite(compositeOp));
    paint.setFilterBitmap(resampling == RESAMPLE_LINEAR);

    context->platformContext()->paintSkPaint(destRect, paint);
}
TransformationMatrix::operator CATransform3D() const
{
    CATransform3D toT3D;
    toT3D.m11 = narrowPrecisionToFloat(m11());
    toT3D.m12 = narrowPrecisionToFloat(m12());
    toT3D.m13 = narrowPrecisionToFloat(m13());
    toT3D.m14 = narrowPrecisionToFloat(m14());
    toT3D.m21 = narrowPrecisionToFloat(m21());
    toT3D.m22 = narrowPrecisionToFloat(m22());
    toT3D.m23 = narrowPrecisionToFloat(m23());
    toT3D.m24 = narrowPrecisionToFloat(m24());
    toT3D.m31 = narrowPrecisionToFloat(m31());
    toT3D.m32 = narrowPrecisionToFloat(m32());
    toT3D.m33 = narrowPrecisionToFloat(m33());
    toT3D.m34 = narrowPrecisionToFloat(m34());
    toT3D.m41 = narrowPrecisionToFloat(m41());
    toT3D.m42 = narrowPrecisionToFloat(m42());
    toT3D.m43 = narrowPrecisionToFloat(m43());
    toT3D.m44 = narrowPrecisionToFloat(m44());
    return toT3D;
}
bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames)
{
    ASSERT(context()->isAudioThread());
    
    // Basic sanity checking
    ASSERT(bus);
    ASSERT(buffer());
    if (!bus || !buffer())
        return false;

    unsigned numberOfChannels = this->numberOfChannels();
    unsigned busNumberOfChannels = bus->numberOfChannels();

    bool channelCountGood = numberOfChannels && numberOfChannels == busNumberOfChannels;
    ASSERT(channelCountGood);
    if (!channelCountGood)
        return false;

    // Sanity check destinationFrameOffset, numberOfFrames.
    size_t destinationLength = bus->length();

    bool isLengthGood = destinationLength <= 4096 && numberOfFrames <= 4096;
    ASSERT(isLengthGood);
    if (!isLengthGood)
        return false;

    bool isOffsetGood = destinationFrameOffset <= destinationLength && destinationFrameOffset + numberOfFrames <= destinationLength;
    ASSERT(isOffsetGood);
    if (!isOffsetGood)
        return false;

    // Potentially zero out initial frames leading up to the offset.
    if (destinationFrameOffset) {
        for (unsigned i = 0; i < numberOfChannels; ++i) 
            memset(m_destinationChannels[i], 0, sizeof(float) * destinationFrameOffset);
    }

    // Offset the pointers to the correct offset frame.
    unsigned writeIndex = destinationFrameOffset;

    size_t bufferLength = buffer()->length();
    double bufferSampleRate = buffer()->sampleRate();

    // Avoid converting from time to sample-frames twice by computing
    // the grain end time before computing the sample frame.
    unsigned endFrame = m_isGrain ? AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate) : bufferLength;
    
    // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
    // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.
    // https://bugs.webkit.org/show_bug.cgi?id=77224
    if (m_isGrain)
        endFrame += 512;

    // Do some sanity checking.
    if (endFrame > bufferLength)
        endFrame = bufferLength;
    if (m_virtualReadIndex >= endFrame)
        m_virtualReadIndex = 0; // reset to start

    // If the .loop attribute is true, then m_loopStart == 0 && m_loopEnd == 0 means we should use the entire
    // buffer as the loop; otherwise, use the loop points in m_loopStart and m_loopEnd.
    double virtualEndFrame = endFrame;
    double virtualDeltaFrames = endFrame;

    if (loop() && (m_loopStart || m_loopEnd) && m_loopStart >= 0 && m_loopEnd > 0 && m_loopStart < m_loopEnd) {
        // Convert from seconds to sample-frames.
        double loopStartFrame = m_loopStart * buffer()->sampleRate();
        double loopEndFrame = m_loopEnd * buffer()->sampleRate();

        virtualEndFrame = min(loopEndFrame, virtualEndFrame);
        virtualDeltaFrames = virtualEndFrame - loopStartFrame;
    }


    double pitchRate = totalPitchRate();

    // Sanity check that our playback rate isn't larger than the loop size.
    if (pitchRate >= virtualDeltaFrames)
        return false;

    // Get local copy.
    double virtualReadIndex = m_virtualReadIndex;

    // Render loop - reading from the source buffer to the destination using linear interpolation.
    int framesToProcess = numberOfFrames;

    const float** sourceChannels = m_sourceChannels.get();
    float** destinationChannels = m_destinationChannels.get();

    // Optimize for the very common case of playing back with pitchRate == 1.
    // We can avoid the linear interpolation.
    if (pitchRate == 1 && virtualReadIndex == floor(virtualReadIndex)
        && virtualDeltaFrames == floor(virtualDeltaFrames)
        && virtualEndFrame == floor(virtualEndFrame)) {
        unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
        unsigned deltaFrames = static_cast<unsigned>(virtualDeltaFrames);
        endFrame = static_cast<unsigned>(virtualEndFrame);
        while (framesToProcess > 0) {
            int framesToEnd = endFrame - readIndex;
            int framesThisTime = min(framesToProcess, framesToEnd);
            framesThisTime = max(0, framesThisTime);

            for (unsigned i = 0; i < numberOfChannels; ++i) 
                memcpy(destinationChannels[i] + writeIndex, sourceChannels[i] + readIndex, sizeof(float) * framesThisTime);

            writeIndex += framesThisTime;
            readIndex += framesThisTime;
            framesToProcess -= framesThisTime;

            // Wrap-around.
            if (readIndex >= endFrame) {
                readIndex -= deltaFrames;
                if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
                    break;
            }
        }
        virtualReadIndex = readIndex;
    } else {
        while (framesToProcess--) {
            unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
            double interpolationFactor = virtualReadIndex - readIndex;

            // For linear interpolation we need the next sample-frame too.
            unsigned readIndex2 = readIndex + 1;
            if (readIndex2 >= bufferLength) {
                if (loop()) {
                    // Make sure to wrap around at the end of the buffer.
                    readIndex2 = static_cast<unsigned>(virtualReadIndex + 1 - virtualDeltaFrames);
                } else
                    readIndex2 = readIndex;
            }

            // Final sanity check on buffer access.
            // FIXME: as an optimization, try to get rid of this inner-loop check and put assertions and guards before the loop.
            if (readIndex >= bufferLength || readIndex2 >= bufferLength)
                break;

            // Linear interpolation.
            for (unsigned i = 0; i < numberOfChannels; ++i) {
                float* destination = destinationChannels[i];
                const float* source = sourceChannels[i];

                double sample1 = source[readIndex];
                double sample2 = source[readIndex2];
                double sample = (1.0 - interpolationFactor) * sample1 + interpolationFactor * sample2;

                destination[writeIndex] = narrowPrecisionToFloat(sample);
            }
            writeIndex++;

            virtualReadIndex += pitchRate;

            // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
            if (virtualReadIndex >= virtualEndFrame) {
                virtualReadIndex -= virtualDeltaFrames;
                if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
                    break;
            }
        }
    }

    bus->clearSilentFlag();

    m_virtualReadIndex = virtualReadIndex;

    return true;
}
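The interpolation branch above reads at a fractional virtualReadIndex by blending two adjacent sample frames. A minimal sketch of that per-sample read (wrap-around at the loop boundary omitted):

// Linear interpolation between source[readIndex] and source[readIndex + 1].
static float readInterpolatedSample(const float* source, double virtualReadIndex)
{
    unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
    double interpolationFactor = virtualReadIndex - readIndex;
    double sample = (1.0 - interpolationFactor) * source[readIndex]
        + interpolationFactor * source[readIndex + 1];
    return narrowPrecisionToFloat(sample);
}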
Example n. 29
void Image::drawPattern(GraphicsContext* context,
                        const FloatRect& floatSrcRect,
                        const AffineTransform& patternTransform,
                        const FloatPoint& phase,
                        ColorSpace styleColorSpace,
                        CompositeOperator compositeOp,
                        const FloatRect& destRect,
                        BlendMode blendMode)
{
    TRACE_EVENT0("skia", "Image::drawPattern");
    RefPtr<NativeImageSkia> bitmap = nativeImageForCurrentFrame();
    if (!bitmap)
        return;

    FloatRect normSrcRect = adjustForNegativeSize(floatSrcRect);
    normSrcRect.intersect(FloatRect(0, 0, bitmap->bitmap().width(), bitmap->bitmap().height()));
    if (destRect.isEmpty() || normSrcRect.isEmpty())
        return; // nothing to draw

    SkMatrix ctm = context->getTotalMatrix();
    SkMatrix totalMatrix;
    totalMatrix.setConcat(ctm, patternTransform);

    // Figure out what size the bitmap will be in the destination. The
    // destination rect is the bounds of the pattern; we need to use the
    // matrix to see how big it will be.
    SkRect destRectTarget;
    totalMatrix.mapRect(&destRectTarget, normSrcRect);

    float destBitmapWidth = SkScalarToFloat(destRectTarget.width());
    float destBitmapHeight = SkScalarToFloat(destRectTarget.height());

    // Compute the resampling mode.
    ResamplingMode resampling;
    if (context->isAccelerated() || context->printing())
        resampling = RESAMPLE_LINEAR;
    else
        resampling = computeResamplingMode(totalMatrix, *bitmap, normSrcRect.width(), normSrcRect.height(), destBitmapWidth, destBitmapHeight);
    resampling = limitResamplingMode(context, resampling);

    // Load the transform WebKit requested.
    SkMatrix matrix(patternTransform);

    SkShader* shader;
    if (resampling == RESAMPLE_AWESOME) {
        // Do nice resampling.
        float scaleX = destBitmapWidth / normSrcRect.width();
        float scaleY = destBitmapHeight / normSrcRect.height();
        SkRect scaledSrcRect;
        SkIRect enclosingScaledSrcRect;

        // The image fragment generated here is not exactly what is
        // requested. The scale factor used is approximated, and the image
        // fragment is slightly larger so that it aligns to integer
        // boundaries.
        SkBitmap resampled = extractScaledImageFragment(*bitmap, normSrcRect, scaleX, scaleY, &scaledSrcRect, &enclosingScaledSrcRect);
        shader = SkShader::CreateBitmapShader(resampled, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);

        // Since we just resized the bitmap, we need to remove the scale
        // applied to the pixels in the bitmap shader. This means we need
        // CTM * patternTransform to have identity scale. Since we
        // can't modify CTM (or the rectangle will be drawn in the wrong
        // place), we must set patternTransform's scale to the inverse of
        // CTM scale.
        matrix.setScaleX(ctm.getScaleX() ? 1 / ctm.getScaleX() : 1);
        matrix.setScaleY(ctm.getScaleY() ? 1 / ctm.getScaleY() : 1);
    } else {
        // No need to do nice resampling.
        SkBitmap srcSubset;
        bitmap->bitmap().extractSubset(&srcSubset, enclosingIntRect(normSrcRect));
        shader = SkShader::CreateBitmapShader(srcSubset, SkShader::kRepeat_TileMode, SkShader::kRepeat_TileMode);
    }

    // We also need to translate it such that the origin of the pattern is the
    // origin of the destination rect, which is what WebKit expects. Skia uses
    // the coordinate system origin as the base for the pattern. If WebKit wants
    // a shifted image, it will shift it from there using the patternTransform.
    float adjustedX = phase.x() + normSrcRect.x() *
                      narrowPrecisionToFloat(patternTransform.a());
    float adjustedY = phase.y() + normSrcRect.y() *
                      narrowPrecisionToFloat(patternTransform.d());
    matrix.postTranslate(SkFloatToScalar(adjustedX),
                         SkFloatToScalar(adjustedY));
    shader->setLocalMatrix(matrix);

    SkPaint paint;
    paint.setShader(shader)->unref();
    paint.setXfermodeMode(WebCoreCompositeToSkiaComposite(compositeOp, blendMode));

    paint.setFilterBitmap(resampling == RESAMPLE_LINEAR);

    context->drawRect(destRect, paint);
}
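The RESAMPLE_AWESOME branch above bakes the device-space scale into the resampled bitmap, so the shader's local matrix must cancel the CTM's scale to keep the net scale at identity. A small sketch of that compensation, using the same old-style SkMatrix accessors as the code above:

// Keep CTM * localMatrix at identity scale after pre-scaling the bitmap,
// guarding against a zero CTM scale.
static void cancelCTMScale(SkMatrix& localMatrix, const SkMatrix& ctm)
{
    localMatrix.setScaleX(ctm.getScaleX() ? 1 / ctm.getScaleX() : 1);
    localMatrix.setScaleY(ctm.getScaleY() ? 1 / ctm.getScaleY() : 1);
}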