Example #1
SkImage* SkSurface_Gpu::onNewImageSnapshot(SkBudgeted budgeted, ForceCopyMode forceCopyMode) {
    GrRenderTarget* rt = fDevice->accessRenderTarget();
    SkASSERT(rt);
    GrTexture* tex = rt->asTexture();
    SkAutoTUnref<GrTexture> copy;
    // TODO: Force a copy when the rt is an external resource.
    if (kYes_ForceCopyMode == forceCopyMode || !tex) {
        GrSurfaceDesc desc = fDevice->accessRenderTarget()->desc();
        GrContext* ctx = fDevice->context();
        desc.fFlags = desc.fFlags & ~kRenderTarget_GrSurfaceFlag;
        copy.reset(ctx->textureProvider()->createTexture(desc, budgeted));
        if (!copy) {
            return nullptr;
        }
        if (!ctx->copySurface(copy, rt)) {
            return nullptr;
        }
        tex = copy;
    }
    const SkImageInfo info = fDevice->imageInfo();
    SkImage* image = nullptr;
    if (tex) {
        image = new SkImage_Gpu(info.width(), info.height(), kNeedNewImageUniqueID,
                                info.alphaType(), tex, budgeted);
    }
    return image;
}
Example #2
bool GrClipMaskManager::setupScissorClip(const GrPipelineBuilder& pipelineBuilder,
                                         GrPipelineBuilder::AutoRestoreStencil* ars,
                                         const SkIRect& clipScissor,
                                         const SkRect* devBounds,
                                         GrAppliedClip* out) {
    if (kRespectClip_StencilClipMode == fClipMode) {
        fClipMode = kIgnoreClip_StencilClipMode;
    }

    GrRenderTarget* rt = pipelineBuilder.getRenderTarget();

    SkIRect clipSpaceRTIBounds = SkIRect::MakeWH(rt->width(), rt->height());
    SkIRect devBoundsScissor;
    const SkIRect* scissor = &clipScissor;
    bool doDevBoundsClip = fDebugClipBatchToBounds && devBounds;
    if (doDevBoundsClip) {
        devBounds->roundOut(&devBoundsScissor);
        if (devBoundsScissor.intersect(clipScissor)) {
            scissor = &devBoundsScissor;
        }
    }

    if (scissor->contains(clipSpaceRTIBounds)) {
        // This counts as wide open
        this->setPipelineBuilderStencil(pipelineBuilder, ars);
        return true;
    }

    if (clipSpaceRTIBounds.intersect(*scissor)) {
        out->fScissorState.set(clipSpaceRTIBounds);
        this->setPipelineBuilderStencil(pipelineBuilder, ars);
        return true;
    }
    return false;
}
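// A standalone sketch (illustrative helper, not Skia API) of the scissor
// reduction performed above: a scissor that contains the whole RT counts as
// wide open, a partial overlap becomes the applied scissor, and no overlap
// means the draw can be skipped entirely.
static bool reduce_scissor(const SkIRect& rtBounds, const SkIRect& scissor,
                           SkIRect* out, bool* wideOpen) {
    if (scissor.contains(rtBounds)) {
        *wideOpen = true;               // no scissor state needed
        return true;
    }
    SkIRect isect = rtBounds;
    if (isect.intersect(scissor)) {
        *wideOpen = false;
        *out = isect;                   // clamp the scissor to the RT
        return true;
    }
    return false;                       // fully clipped away
}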
Example #3
void GrGLProgram::setRenderTargetState(const GrPrimitiveProcessor& primProc,
                                       const GrRenderTargetProxy* proxy) {
    GrRenderTarget* rt = proxy->priv().peekRenderTarget();
    // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
    if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
        fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
        fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
    }

    // set RT adjustment
    SkISize size;
    size.set(rt->width(), rt->height());
    if (!primProc.isPathRendering()) {
        if (fRenderTargetState.fRenderTargetOrigin != proxy->origin() ||
            fRenderTargetState.fRenderTargetSize != size) {
            fRenderTargetState.fRenderTargetSize = size;
            fRenderTargetState.fRenderTargetOrigin = proxy->origin();

            float rtAdjustmentVec[4];
            fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
            fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
        }
    } else {
        SkASSERT(fGpu->glCaps().shaderCaps()->pathRenderingSupport());
        const GrPathProcessor& pathProc = primProc.cast<GrPathProcessor>();
        fGpu->glPathRendering()->setProjectionMatrix(pathProc.viewMatrix(),
                                                     size, proxy->origin());
    }
}
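// The rtAdjustmentVec above packs an affine device-space-to-NDC mapping.
// A minimal sketch of the arithmetic, assuming the conventional Skia packing
// {scaleX, transX, scaleY, transY} where the vertex shader computes
// ndc = (x * v[0] + v[1], y * v[2] + v[3]); bottom-left-origin targets
// negate the y terms so +y still points down in device space.
static void get_rt_adjustment(int w, int h, bool bottomLeftOrigin, float vec[4]) {
    vec[0] = 2.0f / w;
    vec[1] = -1.0f;
    if (bottomLeftOrigin) {
        vec[2] = -2.0f / h;
        vec[3] = 1.0f;
    } else {
        vec[2] = 2.0f / h;
        vec[3] = -1.0f;
    }
}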
Example #4
void GrDrawTarget::stencilPath(GrPipelineBuilder* pipelineBuilder,
                               const GrPathProcessor* pathProc,
                               const GrPath* path,
                               GrPathRendering::FillType fill) {
    // TODO: extract portions of checkDraw that are relevant to path stenciling.
    SkASSERT(path);
    SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
    SkASSERT(pipelineBuilder);

    // Setup clip
    GrScissorState scissorState;
    GrPipelineBuilder::AutoRestoreFragmentProcessors arfp;
    GrPipelineBuilder::AutoRestoreStencil ars;
    if (!this->setupClip(pipelineBuilder, &arfp, &ars, &scissorState, NULL)) {
        return;
    }

    // set stencil settings for path
    GrStencilSettings stencilSettings;
    GrRenderTarget* rt = pipelineBuilder->getRenderTarget();
    GrStencilAttachment* sb = rt->renderTargetPriv().attachStencilAttachment();
    this->getPathStencilSettingsForFilltype(fill, sb, &stencilSettings);

    this->onStencilPath(*pipelineBuilder, pathProc, path, scissorState, stencilSettings);
}
Example #5
sk_sp<SkImage> SkSurface_Gpu::onNewImageSnapshot(SkBudgeted budgeted, ForceCopyMode forceCopyMode) {
    GrRenderTarget* rt = fDevice->accessDrawContext()->accessRenderTarget();
    SkASSERT(rt);
    GrTexture* tex = rt->asTexture();
    SkAutoTUnref<GrTexture> copy;
    // If the original render target is a buffer originally created by the client, then we don't
    // want to ever retarget the SkSurface at another buffer we create. Force a copy now to avoid
    // copy-on-write.
    if (kYes_ForceCopyMode == forceCopyMode || !tex || rt->resourcePriv().refsWrappedObjects()) {
        GrSurfaceDesc desc = fDevice->accessDrawContext()->desc();
        GrContext* ctx = fDevice->context();
        desc.fFlags = desc.fFlags & ~kRenderTarget_GrSurfaceFlag;
        copy.reset(ctx->textureProvider()->createTexture(desc, budgeted));
        if (!copy) {
            return nullptr;
        }
        if (!ctx->copySurface(copy, rt)) {
            return nullptr;
        }
        tex = copy;
    }
    const SkImageInfo info = fDevice->imageInfo();
    sk_sp<SkImage> image;
    if (tex) {
        image = sk_make_sp<SkImage_Gpu>(info.width(), info.height(), kNeedNewImageUniqueID,
                                        info.alphaType(), tex, sk_ref_sp(info.colorSpace()),
                                        budgeted);
    }
    return image;
}
Example #6
void GrDrawTarget::drawPath(const GrPipelineBuilder& pipelineBuilder,
                            const GrPathProcessor* pathProc,
                            const GrPath* path,
                            GrPathRendering::FillType fill) {
    // TODO: extract portions of checkDraw that are relevant to path rendering.
    SkASSERT(path);
    SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());

    SkRect devBounds = path->getBounds();
    pathProc->viewMatrix().mapRect(&devBounds);

    // Setup clip
    GrScissorState scissorState;
    GrPipelineBuilder::AutoRestoreFragmentProcessorState arfps;
    GrPipelineBuilder::AutoRestoreStencil ars;
    if (!this->setupClip(pipelineBuilder, &arfps, &ars, &scissorState, &devBounds)) {
       return;
    }

    // set stencil settings for path
    GrStencilSettings stencilSettings;
    GrRenderTarget* rt = pipelineBuilder.getRenderTarget();
    GrStencilAttachment* sb = rt->renderTargetPriv().attachStencilAttachment();
    this->getPathStencilSettingsForFilltype(fill, sb, &stencilSettings);

    GrDrawTarget::PipelineInfo pipelineInfo(pipelineBuilder, &scissorState, pathProc, &devBounds,
                                            this);
    if (pipelineInfo.mustSkipDraw()) {
        return;
    }

    this->onDrawPath(pathProc, path, stencilSettings, pipelineInfo);
}
Example #7
SkData* Request::getJsonBatchList(int n) {
    SkCanvas* canvas = this->getCanvas();
    SkASSERT(fGPUEnabled);

    // TODO: if this is inefficient we could add a method to GrAuditTrail which takes
    // a Json::Value and is only compiled in this file
    Json::Value parsedFromString;
#if SK_SUPPORT_GPU
    GrRenderTarget* rt = canvas->internal_private_accessTopLayerRenderTarget();
    SkASSERT(rt);
    GrContext* ctx = rt->getContext();
    SkASSERT(ctx);
    GrAuditTrail* at = ctx->getAuditTrail();
    GrAuditTrail::AutoManageBatchList enable(at);
    
    fDebugCanvas->drawTo(canvas, n);

    Json::Reader reader;
    SkDEBUGCODE(bool parsingSuccessful = )reader.parse(at->toJson(true).c_str(),
                                                       parsedFromString);
    SkASSERT(parsingSuccessful);
#endif

    SkDynamicMemoryWStream stream;
    stream.writeText(Json::FastWriter().write(parsedFromString).c_str());

    return stream.copyToData();
}
Example #8
Canvas2DLayerBridge::Canvas2DLayerBridge(PassRefPtr<GraphicsContext3D> context, SkDeferredCanvas* canvas, OpacityMode opacityMode, ThreadMode threadMode)
    : m_canvas(canvas)
    , m_context(context)
    , m_bytesAllocated(0)
    , m_didRecordDrawCommand(false)
    , m_framesPending(0)
    , m_next(0)
    , m_prev(0)
#if ENABLE(CANVAS_USES_MAILBOX)
    , m_lastImageId(0)
#endif
{
    ASSERT(m_canvas);
    // Used by browser tests to detect the use of a Canvas2DLayerBridge.
    TRACE_EVENT_INSTANT0("test_gpu", "Canvas2DLayerBridgeCreation");
    m_canvas->setNotificationClient(this);
#if ENABLE(CANVAS_USES_MAILBOX)
    m_layer = adoptPtr(WebKit::Platform::current()->compositorSupport()->createExternalTextureLayerForMailbox(this));
#else
    m_layer = adoptPtr(WebKit::Platform::current()->compositorSupport()->createExternalTextureLayer(this));
    m_layer->setRateLimitContext(threadMode == SingleThread);
    GrRenderTarget* renderTarget = reinterpret_cast<GrRenderTarget*>(m_canvas->getDevice()->accessRenderTarget());
    if (renderTarget) {
        m_layer->setTextureId(renderTarget->asTexture()->getTextureHandle());
    }
#endif
    m_layer->setOpaque(opacityMode == Opaque);
    GraphicsLayerChromium::registerContentsLayer(m_layer->layer());
}
Example #9
bool SkSurface_Gpu::onGetRenderTargetHandle(GrBackendObject* obj, BackendHandleAccess access) {
    GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
    if (!rt) {
        return false;
    }
    *obj = rt->getRenderTargetHandle();
    return true;
}
Example #10
Platform3DObject AcceleratedImageBufferSurface::getBackingTexture() const
{
    GrRenderTarget* renderTarget = m_canvas->getTopDevice()->accessRenderTarget();
    if (renderTarget) {
        return renderTarget->asTexture()->getTextureHandle();
    }
    return 0;
}
Example #11
SkSurface* SkSurface_Gpu::onNewSurface(const SkImageInfo& info) {
    GrRenderTarget* rt = fDevice->accessRenderTarget();
    int sampleCount = rt->numSamples();
    // TODO: Make caller specify this (change virtual signature of onNewSurface).
    static const Budgeted kBudgeted = kNo_Budgeted;
    return SkSurface::NewRenderTarget(fDevice->context(), kBudgeted, info, sampleCount,
                                      &this->props());
}
Example #12
GrBackendObject SkSurface_Gpu::onGetTextureHandle(BackendHandleAccess access) {
    GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
    GrTexture* texture = rt->asTexture();
    if (texture) {
        return texture->getTextureHandle();
    }
    return 0;
}
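// Hedged caller-side sketch, assuming the raw-handle-era public SkSurface API
// mirrored by onGetTextureHandle above; `surface` is a hypothetical argument.
// The kFlushRead access mode flushes pending draws before the handle is used.
// How the handle is decoded is backend-specific (e.g. a GL texture id), so
// this sketch only checks for success.
static bool surface_is_texture_backed(SkSurface* surface) {
    GrBackendObject handle =
            surface->getTextureHandle(SkSurface::kFlushRead_BackendHandleAccess);
    return SkToBool(handle);  // 0 => not texture-backed (pure render target)
}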
Example #13
void set_dynamic_scissor_state(GrVkGpu* gpu,
                               GrVkCommandBuffer* cmdBuffer,
                               const GrPipeline& pipeline,
                               const GrRenderTarget& target) {
    // We always use one scissor, and if it is disabled we just make it the size of the RT
    const GrScissorState& scissorState = pipeline.getScissorState();
    VkRect2D scissor;
    if (scissorState.enabled() &&
        !scissorState.rect().contains(0, 0, target.width(), target.height())) {
        // This all assumes the scissorState has previously been clipped to the device space render
        // target. 
        scissor.offset.x = scissorState.rect().fLeft;
        scissor.extent.width = scissorState.rect().width();
        if (kTopLeft_GrSurfaceOrigin == target.origin()) {
            scissor.offset.y = scissorState.rect().fTop;
        } else {
            SkASSERT(kBottomLeft_GrSurfaceOrigin == target.origin());
            scissor.offset.y = target.height() - scissorState.rect().fBottom;
        }
        scissor.extent.height = scissorState.rect().height();

        SkASSERT(scissor.offset.x >= 0);
        SkASSERT(scissor.offset.x + scissor.extent.width <= (uint32_t)target.width());
        SkASSERT(scissor.offset.y >= 0);
        SkASSERT(scissor.offset.y + scissor.extent.height <= (uint32_t)target.height());
    } else {
        scissor.extent.width = target.width();
        scissor.extent.height = target.height();
        scissor.offset.x = 0;
        scissor.offset.y = 0;
    }
    cmdBuffer->setScissor(gpu, 0, 1, &scissor);
}
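// The y-flip above is the only non-obvious arithmetic: Vulkan scissor rects
// are top-left-origin, so on a bottom-left-origin RT the rect's bottom edge
// becomes the scissor's top offset. A standalone restatement (illustrative,
// not Skia API); e.g. on a 100-tall RT, a rect spanning y = [10, 30) in
// bottom-left space lands at offset.y = 70.
static int flip_scissor_top(int rtHeight, int rectBottom) {
    return rtHeight - rectBottom;
}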
Example #14
unsigned Canvas2DLayerBridge::backBufferTexture()
{
    contextAcquired();
    m_canvas->flush();
    m_context->flush();
    GrRenderTarget* renderTarget = reinterpret_cast<GrRenderTarget*>(m_canvas->getDevice()->accessRenderTarget());
    if (renderTarget) {
        return renderTarget->asTexture()->getTextureHandle();
    }
    return 0;
}
Example #15
bool GrAAHairLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
    SkIRect devClipBounds;
    GrRenderTarget* rt = args.fPipelineBuilder->getRenderTarget();
    args.fPipelineBuilder->clip().getConservativeBounds(rt->width(), rt->height(), &devClipBounds);

    SkAutoTUnref<GrDrawBatch> batch(create_hairline_batch(args.fColor, *args.fViewMatrix, *args.fPath,
                                                          *args.fStroke, devClipBounds));
    args.fTarget->drawBatch(*args.fPipelineBuilder, batch);

    return true;
}
Example #16
void GrStencilPathOp::onExecute(GrOpFlushState* state) {
    GrRenderTarget* rt = state->drawOpArgs().renderTarget();
    SkASSERT(rt);

    int numStencilBits = rt->renderTargetPriv().numStencilBits();
    GrStencilSettings stencil(GrPathRendering::GetStencilPassSettings(fFillType),
                              fHasStencilClip, numStencilBits);

    GrPathRendering::StencilPathArgs args(fUseHWAA, state->drawOpArgs().fProxy,
                                          &fViewMatrix, &fScissor, &stencil);
    state->gpu()->pathRendering()->stencilPath(args, fPath.get());
}
Example #17
bool GrAAHairLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fTarget->getAuditTrail(),"GrAAHairlinePathRenderer::onDrawPath");
    SkIRect devClipBounds;
    GrRenderTarget* rt = args.fPipelineBuilder->getRenderTarget();
    args.fClip->getConservativeBounds(rt->width(), rt->height(), &devClipBounds);

    SkAutoTUnref<GrDrawBatch> batch(create_hairline_batch(args.fColor, *args.fViewMatrix, *args.fPath,
                                                          *args.fStyle, devClipBounds));
    args.fTarget->drawBatch(*args.fPipelineBuilder, *args.fClip, batch);

    return true;
}
Example #18
////////////////////////////////////////////////////////////////////////////////
// Shared preamble between gpu and SW-only AA clip mask creation paths.
// Handles caching, determination of the clip mask bounds, and allocation
// (if needed) of the result texture.
// Returns true if there is no more work to be done (i.e., we got a cache hit)
bool GrClipMaskManager::clipMaskPreamble(GrGpu* gpu,
                                         const GrClip& clipIn,
                                         GrTexture** result,
                                         GrIRect *resultBounds) {
    GrDrawState* origDrawState = gpu->drawState();
    GrAssert(origDrawState->isClipState());

    GrRenderTarget* rt = origDrawState->getRenderTarget();
    GrAssert(NULL != rt);

    GrRect rtRect;
    rtRect.setLTRB(0, 0,
                    GrIntToScalar(rt->width()), GrIntToScalar(rt->height()));

    // Unlike the stencil path, the alpha path is not bound to the size of the
    // render target - determine the minimum size required for the mask.
    GrRect bounds;

    if (clipIn.hasConservativeBounds()) {
        bounds = clipIn.getConservativeBounds();
        if (!bounds.intersect(rtRect)) {
            // the mask will be empty in this case
            GrAssert(false);
            bounds.setEmpty();
        }
    } else {
        // still locked to the size of the render target
        bounds = rtRect;
    }

    GrIRect intBounds;
    bounds.roundOut(&intBounds);

    // need to outset a pixel since the standard bounding box computation
    // path doesn't leave any room for antialiasing (esp. w.r.t. rects)
    intBounds.outset(1, 1);

    // TODO: make sure we don't outset if bounds are still 0,0 @ min

    if (fAACache.canReuse(clipIn, 
                          intBounds.width(),
                          intBounds.height())) {
        *result = fAACache.getLastMask();
        fAACache.getLastBound(resultBounds);
        return true;
    }

    this->setupCache(clipIn, intBounds);

    *resultBounds = intBounds;
    return false;
}
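// A compact restatement of the bound computation above using modern Sk types
// (illustrative only): clamp the conservative clip bounds to the RT, round
// out to integer pixels, then pad one pixel so AA filtering has room.
static SkIRect compute_mask_bounds(SkRect bounds, int rtW, int rtH) {
    const SkRect rtRect = SkRect::MakeWH(SkIntToScalar(rtW), SkIntToScalar(rtH));
    if (!bounds.intersect(rtRect)) {
        bounds.setEmpty();           // empty clip -> empty mask
    }
    SkIRect intBounds;
    bounds.roundOut(&intBounds);
    intBounds.outset(1, 1);          // room for AA at the edges
    return intBounds;
}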
Example #19
Platform3DObject Canvas2DLayerBridge::getBackingTexture()
{
    ASSERT(!m_destructionInProgress);
    if (!checkSurfaceValid())
        return 0;
    m_canvas->flush();
    context()->flush();
    GrRenderTarget* renderTarget = m_canvas->getTopDevice()->accessRenderTarget();
    if (renderTarget) {
        return renderTarget->asTexture()->getTextureHandle();
    }
    return 0;
}
Example #20
void set_dynamic_viewport_state(GrVkGpu* gpu,
                                GrVkCommandBuffer* cmdBuffer,
                                const GrRenderTarget& target) {
    // We always use one viewport the size of the RT
    VkViewport viewport;
    viewport.x = 0.0f;
    viewport.y = 0.0f;
    viewport.width = SkIntToScalar(target.width());
    viewport.height = SkIntToScalar(target.height());
    viewport.minDepth = 0.0f;
    viewport.maxDepth = 1.0f;
    cmdBuffer->setViewport(gpu, 0, 1, &viewport);
}
Example #21
// Create a new render target and, if necessary, copy the contents of the old
// render target into it. Note that this flushes the SkGpuDevice but
// doesn't force an OpenGL flush.
void SkSurface_Gpu::onCopyOnWrite(ContentChangeMode mode) {
    GrRenderTarget* rt = fDevice->accessDrawContext()->accessRenderTarget();
    // are we sharing our render target with the image? Note this call should never create a new
    // image because onCopyOnWrite is only called when there is a cached image.
    sk_sp<SkImage> image(this->refCachedImage(SkBudgeted::kNo, kNo_ForceUnique));
    SkASSERT(image);
    if (rt->asTexture() == as_IB(image)->peekTexture()) {
        this->fDevice->replaceDrawContext(SkSurface::kRetain_ContentChangeMode == mode);
        SkTextureImageApplyBudgetedDecision(image.get());
    } else if (kDiscard_ContentChangeMode == mode) {
        this->SkSurface_Gpu::onDiscard();
    }
}
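// Hedged caller-side sketch (assuming the sk_sp-era SkSurface API used in
// this example; `rect` and `paint` are hypothetical): drawing to a surface
// after snapshotting it is what triggers the copy-on-write path above. The
// image keeps the old contents while the surface moves to a fresh target.
static sk_sp<SkImage> snapshot_then_draw(SkSurface* surface,
                                         const SkRect& rect, const SkPaint& paint) {
    sk_sp<SkImage> snap = surface->makeImageSnapshot();  // may share the RT's texture
    surface->getCanvas()->drawRect(rect, paint);         // -> onCopyOnWrite(kRetain_...)
    return snap;                                         // still shows pre-draw pixels
}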
Example #22
// Create a new render target and, if necessary, copy the contents of the old
// render target into it. Note that this flushes the SkGpuDevice but
// doesn't force an OpenGL flush.
void SkSurface_Gpu::onCopyOnWrite(ContentChangeMode mode) {
    GrRenderTarget* rt = fDevice->accessRenderTarget();
    // are we sharing our render target with the image? Note this call should never create a new
    // image because onCopyOnWrite is only called when there is a cached image.
    SkImage* image = this->getCachedImage(kNo_Budgeted);
    SkASSERT(image);
    if (rt->asTexture() == SkTextureImageGetTexture(image)) {
        this->fDevice->replaceRenderTarget(SkSurface::kRetain_ContentChangeMode == mode);
        SkTextureImageApplyBudgetedDecision(image);
    } else if (kDiscard_ContentChangeMode == mode) {
        this->SkSurface_Gpu::onDiscard();
    }
}
Example #23
void wrap_rt_test(skiatest::Reporter* reporter, GrContext* context) {
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());

    GrBackendObject backendObj = gpu->createTestingOnlyBackendTexture(nullptr, kW, kH, kPixelConfig,
                                                                      true);
    const GrVkImageInfo* backendTex = reinterpret_cast<const GrVkImageInfo*>(backendObj);

    // check basic borrowed creation
    GrBackendRenderTargetDesc desc;
    desc.fWidth = kW;
    desc.fHeight = kH;
    desc.fConfig = kPixelConfig;
    desc.fOrigin = kTopLeft_GrSurfaceOrigin;
    desc.fSampleCnt = 0;
    desc.fStencilBits = 0;
    desc.fRenderTargetHandle = backendObj;
    GrRenderTarget* rt = gpu->wrapBackendRenderTarget(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, rt);
    rt->unref();

    // image is null
    GrVkImageInfo backendCopy = *backendTex;
    backendCopy.fImage = VK_NULL_HANDLE;
    desc.fRenderTargetHandle = (GrBackendObject)&backendCopy;
    rt = gpu->wrapBackendRenderTarget(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !rt);
    rt = gpu->wrapBackendRenderTarget(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !rt);

    // alloc is null
    backendCopy.fImage = backendTex->fImage;
    backendCopy.fAlloc = { VK_NULL_HANDLE, 0, 0 };
    // can wrap null alloc if borrowing
    rt = gpu->wrapBackendRenderTarget(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, rt);
    // but not if adopting
    rt = gpu->wrapBackendRenderTarget(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !rt);

    // check adopt creation
    backendCopy.fAlloc = backendTex->fAlloc;
    rt = gpu->wrapBackendRenderTarget(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, rt);
    rt->unref();

    gpu->deleteTestingOnlyBackendTexture(backendObj, true);
}
Example #24
void SkGLWidget::initializeGL() {
    fCurIntf = GrGLCreateNativeInterface();
    if (!fCurIntf) {
        return;
    }
    glStencilMask(0xffffffff);
    glClearStencil(0);
    glClear(GL_STENCIL_BUFFER_BIT);

    fCurContext = GrContext::Create(kOpenGL_GrBackend, (GrBackendContext) fCurIntf);
    GrBackendRenderTargetDesc desc = this->getDesc(this->width(), this->height());
    desc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    GrRenderTarget* curRenderTarget = fCurContext->wrapBackendRenderTarget(desc);
    fGpuDevice = SkGpuDevice::Create(curRenderTarget);
    fCanvas = new SkCanvas(fGpuDevice);
    curRenderTarget->unref();
}
Example #25
static GrRenderTarget* prepare_rt_for_external_access(SkSurface_Gpu* surface,
                                                      SkSurface::BackendHandleAccess access) {
    GrRenderTarget* rt = surface->getDevice()->accessRenderTarget();
    switch (access) {
        case SkSurface::kFlushRead_BackendHandleAccess:
            break;
        case SkSurface::kFlushWrite_BackendHandleAccess:
        case SkSurface::kDiscardWrite_BackendHandleAccess:
            // for now we don't special-case on Discard, but we may in the future.
            surface->notifyContentWillChange(SkSurface::kRetain_ContentChangeMode);
            // legacy: need to dirty the bitmap's genID in our device (curse it)
            surface->getDevice()->accessBitmap(false).notifyPixelsChanged();
            break;
    }
    rt->prepareForExternalIO();
    return rt;
}
Example #26
GrTexture* GrTextureProvider::internalRefScratchTexture(const GrSurfaceDesc& inDesc,
                                                        uint32_t flags) {
    SkASSERT(!this->isAbandoned());
    SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));

    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        if (!(kExact_ScratchTextureFlag & flags)) {
            // bin by pow2 with a reasonable min
            const int minSize = SkTMin(16, fGpu->caps()->minTextureSize());
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth  = SkTMax(minSize, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(minSize, GrNextPow2(desc->fHeight));
        }

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*desc, &key);
        uint32_t scratchFlags = 0;
        if (kNoPendingIO_ScratchTextureFlag & flags) {
            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
        } else  if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        }
        GrGpuResource* resource = fCache->findAndRefScratchResource(key, scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            GrRenderTarget* rt = surface->asRenderTarget();
            if (rt && fGpu->caps()->discardRenderTargetSupport()) {
                rt->discard();
            }
            return surface->asTexture();
        }
    }

    if (!(kNoCreate_ScratchTextureFlag & flags)) {
        return fGpu->createTexture(*desc, true, NULL, 0);
    }

    return NULL;
}
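// A standalone sketch of the scratch-size binning above (illustrative, with
// std::max in place of SkTMax): round each dimension up to the next power of
// two, with a floor so tiny allocations share one bin and reuse better.
#include <algorithm>
static int bin_scratch_dim(int dim, int minSize /* e.g. 16 */) {
    int pow2 = 1;
    while (pow2 < dim) {
        pow2 <<= 1;                  // GrNextPow2 equivalent
    }
    return std::max(minSize, pow2);
}
// bin_scratch_dim(70, 16) == 128;  bin_scratch_dim(9, 16) == 16.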
Example #27
void GrDrawTarget::drawPaths(GrPipelineBuilder* pipelineBuilder,
                             const GrPathProcessor* pathProc,
                             const GrPathRange* pathRange,
                             const void* indices,
                             PathIndexType indexType,
                             const float transformValues[],
                             PathTransformType transformType,
                             int count,
                             GrPathRendering::FillType fill) {
    SkASSERT(this->caps()->shaderCaps()->pathRenderingSupport());
    SkASSERT(pathRange);
    SkASSERT(indices);
    SkASSERT(0 == reinterpret_cast<intptr_t>(indices) % GrPathRange::PathIndexSizeInBytes(indexType));
    SkASSERT(transformValues);
    SkASSERT(pipelineBuilder);

    // Setup clip
    GrScissorState scissorState;
    GrPipelineBuilder::AutoRestoreFragmentProcessors arfp;
    GrPipelineBuilder::AutoRestoreStencil ars;

    if (!this->setupClip(pipelineBuilder, &arfp, &ars, &scissorState, NULL)) {
        return;
    }

    // set stencil settings for path
    GrStencilSettings stencilSettings;
    GrRenderTarget* rt = pipelineBuilder->getRenderTarget();
    GrStencilAttachment* sb = rt->renderTargetPriv().attachStencilAttachment();
    this->getPathStencilSettingsForFilltype(fill, sb, &stencilSettings);

    // Don't compute a bounding box for the dst copy texture; we'll opt
    // instead for it to just copy the entire dst. Realistically this is a moot
    // point, because any context that supports NV_path_rendering will also
    // support NV_blend_equation_advanced.
    GrDrawTarget::PipelineInfo pipelineInfo(pipelineBuilder, &scissorState, pathProc, NULL, this);
    if (pipelineInfo.mustSkipDraw()) {
        return;
    }

    this->onDrawPaths(pathProc, pathRange, indices, indexType, transformValues,
                      transformType, count, stencilSettings, pipelineInfo);
}
Example #28
static bool check_rect(GrDrawContext* dc, const SkIRect& rect, uint32_t expectedValue,
                       uint32_t* actualValue, int* failX, int* failY) {
    GrRenderTarget* rt = dc->accessRenderTarget();
    int w = rect.width();
    int h = rect.height();
    SkAutoTDeleteArray<uint32_t> pixels(new uint32_t[w * h]);
    memset(pixels.get(), ~expectedValue, sizeof(uint32_t) * w * h);
    rt->readPixels(rect.fLeft, rect.fTop, w, h, kRGBA_8888_GrPixelConfig, pixels.get());
    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
            uint32_t pixel = pixels.get()[y * w + x];
            if (pixel != expectedValue) {
                *actualValue = pixel;
                *failX = x + rect.fLeft;
                *failY = y + rect.fTop;
                return false;
            }
        }
    }
    return true;
}
Example #29
unsigned Canvas2DLayerBridge::prepareTexture(WebTextureUpdater& updater)
{
#if ENABLE(CANVAS_USES_MAILBOX)
    ASSERT_NOT_REACHED();
    return 0;
#else
    m_context->makeContextCurrent();

    TRACE_EVENT0("cc", "Canvas2DLayerBridge::SkCanvas::flush");
    m_canvas->flush();
    m_context->flush();

    // Notify skia that the state of the backing store texture object will be touched by the compositor
    GrRenderTarget* renderTarget = reinterpret_cast<GrRenderTarget*>(m_canvas->getDevice()->accessRenderTarget());
    if (renderTarget) {
        GrTexture* texture = renderTarget->asTexture();
        texture->invalidateCachedState();
        return texture->getTextureHandle();
    }
    return 0;
#endif  // !ENABLE(CANVAS_USES_MAILBOX)
}
Example #30
void GrClipMaskManager::setPipelineBuilderStencil(GrPipelineBuilder* pipelineBuilder,
        GrPipelineBuilder::AutoRestoreStencil* ars) {
    // We make two copies of the StencilSettings here (except in the early
    // exit scenario): one copy from the draw state to the stack var, then
    // another from the stack var to the gpu. We could make this class hold a
    // ptr to GrGpu's fStencilSettings and eliminate the stack copy here.

    // use stencil for clipping if clipping is enabled and the clip
    // has been written into the stencil.
    GrStencilSettings settings;

    // The GrGpu client may not be using the stencil buffer but we may need to
    // enable it in order to respect a stencil clip.
    if (pipelineBuilder->getStencil().isDisabled()) {
        if (GrClipMaskManager::kRespectClip_StencilClipMode == fClipMode) {
            settings = basic_apply_stencil_clip_settings();
        } else {
            return;
        }
    } else {
        settings = pipelineBuilder->getStencil();
    }

    int stencilBits = 0;
    GrRenderTarget* rt = pipelineBuilder->getRenderTarget();
    GrStencilBuffer* stencilBuffer = rt->renderTargetPriv().attachStencilBuffer();
    if (stencilBuffer) {
        stencilBits = stencilBuffer->bits();
    }

    SkASSERT(fClipTarget->caps()->stencilWrapOpsSupport() || !settings.usesWrapOp());
    SkASSERT(fClipTarget->caps()->twoSidedStencilSupport() || !settings.isTwoSided());
    this->adjustStencilParams(&settings, fClipMode, stencilBits);
    ars->set(pipelineBuilder);
    pipelineBuilder->setStencil(settings);
}