bool GrSWMaskHelper::init(const GrIRect& resultBounds, const SkMatrix* matrix) {
    if (NULL != matrix) {
        fMatrix = *matrix;
    } else {
        fMatrix.setIdentity();
    }

    // Now translate so the bound's UL corner is at the origin
    fMatrix.postTranslate(-resultBounds.fLeft * SK_Scalar1,
                          -resultBounds.fTop * SK_Scalar1);
    GrIRect bounds = GrIRect::MakeWH(resultBounds.width(),
                                     resultBounds.height());

    fBM.setConfig(SkBitmap::kA8_Config, bounds.fRight, bounds.fBottom);
    if (!fBM.allocPixels()) {
        return false;
    }
    sk_bzero(fBM.getPixels(), fBM.getSafeSize());

    sk_bzero(&fDraw, sizeof(fDraw));
    fRasterClip.setRect(bounds);
    fDraw.fRC     = &fRasterClip;
    fDraw.fClip   = &fRasterClip.bwRgn();
    fDraw.fMatrix = &fMatrix;
    fDraw.fBitmap = &fBM;
    return true;
}
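// A minimal standalone sketch (not part of GrSWMaskHelper; the rect values
// are hypothetical) of the remapping init() builds: after the postTranslate,
// the device-space UL corner of resultBounds maps to the mask's pixel (0, 0).
static void exampleMaskLocalMapping() {
    GrIRect resultBounds = GrIRect::MakeLTRB(30, 40, 130, 140); // hypothetical
    SkMatrix m;
    m.setIdentity();
    m.postTranslate(-resultBounds.fLeft * SK_Scalar1,
                    -resultBounds.fTop * SK_Scalar1);

    SkPoint p = SkPoint::Make(SkIntToScalar(resultBounds.fLeft),
                              SkIntToScalar(resultBounds.fTop));
    m.mapPoints(&p, 1); // maps in place: p is now (0, 0)
}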
void GrSWMaskHelper::DrawToTargetWithPathMask(GrTexture* texture,
                                              GrDrawTarget* target,
                                              const GrIRect& rect) {
    GrDrawState* drawState = target->drawState();

    GrDrawState::AutoDeviceCoordDraw adcd(drawState);
    if (!adcd.succeeded()) {
        return;
    }
    enum {
        // the SW path renderer shares this stage with glyph
        // rendering (kGlyphMaskStage in GrBatchedTextContext)
        kPathMaskStage = GrPaint::kTotalStages,
    };
    GrAssert(!drawState->isStageEnabled(kPathMaskStage));
    drawState->stage(kPathMaskStage)->reset();
    drawState->createTextureEffect(kPathMaskStage, texture);
    SkScalar w = SkIntToScalar(rect.width());
    SkScalar h = SkIntToScalar(rect.height());
    GrRect maskRect = GrRect::MakeWH(w / texture->width(),
                                     h / texture->height());

    const GrRect* srcRects[GrDrawState::kNumStages] = { NULL };
    srcRects[kPathMaskStage] = &maskRect;
    GrRect dstRect = GrRect::MakeLTRB(SK_Scalar1 * rect.fLeft,
                                      SK_Scalar1 * rect.fTop,
                                      SK_Scalar1 * rect.fRight,
                                      SK_Scalar1 * rect.fBottom);
    target->drawRect(dstRect, NULL, srcRects, NULL);
    drawState->disableStage(kPathMaskStage);
}
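// Standalone sketch (hypothetical sizes) of the normalized src-rect math
// above: the per-stage source rects passed to drawRect() are in 0..1 texture
// coordinates, and the mask occupies only the top-left rect.width() x
// rect.height() texels of a possibly larger scratch texture, so the rect is
// divided through by the texture dimensions.
static GrRect exampleNormalizedMaskRect() {
    int rectW = 100, rectH = 50; // hypothetical device-space mask size
    int texW = 128, texH = 64;   // hypothetical scratch texture size
    return GrRect::MakeWH(SkIntToScalar(rectW) / texW,
                          SkIntToScalar(rectH) / texH);
    // == (0, 0, 0.78125, 0.78125): sampling stops at the last valid mask texel
}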
bool GrSWMaskHelper::init(const GrIRect& pathDevBounds,
                          const GrPoint* translate) {
    fMatrix = fContext->getMatrix();
    if (NULL != translate) {
        fMatrix.postTranslate(translate->fX, translate->fY);
    }
    // Now translate so the bound's UL corner is at the origin
    fMatrix.postTranslate(-pathDevBounds.fLeft * SK_Scalar1,
                          -pathDevBounds.fTop * SK_Scalar1);
    GrIRect bounds = GrIRect::MakeWH(pathDevBounds.width(),
                                     pathDevBounds.height());

    fBM.setConfig(SkBitmap::kA8_Config, bounds.fRight, bounds.fBottom);
    if (!fBM.allocPixels()) {
        return false;
    }
    sk_bzero(fBM.getPixels(), fBM.getSafeSize());

    sk_bzero(&fDraw, sizeof(fDraw));
    fRasterClip.setRect(bounds);
    fDraw.fRC     = &fRasterClip;
    fDraw.fClip   = &fRasterClip.bwRgn();
    fDraw.fMatrix = &fMatrix;
    fDraw.fBitmap = &fBM;
    return true;
}
////////////////////////////////////////////////////////////////////////////////
// Shared preamble between gpu and SW-only AA clip mask creation paths.
// Handles caching, determination of clip mask bound & allocation (if needed)
// of the result texture
// Returns true if there is no more work to be done (i.e., we got a cache hit)
bool GrClipMaskManager::clipMaskPreamble(GrGpu* gpu,
                                         const GrClip& clipIn,
                                         GrTexture** result,
                                         GrIRect* resultBounds) {
    GrDrawState* origDrawState = gpu->drawState();
    GrAssert(origDrawState->isClipState());

    GrRenderTarget* rt = origDrawState->getRenderTarget();
    GrAssert(NULL != rt);

    GrRect rtRect;
    rtRect.setLTRB(0, 0,
                   GrIntToScalar(rt->width()), GrIntToScalar(rt->height()));

    // unlike the stencil path the alpha path is not bound to the size of the
    // render target - determine the minimum size required for the mask
    GrRect bounds;

    if (clipIn.hasConservativeBounds()) {
        bounds = clipIn.getConservativeBounds();
        if (!bounds.intersect(rtRect)) {
            // the mask will be empty in this case
            GrAssert(false);
            bounds.setEmpty();
        }
    } else {
        // still locked to the size of the render target
        bounds = rtRect;
    }

    GrIRect intBounds;
    bounds.roundOut(&intBounds);

    // need to outset a pixel since the standard bounding box computation
    // path doesn't leave any room for antialiasing (esp. w.r.t. rects)
    intBounds.outset(1, 1);

    // TODO: make sure we don't outset if bounds are still 0,0 @ min

    if (fAACache.canReuse(clipIn, intBounds.width(), intBounds.height())) {
        *result = fAACache.getLastMask();
        fAACache.getLastBound(resultBounds);
        return true;
    }

    this->setupCache(clipIn, intBounds);

    *resultBounds = intBounds;
    return false;
}
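// Standalone sketch (hypothetical numbers) of the bounds computation above:
// fractional conservative bounds are rounded out to whole pixels and then
// outset by one pixel to leave room for AA filtering at the edges.
static GrIRect exampleMaskBounds() {
    GrRect bounds = GrRect::MakeLTRB(SkFloatToScalar(10.3f),
                                     SkFloatToScalar(20.7f),
                                     SkFloatToScalar(50.2f),
                                     SkFloatToScalar(60.5f));
    GrIRect intBounds;
    bounds.roundOut(&intBounds); // -> (10, 20, 51, 61)
    intBounds.outset(1, 1);      // -> (9, 19, 52, 62): one-pixel AA border
    return intBounds;
}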
void GrClipMaskManager::setupCache(const SkClipStack& clipIn,
                                   const GrIRect& bounds) {
    // Since we are setting up the cache we know the last lookup was a miss
    // Free up the currently cached mask so it can be reused
    fAACache.reset();

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = bounds.width();
    desc.fHeight = bounds.height();
    desc.fConfig = kAlpha_8_GrPixelConfig;

    fAACache.acquireMask(clipIn, desc, bounds);
}
// get a texture to act as a temporary buffer for AA clip boolean operations
// TODO: given the expense of createTexture we may want to just cache this too
void GrClipMaskManager::getTemp(const GrIRect& bounds,
                                GrAutoScratchTexture* temp) {
    if (NULL != temp->texture()) {
        // we've already allocated the temp texture
        return;
    }

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = bounds.width();
    desc.fHeight = bounds.height();
    desc.fConfig = kAlpha_8_GrPixelConfig;

    temp->set(this->getContext(), desc);
}
void GrClipMaskManager::setupCache(const GrClip& clipIn,
                                   const GrIRect& bounds) {
    // Since we are setting up the cache we know the last lookup was a miss
    // Free up the currently cached mask so it can be reused
    fAACache.reset();

    const GrTextureDesc desc = {
        kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit,
        bounds.width(),
        bounds.height(),
        kAlpha_8_GrPixelConfig,
        0 // samples
    };

    fAACache.acquireMask(clipIn, desc, bounds);
}
// get a texture to act as a temporary buffer for AA clip boolean operations
// TODO: given the expense of createTexture we may want to just cache this too
void GrClipMaskManager::getTemp(const GrIRect& bounds,
                                GrAutoScratchTexture* temp) {
    if (NULL != temp->texture()) {
        // we've already allocated the temp texture
        return;
    }

    const GrTextureDesc desc = {
        kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit,
        bounds.width(),
        bounds.height(),
        kAlpha_8_GrPixelConfig,
        0 // samples
    };

    temp->set(fAACache.getContext(), desc);
}