size_t GrGLRenderTarget::gpuMemorySize() const {
    // Total GPU footprint: one color value per pixel per owned sample
    // (fColorValuesPerPixel is precomputed in init()), times the byte size
    // of the config. Compressed configs are not valid render targets here.
    SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
    SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
    const size_t bytesPerPixel = GrBytesPerPixel(fDesc.fConfig);
    SkASSERT(bytesPerPixel > 0);
    return fColorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * bytesPerPixel;
}
// Initializes FBO ids, viewport, and the cached GPU memory size from the
// surface/ID descriptors. Called once during construction/wrapping.
void GrGLRenderTarget::init(const GrSurfaceDesc& desc, const IDDesc& idDesc) {
    fRTFBOID = idDesc.fRTFBOID;
    fTexFBOID = idDesc.fTexFBOID;
    fMSColorRenderbufferID = idDesc.fMSColorRenderbufferID;
    fIsWrapped = kWrapped_LifeCycle == idDesc.fLifeCycle;
    // GL viewport origin is bottom-left; covers the full surface.
    fViewport.fLeft = 0;
    fViewport.fBottom = 0;
    fViewport.fWidth = desc.fWidth;
    fViewport.fHeight = desc.fHeight;
    // We own one color value for each MSAA sample.
    // NOTE(review): this reads fDesc.fSampleCnt while the viewport above reads
    // the desc parameter — presumably fDesc was already assigned from desc by
    // the GrSurface base before init() runs; confirm against the constructor.
    int colorValuesPerPixel = SkTMax(1, fDesc.fSampleCnt);
    if (fTexFBOID != fRTFBOID) {
        // If we own the resolve buffer then that is one more sample per pixel.
        colorValuesPerPixel += 1;
    } else if (fTexFBOID != 0) {
        // For auto-resolving FBOs, the MSAA buffer is free.
        colorValuesPerPixel = 1;
    }
    SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
    SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
    size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
    SkASSERT(colorBytes > 0);
    // Cache the total byte footprint so gpuMemorySize queries are cheap.
    fGpuMemorySize = colorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * colorBytes;
}
size_t GrGLRenderTarget::totalBytesPerSample() const {
    // Byte cost of a single sample plane: width * height * bytes-per-pixel.
    // Compressed configs cannot be render targets, so bytes-per-pixel is valid.
    SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
    SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
    const size_t bytesPerPixel = GrBytesPerPixel(fDesc.fConfig);
    SkASSERT(bytesPerPixel > 0);
    return fDesc.fWidth * fDesc.fHeight * bytesPerPixel;
}
// This tests that GrTextureStripAtlas flushes pending IO on the texture it acquires.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrTextureStripAtlasFlush, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    // Create a 32x32 RGBA texture filled with 0xFF bytes.
    GrSurfaceDesc desc;
    desc.fWidth = 32;
    desc.fHeight = 32;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    GrTexture* texture = context->textureProvider()->createTexture(desc, SkBudgeted::kYes, nullptr, 0);
    // A render target of the same size to copy into.
    GrSurfaceDesc targetDesc = desc;
    targetDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    GrTexture* target = context->textureProvider()->createTexture(targetDesc, SkBudgeted::kYes, nullptr, 0);
    SkAutoTMalloc<uint32_t> pixels(desc.fWidth * desc.fHeight);
    memset(pixels.get(), 0xFF, sizeof(uint32_t) * desc.fWidth * desc.fHeight);
    texture->writePixels(0, 0, desc.fWidth, desc.fHeight, kRGBA_8888_GrPixelConfig, pixels.get());
    // Add a pending read to the texture, and then make it available for reuse.
    context->copySurface(target, texture);
    texture->unref();
    // NOTE(review): `texture` is only used below as a raw pointer identity check
    // after unref(); the cache presumably keeps the object alive for reuse —
    // confirm the resource cache's scratch-texture lifetime guarantees.
    // Create an atlas with parameters that allow it to reuse the texture.
    GrTextureStripAtlas::Desc atlasDesc;
    atlasDesc.fContext = context;
    atlasDesc.fConfig = desc.fConfig;
    atlasDesc.fWidth = desc.fWidth;
    atlasDesc.fHeight = desc.fHeight;
    atlasDesc.fRowHeight = 1;
    GrTextureStripAtlas* atlas = GrTextureStripAtlas::GetAtlas(atlasDesc);
    // Write to the atlas' texture. Pixel bytes are 0x01 — different from the
    // 0xFF pattern already copied to the target.
    SkImageInfo info = SkImageInfo::MakeN32(desc.fWidth, desc.fHeight, kPremul_SkAlphaType);
    size_t rowBytes = desc.fWidth * GrBytesPerPixel(desc.fConfig);
    SkBitmap bitmap;
    bitmap.allocPixels(info, rowBytes);
    memset(bitmap.getPixels(), 1, rowBytes * desc.fHeight);
    int row = atlas->lockRow(bitmap);
    // The atlas should have recycled our texture (unless the caps prefer
    // allocating fresh VRAM over flushing pending work).
    if (!context->caps()->preferVRAMUseOverFlushes())
        REPORTER_ASSERT(reporter, texture == atlas->getTexture());
    // The atlas' use of its texture shouldn't change which pixels got copied to the target.
    SkAutoTMalloc<uint32_t> actualPixels(desc.fWidth * desc.fHeight);
    bool success = target->readPixels(0, 0, desc.fWidth, desc.fHeight, kRGBA_8888_GrPixelConfig, actualPixels.get());
    REPORTER_ASSERT(reporter, success);
    // Readback must still show the original 0xFF pattern, proving the pending
    // copy was flushed before the atlas overwrote the texture.
    REPORTER_ASSERT(reporter, !memcmp(pixels.get(), actualPixels.get(), sizeof(uint32_t) * desc.fWidth * desc.fHeight));
    target->unref();
    atlas->unlockRow(row);
}
// Returns an estimate of the render target's VRAM footprint in bytes:
// width * height * bits-per-pixel * sample count, divided by 8.
size_t GrRenderTarget::sizeInBytes() const {
    int colorBits;
    if (kUnknown_GrPixelConfig == fConfig) {
        colorBits = 32; // don't know, make a guess (32 bits = 4 bytes)
    } else {
        // BUGFIX: GrBytesPerPixel returns *bytes*, but the accumulator below
        // is divided by 8 as if colorBits held *bits* (the 32-bit guess above
        // confirms that intent). Convert bytes to bits so known configs are
        // not undercounted by a factor of 8.
        colorBits = 8 * GrBytesPerPixel(fConfig);
    }
    // Accumulate in 64 bits to avoid overflow on large surfaces before the
    // final divide.
    uint64_t size = fWidth;
    size *= fHeight;
    size *= colorBits;
    size *= GrMax(1, fSampleCnt);
    return (size_t)(size / 8); // bits -> bytes
}
// Returns the texture's GPU memory footprint in bytes, including the mip
// chain when present.
size_t GrTexture::gpuMemorySize() const {
    size_t textureSize;
    if (GrPixelConfigIsCompressed(fDesc.fConfig)) {
        // Compressed formats have format-specific data sizes.
        textureSize = GrCompressedFormatDataSize(fDesc.fConfig, fDesc.fWidth, fDesc.fHeight);
    } else {
        textureSize = (size_t) fDesc.fWidth * fDesc.fHeight * GrBytesPerPixel(fDesc.fConfig);
    }
    if (this->texturePriv().hasMipMaps()) {
        // We don't have to worry about the mipmaps being a different size than
        // we'd expect because we never change fDesc.fWidth/fHeight.
        // CONSISTENCY FIX: a full mip chain adds one third on top of the base
        // level (geometric series 1/4 + 1/16 + ... = 1/3), matching
        // GrTexture::onGpuMemorySize(); the previous *= 2 overcounted.
        textureSize += textureSize / 3;
    }
    return textureSize;
}
size_t GrTexture::onGpuMemorySize() const {
    // Base-level footprint: compressed formats have their own size rule,
    // uncompressed formats are width * height * bytes-per-pixel.
    size_t size;
    if (GrPixelConfigIsCompressed(fDesc.fConfig)) {
        size = GrCompressedFormatDataSize(fDesc.fConfig, fDesc.fWidth, fDesc.fHeight);
    } else {
        size = (size_t) fDesc.fWidth * fDesc.fHeight * GrBytesPerPixel(fDesc.fConfig);
    }
    // A full mip chain adds one third of the base level (1/4 + 1/16 + ...).
    // fDesc.fWidth/fHeight never change, so the base size stays valid.
    if (this->texturePriv().hasMipMaps()) {
        size += size / 3;
    }
    // Render targets account for their memory elsewhere.
    SkASSERT(!SkToBool(fDesc.fFlags & kRenderTarget_GrSurfaceFlag));
    SkASSERT(size <= WorseCaseSize(fDesc));
    return size;
}
// A BatchPlot is one fixed-size rectangular region of the batch atlas.
// (index, genID) uniquely identify the plot across evictions; fOffset is its
// top-left corner in atlas texel space.
GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY, int width, int height, GrPixelConfig config)
    // Fresh plots start with already-flushed tokens, i.e. no pending uploads/uses.
    : fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
    , fLastUse(GrBatchDrawToken::AlreadyFlushedToken())
    , fIndex(index)
    , fGenID(genID)
    , fID(CreateId(fIndex, fGenID)) // packed id derived from index + generation
    , fData(nullptr)                // CPU-side staging buffer, allocated lazily
    , fWidth(width)
    , fHeight(height)
    , fX(offX)
    , fY(offY)
    , fRects(nullptr)               // rectanizer, allocated lazily
    // Texel offset of this plot within the backing texture.
    , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
    , fConfig(config)
    , fBytesPerPixel(GrBytesPerPixel(config))
#ifdef SK_DEBUG
    , fDirty(false)
#endif
{
    fDirtyRect.setEmpty();
}
// Builds an atlas that carves its backing texture into a numPlotsX x numPlotsY
// grid of equal plots; every plot is pushed onto the LRU list up front.
GrAtlas::GrAtlas(GrGpu* gpu, GrPixelConfig config, GrSurfaceFlags flags, const SkISize& backingTextureSize, int numPlotsX, int numPlotsY, bool batchUploads) {
    fGpu = SkRef(gpu);
    fPixelConfig = config;
    fFlags = flags;
    fBackingTextureSize = backingTextureSize;
    fNumPlotsX = numPlotsX;
    fNumPlotsY = numPlotsY;
    fBatchUploads = batchUploads;
    fTexture = NULL; // backing texture is created lazily

    const int texWidth = fBackingTextureSize.width();
    const int texHeight = fBackingTextureSize.height();
    const int plotWidth = texWidth / fNumPlotsX;
    const int plotHeight = texHeight / fNumPlotsY;
    // The backing texture must divide evenly into the plot grid.
    SkASSERT(plotWidth * fNumPlotsX == texWidth);
    SkASSERT(plotHeight * fNumPlotsY == texHeight);

    // We currently do not support compressed atlases...
    SkASSERT(!GrPixelConfigIsCompressed(config));

    // Allocate the plot array and initialize each plot, building the LRU list
    // as we go (iteration order matches the original: y,x descending).
    const size_t bytesPerPixel = GrBytesPerPixel(fPixelConfig);
    fPlotArray = new GrPlot[(fNumPlotsX * fNumPlotsY)];
    GrPlot* plot = fPlotArray;
    for (int y = numPlotsY - 1; y >= 0; --y) {
        for (int x = numPlotsX - 1; x >= 0; --x) {
            plot->init(this, y * numPlotsX + x, x, y, plotWidth, plotHeight, bytesPerPixel, batchUploads);
            fPlotList.addToHead(plot);
            ++plot;
        }
    }
}
// Sets up the fixed GR_PLOT_WIDTH x GR_PLOT_HEIGHT plot grid and seeds the
// LRU list. The backing texture itself is created lazily.
GrAtlasMgr::GrAtlasMgr(GrGpu* gpu, GrPixelConfig config) {
    fGpu = gpu;
    fPixelConfig = config;
    gpu->ref(); // we hold a ref on the GPU for our lifetime
    fTexture = NULL;

    // Allocate all plots and link them onto the LRU list (y,x descending, as
    // before, so the head ends up at plot (0,0)).
    const size_t bytesPerPixel = GrBytesPerPixel(fPixelConfig);
    fPlotArray = SkNEW_ARRAY(GrPlot, (GR_PLOT_WIDTH*GR_PLOT_HEIGHT));
    GrPlot* plot = fPlotArray;
    for (int y = GR_PLOT_HEIGHT - 1; y >= 0; --y) {
        for (int x = GR_PLOT_WIDTH - 1; x >= 0; --x) {
            plot->fAtlasMgr = this;
            plot->fOffset.set(x, y);
            plot->fBytesPerPixel = bytesPerPixel;
            fPlotList.addToHead(plot);
            ++plot;
        }
    }
}
// Reads a rectangle of pixels from a surface into `buffer`, clipping the
// requested rect to the surface bounds first. Returns false on failure.
bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height, GrPixelConfig config, void* buffer, size_t rowBytes) {
    this->handleDirtyContext();

    // We cannot read pixels into a compressed buffer.
    if (GrPixelConfigIsCompressed(config)) {
        return false;
    }

    // Clamp the read rect (and adjust buffer/rowBytes) to the surface;
    // a fully clipped-out read fails.
    const size_t bytesPerPixel = GrBytesPerPixel(config);
    const bool paramsOk = GrSurfacePriv::AdjustReadPixelParams(surface->width(), surface->height(), bytesPerPixel,
                                                               &left, &top, &width, &height, &buffer, &rowBytes);
    if (!paramsOk) {
        return false;
    }

    // Delegate to the backend-specific implementation.
    return this->onReadPixels(surface, left, top, width, height, config, buffer, rowBytes);
}
// Fills in fCurrentProgram.fProgramDesc — the GL program cache key — from the
// current draw state, blend optimizations, and per-stage sampler settings.
// Fields that cannot affect codegen are forced to canonical values so
// equivalent draws hash to the same program.
void GrGpuGLShaders::buildProgram(GrPrimitiveType type, BlendOptFlags blendOpts, GrBlendCoeff dstCoeff, GrCustomStage** customStages) {
    ProgramDesc& desc = fCurrentProgram.fProgramDesc;
    const GrDrawState& drawState = this->getDrawState();

    // This should already have been caught
    GrAssert(!(kSkipDraw_BlendOptFlag & blendOpts));

    // When the blend opt emits transparent black, coverage (and possibly
    // color) computation can be skipped entirely.
    bool skipCoverage = SkToBool(blendOpts & kEmitTransBlack_BlendOptFlag);
    bool skipColor = SkToBool(blendOpts & (kEmitTransBlack_BlendOptFlag | kEmitCoverage_BlendOptFlag));

    // The descriptor is used as a cache key. Thus when a field of the
    // descriptor will not affect program generation (because of the vertex
    // layout in use or other descriptor field settings) it should be set
    // to a canonical value to avoid duplicate programs with different keys.
    // Must initialize all fields or cache will have false negatives!
    desc.fVertexLayout = this->getVertexLayout();
    desc.fEmitsPointSize = kPoints_PrimitiveType == type;

    bool requiresAttributeColors = !skipColor && SkToBool(desc.fVertexLayout & kColor_VertexLayoutBit);
    bool requiresAttributeCoverage = !skipCoverage && SkToBool(desc.fVertexLayout & kCoverage_VertexLayoutBit);

    // fColorInput/fCoverageInput records how colors are specified for the.
    // program. So we strip the bits from the layout to avoid false negatives
    // when searching for an existing program in the cache.
    desc.fVertexLayout &= ~(kColor_VertexLayoutBit | kCoverage_VertexLayoutBit);

    desc.fColorFilterXfermode = skipColor ? SkXfermode::kDst_Mode : drawState.getColorFilterMode();
    desc.fColorMatrixEnabled = drawState.isStateFlagEnabled(GrDrawState::kColorMatrix_StateBit);

    // no reason to do edge aa or look at per-vertex coverage if coverage is
    // ignored
    if (skipCoverage) {
        desc.fVertexLayout &= ~(kEdge_VertexLayoutBit | kCoverage_VertexLayoutBit);
    }

    // Classify the color source, preferring compile-time-constant inputs
    // when the aggressive-opts build flag allows it.
    bool colorIsTransBlack = SkToBool(blendOpts & kEmitTransBlack_BlendOptFlag);
    bool colorIsSolidWhite = (blendOpts & kEmitCoverage_BlendOptFlag) || (!requiresAttributeColors && 0xffffffff == drawState.getColor());
    if (GR_AGGRESSIVE_SHADER_OPTS && colorIsTransBlack) {
        desc.fColorInput = ProgramDesc::kTransBlack_ColorInput;
    } else if (GR_AGGRESSIVE_SHADER_OPTS && colorIsSolidWhite) {
        desc.fColorInput = ProgramDesc::kSolidWhite_ColorInput;
    } else if (GR_GL_NO_CONSTANT_ATTRIBUTES && !requiresAttributeColors) {
        desc.fColorInput = ProgramDesc::kUniform_ColorInput;
    } else {
        desc.fColorInput = ProgramDesc::kAttribute_ColorInput;
    }

    // Same classification for the coverage source.
    bool covIsSolidWhite = !requiresAttributeCoverage && 0xffffffff == drawState.getCoverage();
    if (skipCoverage) {
        desc.fCoverageInput = ProgramDesc::kTransBlack_ColorInput;
    } else if (covIsSolidWhite) {
        desc.fCoverageInput = ProgramDesc::kSolidWhite_ColorInput;
    } else if (GR_GL_NO_CONSTANT_ATTRIBUTES && !requiresAttributeCoverage) {
        desc.fCoverageInput = ProgramDesc::kUniform_ColorInput;
    } else {
        desc.fCoverageInput = ProgramDesc::kAttribute_ColorInput;
    }

    int lastEnabledStage = -1;

    if (!skipCoverage && (desc.fVertexLayout & GrDrawTarget::kEdge_VertexLayoutBit)) {
        desc.fVertexEdgeType = drawState.getVertexEdgeType();
    } else {
        // use canonical value when not set to avoid cache misses
        desc.fVertexEdgeType = GrDrawState::kHairLine_EdgeType;
    }

    // Per-stage descriptor setup.
    for (int s = 0; s < GrDrawState::kNumStages; ++s) {
        StageDesc& stage = desc.fStages[s];
        stage.fOptFlags = 0;
        stage.setEnabled(this->isStageEnabled(s));
        // Stages before the first coverage stage compute color; the rest
        // compute coverage. Skip per the corresponding blend-opt flag.
        bool skip = s < drawState.getFirstCoverageStage() ? skipColor : skipCoverage;

        if (!skip && stage.isEnabled()) {
            lastEnabledStage = s;
            const GrGLTexture* texture = static_cast<const GrGLTexture*>(drawState.getTexture(s));
            GrAssert(NULL != texture);
            const GrSamplerState& sampler = drawState.getSampler(s);
            // we matrix to invert when orientation is TopDown, so make sure
            // we aren't in that case before flagging as identity.
            if (TextureMatrixIsIdentity(texture, sampler)) {
                stage.fOptFlags |= StageDesc::kIdentityMatrix_OptFlagBit;
            } else if (!sampler.getMatrix().hasPerspective()) {
                stage.fOptFlags |= StageDesc::kNoPerspective_OptFlagBit;
            }
            // Map the sampler's sample mode onto the shader coord mapping.
            switch (sampler.getSampleMode()) {
                case GrSamplerState::kNormal_SampleMode:
                    stage.fCoordMapping = StageDesc::kIdentity_CoordMapping;
                    break;
                case GrSamplerState::kRadial_SampleMode:
                    stage.fCoordMapping = StageDesc::kRadialGradient_CoordMapping;
                    break;
                case GrSamplerState::kRadial2_SampleMode:
                    if (sampler.radial2IsDegenerate()) {
                        stage.fCoordMapping = StageDesc::kRadial2GradientDegenerate_CoordMapping;
                    } else {
                        stage.fCoordMapping = StageDesc::kRadial2Gradient_CoordMapping;
                    }
                    break;
                case GrSamplerState::kSweep_SampleMode:
                    stage.fCoordMapping = StageDesc::kSweepGradient_CoordMapping;
                    break;
                default:
                    GrCrash("Unexpected sample mode!");
                    break;
            }
            // Map the filter onto the shader fetch mode.
            switch (sampler.getFilter()) {
                // these both can use a regular texture2D()
                case GrSamplerState::kNearest_Filter:
                case GrSamplerState::kBilinear_Filter:
                    stage.fFetchMode = StageDesc::kSingle_FetchMode;
                    break;
                // performs 4 texture2D()s
                case GrSamplerState::k4x4Downsample_Filter:
                    stage.fFetchMode = StageDesc::k2x2_FetchMode;
                    break;
                // performs fKernelWidth texture2D()s
                case GrSamplerState::kConvolution_Filter:
                    stage.fFetchMode = StageDesc::kConvolution_FetchMode;
                    break;
                case GrSamplerState::kDilate_Filter:
                    stage.fFetchMode = StageDesc::kDilate_FetchMode;
                    break;
                case GrSamplerState::kErode_Filter:
                    stage.fFetchMode = StageDesc::kErode_FetchMode;
                    break;
                default:
                    GrCrash("Unexpected filter!");
                    break;
            }
            if (sampler.hasTextureDomain()) {
                GrAssert(GrSamplerState::kClamp_WrapMode == sampler.getWrapX() && GrSamplerState::kClamp_WrapMode == sampler.getWrapY());
                stage.fOptFlags |= StageDesc::kCustomTextureDomain_OptFlagBit;
            }
            stage.fInConfigFlags = 0;
            if (!this->glCaps().textureSwizzleSupport()) {
                if (GrPixelConfigIsAlphaOnly(texture->config())) {
                    // if we don't have texture swizzle support then
                    // the shader must smear the single channel after
                    // reading the texture
                    if (this->glCaps().textureRedSupport()) {
                        // we can use R8 textures so use kSmearRed
                        stage.fInConfigFlags |= StageDesc::kSmearRed_InConfigFlag;
                    } else {
                        // we can use A8 textures so use kSmearAlpha
                        stage.fInConfigFlags |= StageDesc::kSmearAlpha_InConfigFlag;
                    }
                } else if (sampler.swapsRAndB()) {
                    stage.fInConfigFlags |= StageDesc::kSwapRAndB_InConfigFlag;
                }
            }
            if (GrPixelConfigIsUnpremultiplied(texture->config())) {
                // The shader generator assumes that color channels are bytes
                // when rounding.
                GrAssert(4 == GrBytesPerPixel(texture->config()));
                if (kUpOnWrite_DownOnRead_UnpremulConversion == fUnpremulConversion) {
                    stage.fInConfigFlags |= StageDesc::kMulRGBByAlpha_RoundDown_InConfigFlag;
                } else {
                    stage.fInConfigFlags |= StageDesc::kMulRGBByAlpha_RoundUp_InConfigFlag;
                }
            }
            if (sampler.getFilter() == GrSamplerState::kDilate_Filter || sampler.getFilter() == GrSamplerState::kErode_Filter) {
                stage.fKernelWidth = sampler.getKernelWidth();
            } else {
                // canonical value when unused, to avoid spurious cache misses
                stage.fKernelWidth = 0;
            }
            setup_custom_stage(&stage, sampler, customStages, &fCurrentProgram, s);
        } else {
            // Disabled/skipped stage: zero every field so the cache key is
            // canonical.
            stage.fOptFlags = 0;
            stage.fCoordMapping = (StageDesc::CoordMapping) 0;
            stage.fInConfigFlags = 0;
            stage.fFetchMode = (StageDesc::FetchMode) 0;
            stage.fKernelWidth = 0;
            stage.fCustomStageKey = 0;
            customStages[s] = NULL;
        }
    }

    if (GrPixelConfigIsUnpremultiplied(drawState.getRenderTarget()->config())) {
        // The shader generator assumes that color channels are bytes
        // when rounding.
        GrAssert(4 == GrBytesPerPixel(drawState.getRenderTarget()->config()));
        if (kUpOnWrite_DownOnRead_UnpremulConversion == fUnpremulConversion) {
            desc.fOutputConfig = ProgramDesc::kUnpremultiplied_RoundUp_OutputConfig;
        } else {
            desc.fOutputConfig = ProgramDesc::kUnpremultiplied_RoundDown_OutputConfig;
        }
    } else {
        desc.fOutputConfig = ProgramDesc::kPremultiplied_OutputConfig;
    }

    desc.fDualSrcOutput = ProgramDesc::kNone_DualSrcOutput;

    // currently the experimental GS will only work with triangle prims
    // (and it doesn't do anything other than pass through values from
    // the VS to the FS anyway).
#if 0 && GR_GL_EXPERIMENTAL_GS
    desc.fExperimentalGS = this->getCaps().fGeometryShaderSupport;
#endif

    // we want to avoid generating programs with different "first cov stage"
    // values when they would compute the same result.
    // We set field in the desc to kNumStages when either there are no
    // coverage stages or the distinction between coverage and color is
    // immaterial.
    int firstCoverageStage = GrDrawState::kNumStages;
    desc.fFirstCoverageStage = GrDrawState::kNumStages;
    bool hasCoverage = drawState.getFirstCoverageStage() <= lastEnabledStage;
    if (hasCoverage) {
        firstCoverageStage = drawState.getFirstCoverageStage();
    }

    // other coverage inputs
    if (!hasCoverage) {
        hasCoverage = requiresAttributeCoverage || (desc.fVertexLayout & GrDrawTarget::kEdge_VertexLayoutBit);
    }

    if (hasCoverage) {
        // color filter is applied between color/coverage computation
        if (SkXfermode::kDst_Mode != desc.fColorFilterXfermode) {
            desc.fFirstCoverageStage = firstCoverageStage;
        }
        // With dual-source blending we can emit coverage through a second
        // color output instead of folding it into the primary color.
        if (this->getCaps().fDualSourceBlendingSupport && !(blendOpts & (kEmitCoverage_BlendOptFlag | kCoverageAsAlpha_BlendOptFlag))) {
            if (kZero_BlendCoeff == dstCoeff) {
                // write the coverage value to second color
                desc.fDualSrcOutput = ProgramDesc::kCoverage_DualSrcOutput;
                desc.fFirstCoverageStage = firstCoverageStage;
            } else if (kSA_BlendCoeff == dstCoeff) {
                // SA dst coeff becomes 1-(1-SA)*coverage when dst is partially
                // cover
                desc.fDualSrcOutput = ProgramDesc::kCoverageISA_DualSrcOutput;
                desc.fFirstCoverageStage = firstCoverageStage;
            } else if (kSC_BlendCoeff == dstCoeff) {
                // SA dst coeff becomes 1-(1-SA)*coverage when dst is partially
                // cover
                desc.fDualSrcOutput = ProgramDesc::kCoverageISC_DualSrcOutput;
                desc.fFirstCoverageStage = firstCoverageStage;
            }
        }
    }
}