// Attempts to merge 'that' text batch into this one. Returns true on success,
// in which case this batch absorbs that's glyph geometry and steals its blob
// refs; 'that' is left empty so it won't unref the stolen blobs.
bool GrAtlasTextBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
    GrAtlasTextBatch* that = t->cast<GrAtlasTextBatch>();
    if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                that->bounds(), caps)) {
        return false;
    }
    // Both batches must render the same kind of mask.
    if (fMaskType != that->fMaskType) {
        return false;
    }
    if (!this->usesDistanceFields()) {
        // Bitmap text: color bitmap masks bake the color in, so colors must match.
        if (kColorBitmapMask_MaskType == fMaskType && this->color() != that->color()) {
            return false;
        }
        if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
            return false;
        }
    } else {
        // Distance-field text: the view matrix, filtered color and BGR flag all
        // feed the shader, so each must agree before merging.
        if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
            return false;
        }
        if (fFilteredColor != that->fFilteredColor) {
            return false;
        }
        if (fUseBGR != that->fUseBGR) {
            return false;
        }
    }

    fBatch.fNumGlyphs += that->numGlyphs();

    // Reallocate space for geo data if necessary and then import that's geo data.
    int newGeoCount = that->fGeoCount + fGeoCount;
    // We assume (and here enforce) that the allocation size is the smallest power of two that
    // is greater than or equal to the number of geometries (and at least
    // kMinGeometryAllocated).
    int newAllocSize = GrNextPow2(newGeoCount);
    int currAllocSize = SkTMax<int>(kMinGeometryAllocated, GrNextPow2(fGeoCount));
    if (newGeoCount > currAllocSize) {
        fGeoData.realloc(newAllocSize);
    }
    memcpy(&fGeoData[fGeoCount], that->fGeoData.get(), that->fGeoCount * sizeof(Geometry));

    // We steal the ref on the blobs from the other TextBatch and set its count to 0 so that
    // it doesn't try to unref them.
#ifdef SK_DEBUG
    for (int i = 0; i < that->fGeoCount; ++i) {
        that->fGeoData.get()[i].fBlob = (Blob*)0x1;  // poison so accidental use is obvious
    }
#endif
    that->fGeoCount = 0;
    fGeoCount = newGeoCount;
    this->joinBounds(*that);
    return true;
}
uint32_t GrPathUtils::quadraticPointCount(const SkPoint points[], SkScalar tol) { if (tol < gMinCurveTol) { tol = gMinCurveTol; } SkASSERT(tol > 0); SkScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]); if (!SkScalarIsFinite(d)) { return MAX_POINTS_PER_CURVE; } else if (d <= tol) { return 1; } else { // Each time we subdivide, d should be cut in 4. So we need to // subdivide x = log4(d/tol) times. x subdivisions creates 2^(x) // points. // 2^(log4(x)) = sqrt(x); SkScalar divSqrt = SkScalarSqrt(d / tol); if (((SkScalar)SK_MaxS32) <= divSqrt) { return MAX_POINTS_PER_CURVE; } else { int temp = SkScalarCeilToInt(divSqrt); int pow2 = GrNextPow2(temp); // Because of NaNs & INFs we can wind up with a degenerate temp // such that pow2 comes out negative. Also, our point generator // will always output at least one pt. if (pow2 < 1) { pow2 = 1; } return SkTMin(pow2, MAX_POINTS_PER_CURVE); } } }
uint32_t GrPathUtils::cubicPointCount(const GrPoint points[], GrScalar tol) { if (tol < gMinCurveTol) { tol == gMinCurveTol; } GrAssert(tol > 0); GrScalar d = GrMax( points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]), points[2].distanceToLineSegmentBetweenSqd(points[0], points[3])); d = SkScalarSqrt(d); if (d <= tol) { return 1; } else { int temp = SkScalarCeil(SkScalarSqrt(SkScalarDiv(d, tol))); int pow2 = GrNextPow2(temp); // Because of NaNs & INFs we can wind up with a degenerate temp // such that pow2 comes out negative. Also, our point generator // will always output at least one pt. if (pow2 < 1) { pow2 = 1; } return GrMin(pow2, MAX_POINTS_PER_CURVE); } }
// Returns an index buffer of at least 'size' bytes. Dynamic buffers are binned
// by pow2 size so they can be recycled through the scratch resource cache.
GrIndexBuffer* GrResourceProvider::getIndexBuffer(size_t size, bool dynamic,
                                                  bool calledDuringFlush) {
    if (this->isAbandoned()) {
        return NULL;
    }

    if (dynamic) {
        // Bin by pow2 with a reasonable minimum to improve cache reuse.
        static const uint32_t MIN_SIZE = 1 << 12;
        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));

        GrScratchKey key;
        GrIndexBuffer::ComputeScratchKey(size, dynamic, &key);
        // During a flush we must not hand back a buffer with pending IO.
        uint32_t scratchFlags = calledDuringFlush
                ? (uint32_t) GrResourceCache::kRequireNoPendingIO_ScratchFlag
                : (uint32_t) GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        if (GrGpuResource* cached = this->cache()->findAndRefScratchResource(key,
                                                                             scratchFlags)) {
            return static_cast<GrIndexBuffer*>(cached);
        }
    }
    return this->gpu()->createIndexBuffer(size, dynamic);
}
// Creates (or recycles) an index buffer of at least 'size' bytes. Dynamic
// buffers are pow2-binned and looked up in the scratch cache first.
GrIndexBuffer* GrResourceProvider::createIndexBuffer(size_t size, BufferUsage usage,
                                                     uint32_t flags) {
    if (this->isAbandoned()) {
        return nullptr;
    }

    bool noPendingIO = SkToBool(flags & kNoPendingIO_Flag);
    bool dynamic = kDynamic_BufferUsage == usage;
    if (dynamic) {
        // Bin by pow2 with a reasonable minimum so scratch buffers get reused.
        static const uint32_t MIN_SIZE = 1 << 12;
        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));

        GrScratchKey key;
        GrIndexBuffer::ComputeScratchKey(size, true, &key);
        uint32_t scratchFlags = noPendingIO
                ? (uint32_t) GrResourceCache::kRequireNoPendingIO_ScratchFlag
                : (uint32_t) GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        if (GrGpuResource* cached = this->cache()->findAndRefScratchResource(key, size,
                                                                             scratchFlags)) {
            return static_cast<GrIndexBuffer*>(cached);
        }
    }
    return this->gpu()->createIndexBuffer(size, dynamic);
}
// Creates (or recycles) a buffer of at least 'size' bytes. Only dynamic
// buffers go through the pow2-binned scratch cache; other access patterns are
// created directly. If 'data' is supplied it is uploaded into the result.
GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
                                           GrAccessPattern accessPattern, uint32_t flags,
                                           const void* data) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    if (kDynamic_GrAccessPattern != accessPattern) {
        return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
    }

    // Bin by pow2 with a reasonable minimum so dynamic buffers get reused.
    static const uint32_t MIN_SIZE = 1 << 12;
    size_t binnedSize = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));

    GrScratchKey key;
    GrBuffer::ComputeScratchKeyForDynamicBuffer(binnedSize, intendedType, &key);
    uint32_t scratchFlags = (flags & kNoPendingIO_Flag)
            ? (uint32_t) GrResourceCache::kRequireNoPendingIO_ScratchFlag
            : (uint32_t) GrResourceCache::kPreferNoPendingIO_ScratchFlag;
    GrBuffer* result = static_cast<GrBuffer*>(
            this->cache()->findAndRefScratchResource(key, binnedSize, scratchFlags));
    if (!result) {
        result = this->gpu()->createBuffer(binnedSize, intendedType, kDynamic_GrAccessPattern);
        if (!result) {
            return nullptr;
        }
    }
    if (data) {
        // Upload only the caller's 'size' bytes, not the binned allocation size.
        result->updateData(data, size);
    }
    return result;
}
// Creates (or recycles) a buffer of at least 'size' bytes of the given type.
// Dynamic buffers are pow2-binned and looked up in the scratch cache first.
GrBuffer* GrResourceProvider::createBuffer(GrBufferType type, size_t size,
                                           GrAccessPattern accessPattern, uint32_t flags) {
    if (this->isAbandoned()) {
        return nullptr;
    }

    if (kDynamic_GrAccessPattern == accessPattern) {
        // Bin by pow2 with a reasonable minimum so scratch buffers get reused.
        static const uint32_t MIN_SIZE = 1 << 12;
        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));

        GrScratchKey key;
        GrBuffer::ComputeScratchKeyForDynamicBuffer(type, size, &key);
        uint32_t scratchFlags = (flags & kNoPendingIO_Flag)
                ? (uint32_t) GrResourceCache::kRequireNoPendingIO_ScratchFlag
                : (uint32_t) GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        if (GrGpuResource* cached =
                    this->cache()->findAndRefScratchResource(key, size, scratchFlags)) {
            return static_cast<GrBuffer*>(cached);
        }
    }
    // Note: in the dynamic case 'size' has been rounded up to its bin here.
    return this->gpu()->createBuffer(type, size, accessPattern);
}
uint32_t GrPathUtils::cubicPointCount(const SkPoint points[], SkScalar tol) { if (tol < gMinCurveTol) { tol = gMinCurveTol; } SkASSERT(tol > 0); SkScalar d = SkTMax( points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]), points[2].distanceToLineSegmentBetweenSqd(points[0], points[3])); d = SkScalarSqrt(d); if (!SkScalarIsFinite(d)) { return MAX_POINTS_PER_CURVE; } else if (d <= tol) { return 1; } else { SkScalar divSqrt = SkScalarSqrt(d / tol); if (((SkScalar)SK_MaxS32) <= divSqrt) { return MAX_POINTS_PER_CURVE; } else { int temp = SkScalarCeilToInt(SkScalarSqrt(d / tol)); int pow2 = GrNextPow2(temp); // Because of NaNs & INFs we can wind up with a degenerate temp // such that pow2 comes out negative. Also, our point generator // will always output at least one pt. if (pow2 < 1) { pow2 = 1; } return SkTMin(pow2, MAX_POINTS_PER_CURVE); } } }
uint32_t GrPathUtils::quadraticPointCount(const GrPoint points[], GrScalar tol) { if (tol < gMinCurveTol) { tol == gMinCurveTol; } GrAssert(tol > 0); GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]); if (d <= tol) { return 1; } else { // Each time we subdivide, d should be cut in 4. So we need to // subdivide x = log4(d/tol) times. x subdivisions creates 2^(x) // points. // 2^(log4(x)) = sqrt(x); int temp = SkScalarCeil(SkScalarSqrt(SkScalarDiv(d, tol))); int pow2 = GrNextPow2(temp); // Because of NaNs & INFs we can wind up with a degenerate temp // such that pow2 comes out negative. Also, our point generator // will always output at least one pt. if (pow2 < 1) { pow2 = 1; } return GrMin(pow2, MAX_POINTS_PER_CURVE); } }
// Looks for a cached scratch texture matching 'inDesc'. Unless kExact is
// requested, the dimensions are rounded up into pow2 bins to improve reuse.
// Falls back to creating a fresh texture unless kNoCreate is set; returns
// NULL when nothing could be found or created.
GrTexture* GrTextureProvider::internalRefScratchTexture(const GrSurfaceDesc& inDesc,
                                                        uint32_t flags) {
    SkASSERT(!this->isAbandoned());
    SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));

    // Copy-on-write so the caller's desc is untouched if we re-bin dimensions.
    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    // Only scratch-cache when reuse is profitable/supported.
    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        if (!(kExact_ScratchTextureFlag & flags)) {
            // bin by pow2 with a reasonable min
            const int minSize = SkTMin(16, fGpu->caps()->minTextureSize());
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth = SkTMax(minSize, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(minSize, GrNextPow2(desc->fHeight));
        }

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*desc, &key);
        uint32_t scratchFlags = 0;
        if (kNoPendingIO_ScratchTextureFlag & flags) {
            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
        } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        }
        GrGpuResource* resource = fCache->findAndRefScratchResource(key, scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            GrRenderTarget* rt = surface->asRenderTarget();
            // A recycled render target may hold stale contents; discard when supported.
            if (rt && fGpu->caps()->discardRenderTargetSupport()) {
                rt->discard();
            }
            return surface->asTexture();
        }
    }

    if (!(kNoCreate_ScratchTextureFlag & flags)) {
        return fGpu->createTexture(*desc, true, NULL, 0);
    }

    return NULL;
}
// Builds a square atlas whose initial dimension is the next pow2 that covers
// the requested minimum size, clamped to [kMinSize, fMaxAtlasSize].
GrCCPRAtlas::GrCCPRAtlas(const GrCaps& caps, int minWidth, int minHeight)
        : fMaxAtlasSize(caps.maxRenderTargetSize())
        , fDrawBounds{0, 0} {
    SkASSERT(fMaxAtlasSize <= caps.maxTextureSize());
    SkASSERT(SkTMax(minWidth, minHeight) <= fMaxAtlasSize);

    int dim = GrNextPow2(SkTMax(minWidth, minHeight));
    dim = SkTMax(int(kMinSize), dim);
    dim = SkTMin(dim, fMaxAtlasSize);
    fWidth = fHeight = dim;
    fTopNode = skstd::make_unique<Node>(nullptr, 0, 0, dim, dim);
}
static uint32_t cubic_point_count(const GrPoint points[], GrScalar tol) { GrScalar d = GrMax(points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]), points[2].distanceToLineSegmentBetweenSqd(points[0], points[3])); d = sqrtf(d); if (d < tol) { return 1; } else { d = ceilf(sqrtf(d/tol)); return GrMin(GrNextPow2((uint32_t)d), MAX_POINTS_PER_CURVE); } }
bool GrGpu::IsACopyNeededForTextureParams(const GrCaps* caps, GrTextureProxy* texProxy, int width, int height, const GrSamplerState& textureParams, GrTextureProducer::CopyParams* copyParams, SkScalar scaleAdjust[2]) { if (texProxy) { // If the texture format itself doesn't support repeat wrap mode or mipmapping (and // those capabilities are required) force a copy. if ((textureParams.isRepeated() && texProxy->texPriv().isClampOnly()) || (GrSamplerState::Filter::kMipMap == textureParams.filter() && texProxy->texPriv().doesNotSupportMipMaps())) { copyParams->fFilter = GrSamplerState::Filter::kNearest; copyParams->fWidth = texProxy->width(); copyParams->fHeight = texProxy->height(); return true; } } if (textureParams.isRepeated() && !caps->npotTextureTileSupport() && (!SkIsPow2(width) || !SkIsPow2(height))) { SkASSERT(scaleAdjust); copyParams->fWidth = GrNextPow2(width); copyParams->fHeight = GrNextPow2(height); SkASSERT(scaleAdjust); scaleAdjust[0] = ((SkScalar) copyParams->fWidth) / width; scaleAdjust[1] = ((SkScalar) copyParams->fHeight) / height; switch (textureParams.filter()) { case GrSamplerState::Filter::kNearest: copyParams->fFilter = GrSamplerState::Filter::kNearest; break; case GrSamplerState::Filter::kBilerp: case GrSamplerState::Filter::kMipMap: // We are only ever scaling up so no reason to ever indicate kMipMap. copyParams->fFilter = GrSamplerState::Filter::kBilerp; break; } return true; } return false; }
bool GrGpu::makeCopyForTextureParams(int width, int height, const GrTextureParams& textureParams, GrTextureProducer::CopyParams* copyParams) const { const GrCaps& caps = *this->caps(); if (textureParams.isTiled() && !caps.npotTextureTileSupport() && (!SkIsPow2(width) || !SkIsPow2(height))) { copyParams->fWidth = GrNextPow2(width); copyParams->fHeight = GrNextPow2(height); switch (textureParams.filterMode()) { case GrTextureParams::kNone_FilterMode: copyParams->fFilter = GrTextureParams::kNone_FilterMode; break; case GrTextureParams::kBilerp_FilterMode: case GrTextureParams::kMipMap_FilterMode: // We are only ever scaling up so no reason to ever indicate kMipMap. copyParams->fFilter = GrTextureParams::kBilerp_FilterMode; break; } return true; } return false; }
static uint32_t quadratic_point_count(const GrPoint points[], GrScalar tol) { GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]); if (d < tol) { return 1; } else { // Each time we subdivide, d should be cut in 4. So we need to // subdivide x = log4(d/tol) times. x subdivisions creates 2^(x) // points. // 2^(log4(x)) = sqrt(x); d = ceilf(sqrtf(d/tol)); return GrMin(GrNextPow2((uint32_t)d), MAX_POINTS_PER_CURVE); } }
static void get_stretch(const GrContext* ctx, int width, int height, const GrTextureParams* params, SkGrStretch* stretch) { stretch->fType = SkGrStretch::kNone_Type; bool doStretch = false; if (params && params->isTiled() && !ctx->caps()->npotTextureTileSupport() && (!SkIsPow2(width) || !SkIsPow2(height))) { doStretch = true; stretch->fWidth = GrNextPow2(SkTMax(width, ctx->caps()->minTextureSize())); stretch->fHeight = GrNextPow2(SkTMax(height, ctx->caps()->minTextureSize())); } else if (width < ctx->caps()->minTextureSize() || height < ctx->caps()->minTextureSize()) { // The small texture issues appear to be with tiling. Hence it seems ok to scale them // up using the GPU. If issues persist we may need to CPU-stretch. doStretch = true; stretch->fWidth = SkTMax(width, ctx->caps()->minTextureSize()); stretch->fHeight = SkTMax(height, ctx->caps()->minTextureSize()); } if (doStretch) { if (params) { switch(params->filterMode()) { case GrTextureParams::kNone_FilterMode: stretch->fType = SkGrStretch::kNearest_Type; break; case GrTextureParams::kBilerp_FilterMode: case GrTextureParams::kMipMap_FilterMode: stretch->fType = SkGrStretch::kBilerp_Type; break; } } else { stretch->fType = SkGrStretch::kBilerp_Type; } } else { stretch->fWidth = -1; stretch->fHeight = -1; stretch->fType = SkGrStretch::kNone_Type; } }
RectanizerView() : fCurRandRect(0) { for (int i = 0; i < 3; ++i) { fRects[i].setReserve(kNumRandRects); } fRectLocations.setReserve(kNumRandRects); SkRandom random; for (int i = 0; i < kNumRandRects; ++i) { *fRects[0].append() = SkISize::Make(random.nextRangeU(kMinRectSize, kMaxRectSize), random.nextRangeU(kMinRectSize, kMaxRectSize)); *fRects[1].append() = SkISize::Make( GrNextPow2(random.nextRangeU(kMinRectSize, kMaxRectSize)), GrNextPow2(random.nextRangeU(kMinRectSize, kMaxRectSize))); *fRects[2].append() = SkISize::Make(128, 128); *fRectLocations.append() = SkIPoint16::Make(0, 0); } fCurRects = &fRects[0]; fRectanizers[0] = new GrRectanizerPow2(kWidth, kHeight); fRectanizers[1] = new GrRectanizerSkyline(kWidth, kHeight); fCurRectanizer = fRectanizers[0]; }
// Attempts to place a width x height rect, writing its position to 'loc'.
// Heights are rounded up to a pow2 (with a minimum) and rects are packed
// left-to-right into the row of matching rounded height, opening a new strip
// when the current row is missing or full. Returns false when it won't fit.
bool GrRectanizerPow2::addRect(int width, int height, GrIPoint16* loc) {
    if ((unsigned)width > (unsigned)this->width() ||
        (unsigned)height > (unsigned)this->height()) {
        return false;
    }

    // NOTE(review): width * height could overflow int32 for very large rects —
    // presumably dimensions stay small in practice; confirm with callers.
    int32_t area = width * height;

    /*
        We use bsearch, but there may be more than one row with the same
        height, so we actually search for height-1, which can only be a pow2
        itself if height == 2. Thus we set a minimum height.
    */
    height = GrNextPow2(height);
    if (height < MIN_HEIGHT_POW2) {
        height = MIN_HEIGHT_POW2;
    }

    Row* row = &fRows[HeightToRowIndex(height)];
    GrAssert(row->fRowHeight == 0 || row->fRowHeight == height);

    if (0 == row->fRowHeight) {
        // Row for this height has never been opened; claim a new strip for it.
        if (!this->canAddStrip(height)) {
            return false;
        }
        this->initRow(row, height);
    } else {
        if (!row->canAddWidth(width, this->width())) {
            if (!this->canAddStrip(height)) {
                return false;
            }
            // that row is now "full", so retarget our Row record for
            // another one
            this->initRow(row, height);
        }
    }

    GrAssert(row->fRowHeight == height);
    GrAssert(row->canAddWidth(width, this->width()));
    // Hand out the current cursor and advance it past the new rect.
    *loc = row->fLoc;
    row->fLoc.fX += width;

    GrAssert(row->fLoc.fX <= this->width());
    GrAssert(row->fLoc.fY <= this->height());
    GrAssert(fNextStripY <= this->height());
    fAreaSoFar += area;
    return true;
}