SkRect Text::onRevalidate(InvalidationController*, const SkMatrix&) {
    // TODO: we could potentially track invals which don't require rebuilding the blob.

    // Mirror all cached font state onto a scratch paint.
    SkPaint paint;
    paint.setFlags(fFlags);
    paint.setTypeface(fTypeface);
    paint.setTextSize(fSize);
    paint.setTextScaleX(fScaleX);
    paint.setTextSkewX(fSkewX);
    paint.setTextAlign(fAlign);
    paint.setHinting(fHinting);

    // Step 1: convert the UTF8 text into glyph IDs (first call sizes the array,
    // second call fills it).
    paint.setTextEncoding(SkPaint::kUTF8_TextEncoding);
    SkSTArray<256, SkGlyphID, true> glyphIDs;
    glyphIDs.reset(paint.textToGlyphs(fText.c_str(), fText.size(), nullptr));
    SkAssertResult(paint.textToGlyphs(fText.c_str(), fText.size(), glyphIDs.begin())
                       == glyphIDs.count());
    paint.setTextEncoding(SkPaint::kGlyphID_TextEncoding);

    // Step 2: rebuild the cached blob from the glyph IDs.
    SkTextBlobBuilder blobBuilder;
    const auto& runBuffer = blobBuilder.allocRun(paint, glyphIDs.count(), 0, 0, nullptr);
    if (!runBuffer.glyphs) {
        fBlob.reset();
        return SkRect::MakeEmpty();
    }
    memcpy(runBuffer.glyphs, glyphIDs.begin(), glyphIDs.count() * sizeof(SkGlyphID));

    fBlob = blobBuilder.make();
    if (!fBlob) {
        return SkRect::MakeEmpty();
    }
    // Report bounds translated to the node's position.
    return fBlob->bounds().makeOffset(fPosition.x(), fPosition.y());
}
// Returns the cached TextBlob for 'skBlob' (+ stroke settings), creating and caching it on a
// miss. Every hit or insert moves the blob to the LRU tail (most-recently-used position).
// Ownership note: the raw 'new TextBlob' is owned by the cache maps; purgeToFit/eviction is
// responsible for deletion — TODO confirm against the purge implementation (not in view here).
const GrStencilAndCoverTextContext::TextBlob&
GrStencilAndCoverTextContext::findOrCreateTextBlob(const SkTextBlob* skBlob,
                                                   const SkPaint& skPaint) {
    // The font-related parameters are baked into the text blob and will override this skPaint, so
    // the only remaining properties that can affect a TextBlob are the ones related to stroke.
    if (SkPaint::kFill_Style == skPaint.getStyle()) { // Fast path.
        // Fill has no stroke state, so the blob's uniqueID alone is a sufficient cache key.
        if (TextBlob** found = fBlobIdCache.find(skBlob->uniqueID())) {
            // Refresh LRU position: move to tail on every hit.
            fLRUList.remove(*found);
            fLRUList.addToTail(*found);
            return **found;
        }
        TextBlob* blob = new TextBlob(skBlob->uniqueID(), skBlob, skPaint, fContext,
                                      &fSurfaceProps);
        // Evict LRU entries first so the new blob fits within budget.
        this->purgeToFit(*blob);
        fBlobIdCache.set(skBlob->uniqueID(), blob);
        fLRUList.addToTail(blob);
        fCacheSize += blob->cpuMemorySize();
        return *blob;
    } else {
        // Stroked text: the key must combine the blob's uniqueID with the stroke parameters,
        // since the same SkTextBlob stroked differently yields different geometry.
        GrStrokeInfo stroke(skPaint);
        SkSTArray<4, uint32_t, true> key;
        key.reset(1 + stroke.computeUniqueKeyFragmentData32Cnt());
        key[0] = skBlob->uniqueID();
        stroke.asUniqueKeyFragment(&key[1]);
        if (TextBlob** found = fBlobKeyCache.find(key)) {
            // Refresh LRU position on hit.
            fLRUList.remove(*found);
            fLRUList.addToTail(*found);
            return **found;
        }
        TextBlob* blob = new TextBlob(key, skBlob, skPaint, fContext, &fSurfaceProps);
        this->purgeToFit(*blob);
        fBlobKeyCache.set(blob);
        fLRUList.addToTail(blob);
        fCacheSize += blob->cpuMemorySize();
        return *blob;
    }
}
static bool get_geometry(const SkPath& path, const SkMatrix& m, PLSVertices& triVertices, PLSVertices& quadVertices, GrResourceProvider* resourceProvider, SkRect bounds) { SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance; SkScalar tol = GrPathUtils::scaleToleranceToSrc(screenSpaceTol, m, bounds); int contourCnt; int maxPts = GrPathUtils::worstCasePointCount(path, &contourCnt, tol); if (maxPts <= 0) { return 0; } SkPath linesOnlyPath; linesOnlyPath.setFillType(path.getFillType()); SkSTArray<15, SkPoint, true> quadPoints; SkPath::Iter iter(path, true); bool done = false; while (!done) { SkPoint pts[4]; SkPath::Verb verb = iter.next(pts); switch (verb) { case SkPath::kMove_Verb: SkASSERT(quadPoints.count() % 3 == 0); for (int i = 0; i < quadPoints.count(); i += 3) { add_quad(&quadPoints[i], quadVertices); } quadPoints.reset(); m.mapPoints(&pts[0], 1); linesOnlyPath.moveTo(pts[0]); break; case SkPath::kLine_Verb: m.mapPoints(&pts[1], 1); linesOnlyPath.lineTo(pts[1]); break; case SkPath::kQuad_Verb: m.mapPoints(pts, 3); linesOnlyPath.lineTo(pts[2]); quadPoints.push_back(pts[0]); quadPoints.push_back(pts[1]); quadPoints.push_back(pts[2]); break; case SkPath::kCubic_Verb: { m.mapPoints(pts, 4); SkSTArray<15, SkPoint, true> quads; GrPathUtils::convertCubicToQuads(pts, kCubicTolerance, &quads); int count = quads.count(); for (int q = 0; q < count; q += 3) { linesOnlyPath.lineTo(quads[q + 2]); quadPoints.push_back(quads[q]); quadPoints.push_back(quads[q + 1]); quadPoints.push_back(quads[q + 2]); } break; } case SkPath::kConic_Verb: { m.mapPoints(pts, 3); SkScalar weight = iter.conicWeight(); SkAutoConicToQuads converter; const SkPoint* quads = converter.computeQuads(pts, weight, kConicTolerance); int count = converter.countQuads(); for (int i = 0; i < count; ++i) { linesOnlyPath.lineTo(quads[2 * i + 2]); quadPoints.push_back(quads[2 * i]); quadPoints.push_back(quads[2 * i + 1]); quadPoints.push_back(quads[2 * i + 2]); } break; } case SkPath::kClose_Verb: 
linesOnlyPath.close(); break; case SkPath::kDone_Verb: done = true; break; default: SkASSERT(false); } } SkASSERT(quadPoints.count() % 3 == 0); for (int i = 0; i < quadPoints.count(); i += 3) { add_quad(&quadPoints[i], quadVertices); } static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain(); GrUniqueKey key; GrUniqueKey::Builder builder(&key, kDomain, 2); builder[0] = path.getGenerationID(); builder[1] = path.getFillType(); builder.finish(); GrTessellator::WindingVertex* windingVertices; int triVertexCount = GrTessellator::PathToVertices(linesOnlyPath, 0, bounds, &windingVertices); if (triVertexCount > 0) { for (int i = 0; i < triVertexCount; i += 3) { SkPoint p1 = windingVertices[i].fPos; SkPoint p2 = windingVertices[i + 1].fPos; SkPoint p3 = windingVertices[i + 2].fPos; int winding = windingVertices[i].fWinding; SkASSERT(windingVertices[i + 1].fWinding == winding); SkASSERT(windingVertices[i + 2].fWinding == winding); SkScalar cross = (p2 - p1).cross(p3 - p1); SkPoint bloated[3] = { p1, p2, p3 }; if (cross < 0.0f) { SkTSwap(p1, p3); } if (bloat_tri(bloated)) { triVertices.push_back({ bloated[0], p1, p2, p3, winding }); triVertices.push_back({ bloated[1], p1, p2, p3, winding }); triVertices.push_back({ bloated[2], p1, p2, p3, winding }); } else { SkScalar minX = SkTMin(p1.fX, SkTMin(p2.fX, p3.fX)) - 1.0f; SkScalar minY = SkTMin(p1.fY, SkTMin(p2.fY, p3.fY)) - 1.0f; SkScalar maxX = SkTMax(p1.fX, SkTMax(p2.fX, p3.fX)) + 1.0f; SkScalar maxY = SkTMax(p1.fY, SkTMax(p2.fY, p3.fY)) + 1.0f; triVertices.push_back({ { minX, minY }, p1, p2, p3, winding }); triVertices.push_back({ { maxX, minY }, p1, p2, p3, winding }); triVertices.push_back({ { minX, maxY }, p1, p2, p3, winding }); triVertices.push_back({ { maxX, minY }, p1, p2, p3, winding }); triVertices.push_back({ { maxX, maxY }, p1, p2, p3, winding }); triVertices.push_back({ { minX, maxY }, p1, p2, p3, winding }); } } delete[] windingVertices; } return triVertexCount > 0 || quadVertices.count() > 0; }
// MDB TODO: make use of the 'proxy' parameter.
// Flushes all pending op lists to the GPU: closes them, (optionally) topo-sorts them, runs
// onFlush callbacks (e.g. atlas generation), assigns GPU resources in intervals via the
// resource allocator, executes each interval, then notifies the cache and postFlush observers.
// Re-entrancy is guarded by fFlushing. Returns whether semaphores were submitted by the GPU.
GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
                                                      GrResourceCache::FlushType type,
                                                      int numSemaphores,
                                                      GrBackendSemaphore backendSemaphores[]) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "internalFlush", fContext);

    // Bail if we are mid-flush already or the context has been abandoned.
    if (fFlushing || this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    GrGpu* gpu = fContext->contextPriv().getGpu();
    if (!gpu) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }
    fFlushing = true;

    for (int i = 0; i < fOpLists.count(); ++i) {
        // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
        // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
        // but need to be flushed anyway. Closing such GrOpLists here will mean new
        // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
        fOpLists[i]->makeClosed(*fContext->caps());
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
    // share the same backing GrSurfaceProxy it means the opList was artificially split.
    if (fOpLists.count()) {
        GrRenderTargetOpList* prevOpList = fOpLists[0]->asRenderTargetOpList();
        for (int i = 1; i < fOpLists.count(); ++i) {
            GrRenderTargetOpList* curOpList = fOpLists[i]->asRenderTargetOpList();
            if (prevOpList && curOpList) {
                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
            }
            prevOpList = curOpList;
        }
    }
#endif

    if (fSortRenderTargets) {
        // SkDEBUGCODE supplies 'bool result =' only in debug builds; in release the sort's
        // return value is intentionally discarded.
        SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
        SkASSERT(result);
    }

    GrOpFlushState flushState(gpu, fContext->contextPriv().resourceProvider(), &fTokenTracker);

    GrOnFlushResourceProvider onFlushProvider(this);
    // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
    // stack here is to preserve the flush tokens.

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        // Snapshot the IDs of every op list in this flush for the callbacks.
        fFlushingOpListIDs.reset(fOpLists.count());
        for (int i = 0; i < fOpLists.count(); ++i) {
            fFlushingOpListIDs[i] = fOpLists[i]->uniqueID();
        }
        SkSTArray<4, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, fFlushingOpListIDs.begin(),
                                      fFlushingOpListIDs.count(), &renderTargetContexts);
            for (const sk_sp<GrRenderTargetContext>& rtc : renderTargetContexts) {
                sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
                if (!onFlushOpList) {
                    continue; // Odd - but not a big deal
                }
#ifdef SK_DEBUG
                // OnFlush callbacks are already invoked during flush, and are therefore expected to
                // handle resource allocation & usage on their own. (No deferred or lazy proxies!)
                onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p) {
                    SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                    SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
                });
#endif
                onFlushOpList->makeClosed(*fContext->caps());
                onFlushOpList->prepare(&flushState);
                fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
            }
            // Clear between callbacks so each one starts from an empty list.
            renderTargetContexts.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fOpLists.count(); ++i) {
        SkDEBUGCODE(fOpLists[i]->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

    {
        // Assign backing resources to proxies one interval at a time, executing each interval's
        // op lists before moving to the next.
        GrResourceAllocator alloc(fContext->contextPriv().resourceProvider());
        for (int i = 0; i < fOpLists.count(); ++i) {
            fOpLists[i]->gatherProxyIntervals(&alloc);
            alloc.markEndOfOpList(i);
        }

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        while (alloc.assign(&startIndex, &stopIndex, flushState.uninstantiateProxyTracker(),
                            &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                // Drop ops that reference proxies we could not instantiate, then execute the
                // remainder of the interval anyway.
                for (int i = startIndex; i < stopIndex; ++i) {
                    fOpLists[i]->purgeOpsWithUninstantiatedProxies();
                }
            }
            if (this->executeOpLists(startIndex, stopIndex, &flushState)) {
                flushed = true;
            }
        }
    }

    fOpLists.reset();

    GrSemaphoresSubmitted result = gpu->finishFlush(numSemaphores, backendSemaphores);

    flushState.uninstantiateProxyTracker()->uninstantiateAllProxies();

    // We always have to notify the cache when it requested a flush so it can reset its state.
    if (flushed || type == GrResourceCache::FlushType::kCacheRequested) {
        fContext->contextPriv().getResourceCache()->notifyFlushOccurred(type);
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingOpListIDs.begin(),
                                   fFlushingOpListIDs.count());
    }
    fFlushingOpListIDs.reset();
    fFlushing = false;

    return result;
}
// Lays out and fills the shared GPU instance buffer for all parsed paths: computes base
// offsets per primitive type (scissored vs. not), allocates one vertex buffer via
// 'onFlushRP', then walks the recorded GrCCGeometry verbs writing instance data and
// triangulating fill fans. Returns false if the buffer allocation fails.
// NOTE(review): the tail of this function (buffer unmap / final return) is outside this view.
bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
    SkASSERT(!fParsingPath); // Call saveParsedPath() or discardParsedPath().
    SkASSERT(fCoverageCountBatches.back().fEndNonScissorIndices == // Call closeCurrentBatch().
             fTotalPrimitiveCounts[(int)ScissorMode::kNonScissored]);
    SkASSERT(fCoverageCountBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());

    // Here we build a single instance buffer to share with every internal batch.
    //
    // CCPR processs 3 different types of primitives: triangles, quadratics, cubics. Each primitive
    // type is further divided into instances that require a scissor and those that don't. This
    // leaves us with 3*2 = 6 independent instance arrays to build for the GPU.
    //
    // Rather than place each instance array in its own GPU buffer, we allocate a single
    // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
    // our draw calls to direct the GPU to the applicable elements within a given array.
    //
    // We already know how big to make each of the 6 arrays from fTotalPrimitiveCounts, so layout is
    // straightforward. Start with triangles and quadratics. They both view the instance buffer as
    // an array of TriPointInstance[], so we can begin at zero and lay them out one after the other.
    // (Index [0] = non-scissored, [1] = scissored, per ScissorMode.)
    fBaseInstances[0].fTriangles = 0;
    fBaseInstances[1].fTriangles = fBaseInstances[0].fTriangles +
                                   fTotalPrimitiveCounts[0].fTriangles;
    fBaseInstances[0].fQuadratics = fBaseInstances[1].fTriangles +
                                    fTotalPrimitiveCounts[1].fTriangles;
    fBaseInstances[1].fQuadratics = fBaseInstances[0].fQuadratics +
                                    fTotalPrimitiveCounts[0].fQuadratics;
    int triEndIdx = fBaseInstances[1].fQuadratics + fTotalPrimitiveCounts[1].fQuadratics;

    // Wound triangles and cubics both view the same instance buffer as an array of
    // QuadPointInstance[]. So, reinterpreting the instance data as QuadPointInstance[], we start
    // them on the first index that will not overwrite previous TriPointInstance data.
    int quadBaseIdx =
            GR_CT_DIV_ROUND_UP(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
    fBaseInstances[0].fWeightedTriangles = quadBaseIdx;
    fBaseInstances[1].fWeightedTriangles = fBaseInstances[0].fWeightedTriangles +
                                           fTotalPrimitiveCounts[0].fWeightedTriangles;
    fBaseInstances[0].fCubics = fBaseInstances[1].fWeightedTriangles +
                                fTotalPrimitiveCounts[1].fWeightedTriangles;
    fBaseInstances[1].fCubics = fBaseInstances[0].fCubics + fTotalPrimitiveCounts[0].fCubics;
    fBaseInstances[0].fConics = fBaseInstances[1].fCubics + fTotalPrimitiveCounts[1].fCubics;
    fBaseInstances[1].fConics = fBaseInstances[0].fConics + fTotalPrimitiveCounts[0].fConics;
    int quadEndIdx = fBaseInstances[1].fConics + fTotalPrimitiveCounts[1].fConics;

    // Size the buffer in QuadPointInstance units (the larger view) so both views fit.
    fInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
                                            quadEndIdx * sizeof(QuadPointInstance));
    if (!fInstanceBuffer) {
        return false;
    }

    // Two typed views over the same mapped memory; the layout above guarantees the
    // QuadPointInstance region starts past all TriPointInstance data.
    TriPointInstance* triPointInstanceData = static_cast<TriPointInstance*>(fInstanceBuffer->map());
    QuadPointInstance* quadPointInstanceData =
            reinterpret_cast<QuadPointInstance*>(triPointInstanceData);
    SkASSERT(quadPointInstanceData);

    PathInfo* nextPathInfo = fPathsInfo.begin();
    float atlasOffsetX = 0.0, atlasOffsetY = 0.0;
    Sk2f atlasOffset;
    // Running write cursors into the instance buffer, seeded from the base offsets.
    PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
    PrimitiveTallies* currIndices = nullptr;
    SkSTArray<256, int32_t, true> currFan;  // point indices of the current fill fan
    bool currFanIsTessellated = false;

    const SkTArray<SkPoint, true>& pts = fGeometry.points();
    int ptsIdx = -1;
    int nextConicWeightIdx = 0;

    // Expand the ccpr verbs into GPU instance buffers.
    for (GrCCGeometry::Verb verb : fGeometry.verbs()) {
        switch (verb) {
            case GrCCGeometry::Verb::kBeginPath:
                SkASSERT(currFan.empty());
                currIndices = &instanceIndices[(int)nextPathInfo->scissorMode()];
                atlasOffsetX = static_cast<float>(nextPathInfo->atlasOffsetX());
                atlasOffsetY = static_cast<float>(nextPathInfo->atlasOffsetY());
                atlasOffset = {atlasOffsetX, atlasOffsetY};
                currFanIsTessellated = nextPathInfo->hasFanTessellation();
                if (currFanIsTessellated) {
                    // Pre-tessellated fans are emitted up front; per-contour fan tracking is
                    // skipped for this path.
                    emit_tessellated_fan(nextPathInfo->fanTessellation(),
                                         nextPathInfo->fanTessellationCount(), atlasOffset,
                                         triPointInstanceData, quadPointInstanceData,
                                         currIndices);
                }
                ++nextPathInfo;
                continue;

            case GrCCGeometry::Verb::kBeginContour:
                SkASSERT(currFan.empty());
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kLineTo:
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kMonotonicQuadraticTo:
                // Quads consume 2 points and write a TriPointInstance.
                triPointInstanceData[currIndices->fQuadratics++].set(&pts[ptsIdx], atlasOffset);
                ptsIdx += 2;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kMonotonicCubicTo:
                // Cubics consume 3 points and write a QuadPointInstance.
                quadPointInstanceData[currIndices->fCubics++].set(&pts[ptsIdx], atlasOffsetX,
                                                                  atlasOffsetY);
                ptsIdx += 3;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kMonotonicConicTo:
                // Conics consume 2 points plus a weight from the side channel.
                quadPointInstanceData[currIndices->fConics++].setW(
                        &pts[ptsIdx], atlasOffset, fGeometry.getConicWeight(nextConicWeightIdx));
                ptsIdx += 2;
                ++nextConicWeightIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kEndClosedContour: // endPt == startPt.
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    // Drop the duplicate end point so the fan doesn't double-count it.
                    currFan.pop_back();
                }
            // fallthru.
            case GrCCGeometry::Verb::kEndOpenContour: // endPt != startPt.
                SkASSERT(!currFanIsTessellated || currFan.empty());
                if (!currFanIsTessellated && currFan.count() >= 3) {
                    int fanSize = currFan.count();
                    // Reserve space for emit_recursive_fan. Technically this can grow to
                    // fanSize + log3(fanSize), but we approximate with log2.
                    currFan.push_back_n(SkNextLog2(fanSize));
                    SkDEBUGCODE(TriPointInstance* end =)
                            emit_recursive_fan(pts, currFan, 0, fanSize, atlasOffset,
                                               triPointInstanceData + currIndices->fTriangles);
                    // A fan over n points always yields n-2 triangles.
                    currIndices->fTriangles += fanSize - 2;
                    SkASSERT(triPointInstanceData + currIndices->fTriangles == end);
                }
                currFan.reset();
                continue;
        }
    }
// TODO(egouriou): Take advantage of periods in the convolution.
// Practical resizing filters are periodic outside of the border area.
// For Lanczos, a scaling by a (reduced) factor of p/q (q pixels in the
// source become p pixels in the destination) will have a period of p.
// A nice consequence is a period of 1 when downscaling by an integral
// factor. Downscaling from typical display resolutions is also bound
// to produce interesting periods as those are chosen to have multiple
// small factors. Small periods reduce computational load and improve
// cache usage if the coefficients can be shared. For periods of 1 we
// can consider loading the factors only once outside the borders.
//
// Computes one normalized, fixed-point convolution-filter kernel per destination pixel in
// [destSubsetLo, destSubsetLo + destSubsetSize) and appends them to 'output'. 'scale' is the
// dest/src ratio; 'srcSize' bounds the source pixels a kernel may cover.
void SkResizeFilter::computeFilters(int srcSize,
                                    float destSubsetLo, float destSubsetSize,
                                    float scale,
                                    SkConvolutionFilter1D* output,
                                    const SkConvolutionProcs& convolveProcs) {
    float destSubsetHi = destSubsetLo + destSubsetSize; // [lo, hi)

    // When we're doing a magnification, the scale will be larger than one. This
    // means the destination pixels are much smaller than the source pixels, and
    // that the range covered by the filter won't necessarily cover any source
    // pixel boundaries. Therefore, we use these clamped values (max of 1) for
    // some computations.
    float clampedScale = SkTMin(1.0f, scale);

    // This is how many source pixels from the center we need to count
    // to support the filtering function.
    float srcSupport = fBitmapFilter->width() / clampedScale;

    float invScale = 1.0f / scale;

    SkSTArray<64, float, true> filterValuesArray;
    SkSTArray<64, SkConvolutionFilter1D::ConvolutionFixed, true> fixedFilterValuesArray;

    // Loop over all pixels in the output range. We will generate one set of
    // filter values for each one. Those values will tell us how to blend the
    // source pixels to compute the destination pixel.

    // This is the pixel in the source directly under the pixel in the dest.
    // Note that we base computations on the "center" of the pixels. To see
    // why, observe that the destination pixel at coordinates (0, 0) in a 5.0x
    // downscale should "cover" the pixels around the pixel with *its center*
    // at coordinates (2.5, 2.5) in the source, not those around (0, 0).
    // Hence we need to scale coordinates (0.5, 0.5), not (0, 0).
    destSubsetLo = SkScalarFloorToScalar(destSubsetLo);
    destSubsetHi = SkScalarCeilToScalar(destSubsetHi);
    float srcPixel = (destSubsetLo + 0.5f) * invScale;
    int destLimit = SkScalarTruncToInt(destSubsetHi - destSubsetLo);
    output->reserveAdditional(destLimit, SkScalarCeilToInt(destLimit * srcSupport * 2));
    for (int destI = 0; destI < destLimit; srcPixel += invScale, destI++) {
        // Compute the (inclusive) range of source pixels the filter covers.
        float srcBegin = SkTMax(0.f, SkScalarFloorToScalar(srcPixel - srcSupport));
        float srcEnd = SkTMin(srcSize - 1.f, SkScalarCeilToScalar(srcPixel + srcSupport));

        // Compute the unnormalized filter value at each location of the source
        // it covers.

        // Sum of the filter values for normalizing.
        // Distance from the center of the filter, this is the filter coordinate
        // in source space. We also need to consider the center of the pixel
        // when comparing distance against 'srcPixel'. In the 5x downscale
        // example used above the distance from the center of the filter to
        // the pixel with coordinates (2, 2) should be 0, because its center
        // is at (2.5, 2.5).
        float destFilterDist = (srcBegin + 0.5f - srcPixel) * clampedScale;
        int filterCount = SkScalarTruncToInt(srcEnd - srcBegin) + 1;
        if (filterCount <= 0) {
            // true when srcSize is equal to srcPixel - srcSupport; this may be a bug
            // NOTE(review): this early return also skips the fApplySIMDPadding call below —
            // confirm whether that is intended.
            return;
        }
        filterValuesArray.reset(filterCount);
        float filterSum = fBitmapFilter->evaluate_n(destFilterDist, clampedScale, filterCount,
                                                    filterValuesArray.begin());

        // The filter must be normalized so that we don't affect the brightness of
        // the image. Convert to normalized fixed point.
        int fixedSum = 0;
        fixedFilterValuesArray.reset(filterCount);
        const float* filterValues = filterValuesArray.begin();
        SkConvolutionFilter1D::ConvolutionFixed* fixedFilterValues =
                fixedFilterValuesArray.begin();
        float invFilterSum = 1 / filterSum;
        for (int fixedI = 0; fixedI < filterCount; fixedI++) {
            int curFixed = SkConvolutionFilter1D::FloatToFixed(filterValues[fixedI] *
                                                               invFilterSum);
            fixedSum += curFixed;
            fixedFilterValues[fixedI] = SkToS16(curFixed);
        }
        SkASSERT(fixedSum <= 0x7FFF);

        // The conversion to fixed point will leave some rounding errors, which
        // we add back in to avoid affecting the brightness of the image. We
        // arbitrarily add this to the center of the filter array (this won't always
        // be the center of the filter function since it could get clipped on the
        // edges, but it doesn't matter enough to worry about that case).
        int leftovers = SkConvolutionFilter1D::FloatToFixed(1) - fixedSum;
        fixedFilterValues[filterCount / 2] += leftovers;

        // Now it's ready to go.
        output->AddFilter(SkScalarFloorToInt(srcBegin), fixedFilterValues, filterCount);
    }

    if (convolveProcs.fApplySIMDPadding) {
        convolveProcs.fApplySIMDPadding(output);
    }
}