/*
 * Tries to handle the image with PIEX. If PIEX returns kOk and finds the preview image, creates an
 * SkJpegCodec for that preview. If PIEX returns kFail, the file is invalid and nullptr is returned.
 * In all other cases, falls back to creating an SkRawCodec for DNG images.
 */
SkCodec* SkRawCodec::NewFromStream(SkStream* stream) {
    SkAutoTDelete<SkRawStream> rawStream;
    if (is_asset_stream(*stream)) {
        rawStream.reset(new SkRawAssetStream(stream));
    } else {
        rawStream.reset(new SkRawBufferedStream(stream));
    }

    // Does not take the ownership of rawStream.
    SkPiexStream piexStream(rawStream.get());
    ::piex::PreviewImageData imageData;
    if (::piex::IsRaw(&piexStream)) {
        ::piex::Error error = ::piex::GetPreviewImageData(&piexStream, &imageData);

        if (error == ::piex::Error::kOk && imageData.preview.length > 0) {
            // transferBuffer() is destructive to the rawStream. Abandon the rawStream after this
            // function call.
            // FIXME: one may avoid the copy of memoryStream and use the buffered rawStream.
            SkMemoryStream* memoryStream =
                rawStream->transferBuffer(imageData.preview.offset, imageData.preview.length);
            return memoryStream ? SkJpegCodec::NewFromStream(memoryStream) : nullptr;
        } else if (error == ::piex::Error::kFail) {
            return nullptr;
        }
    }

    // Takes the ownership of the rawStream.
    SkAutoTDelete<SkDngImage> dngImage(SkDngImage::NewFromStream(rawStream.release()));
    if (!dngImage) {
        return nullptr;
    }

    return new SkRawCodec(dngImage.release());
}
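// Illustrative caller sketch (not part of the original source): decode the embedded
// preview (or the full DNG) through the factory above. It assumes the usual Skia
// convention that a *::NewFromStream factory takes ownership of the stream and
// returns nullptr on failure; the helper name and file path are hypothetical.
static SkCodec* example_open_raw(const char* path /* e.g. "photo.dng" */) {
    SkAutoTDelete<SkFILEStream> stream(new SkFILEStream(path));
    if (!stream->isValid()) {
        return nullptr;  // file could not be opened
    }
    // Returns an SkJpegCodec wrapping the PIEX preview, an SkRawCodec for DNG data,
    // or nullptr when the data is not a recognizable RAW file.
    return SkRawCodec::NewFromStream(stream.release());
}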
void BitmapRegionDecoderBench::onDraw(const int n, SkCanvas* canvas) {
    SkAutoTDelete<SkBitmap> bitmap;
    for (int i = 0; i < n; i++) {
        bitmap.reset(fBRD->decodeRegion(fSubset.left(), fSubset.top(), fSubset.width(),
                                        fSubset.height(), fSampleSize, fColorType));
        SkASSERT(nullptr != bitmap.get());
    }
}
// static
SkPDFImage* SkPDFImage::CreateImage(const SkBitmap& bitmap,
                                    const SkIRect& srcRect) {
    if (bitmap.colorType() == kUnknown_SkColorType) {
        return NULL;
    }

    bool isTransparent = false;
    SkAutoTDelete<SkStream> alphaData;
    if (!bitmap.isOpaque()) {
        // Note that isOpaque is not guaranteed to return false for bitmaps
        // with alpha support but a completely opaque alpha channel,
        // so alphaData may still be NULL if we have a completely opaque
        // (or transparent) bitmap.
        alphaData.reset(
                extract_image_data(bitmap, srcRect, true, &isTransparent));
    }
    if (isTransparent) {
        return NULL;
    }

    SkPDFImage* image;
    SkColorType colorType = bitmap.colorType();
    if (alphaData.get() != NULL && (kN32_SkColorType == colorType ||
                                    kARGB_4444_SkColorType == colorType)) {
        if (kN32_SkColorType == colorType) {
            image = SkNEW_ARGS(SkPDFImage,
                               (NULL, bitmap, false,
                                SkIRect::MakeWH(srcRect.width(), srcRect.height())));
        } else {
            SkBitmap unpremulBitmap = unpremultiply_bitmap(bitmap, srcRect);
            image = SkNEW_ARGS(SkPDFImage,
                               (NULL, unpremulBitmap, false,
                                SkIRect::MakeWH(srcRect.width(), srcRect.height())));
        }
    } else {
        image = SkNEW_ARGS(SkPDFImage, (NULL, bitmap, false, srcRect));
    }
    if (alphaData.get() != NULL) {
        SkAutoTUnref<SkPDFImage> mask(
                SkNEW_ARGS(SkPDFImage, (alphaData.get(), bitmap, true, srcRect)));
        image->insert("SMask", new SkPDFObjRef(mask))->unref();
    }
    return image;
}
/**
 * Creates an ExtGState with the SMask set to the luminosityShader in
 * luminosity mode. The shader pattern extends to the bbox.
 */
SkPDFGraphicState* SkPDFAlphaFunctionShader::CreateSMaskGraphicState() {
    SkRect bbox;
    bbox.set(fState.get()->fBBox);

    SkAutoTUnref<SkPDFObject> luminosityShader(
            SkPDFShader::GetPDFShaderByState(
                    fState->CreateAlphaToLuminosityState()));

    SkAutoTUnref<SkStream> alphaStream(create_pattern_fill_content(-1, bbox));

    SkAutoTUnref<SkPDFResourceDict>
            resources(get_gradient_resource_dict(luminosityShader, NULL));

    SkAutoTUnref<SkPDFFormXObject> alphaMask(
            new SkPDFFormXObject(alphaStream.get(), bbox, resources.get()));

    return SkPDFGraphicState::GetSMaskGraphicState(
            alphaMask.get(), false,
            SkPDFGraphicState::kLuminosity_SMaskMode);
}
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkString usage;
    usage.printf("Time drawing .skp files.\n"
                 "\tPossible arguments for --filter: [%s]\n\t\t[%s]",
                 filterTypesUsage().c_str(), filterFlagsUsage().c_str());
    SkCommandLineFlags::SetUsage(usage.c_str());
    SkCommandLineFlags::Parse(argc, argv);

    if (FLAGS_repeat < 1) {
        SkString error;
        error.printf("--repeat must be >= 1. Was %i\n", FLAGS_repeat);
        gLogger.logError(error);
        exit(-1);
    }

    if (FLAGS_logFile.count() == 1) {
        if (!gLogger.SetLogFile(FLAGS_logFile[0])) {
            SkString str;
            str.printf("Could not open %s for writing.\n", FLAGS_logFile[0]);
            gLogger.logError(str);
            // TODO(borenet): We're disabling this for now, due to
            // write-protected Android devices. The very short-term
            // solution is to ignore the fact that we have no log file.
            //exit(-1);
        }
    }

    SkAutoTDelete<PictureJSONResultsWriter> jsonWriter;
    if (FLAGS_jsonLog.count() == 1) {
        SkASSERT(FLAGS_builderName.count() == 1 && FLAGS_gitHash.count() == 1);
        jsonWriter.reset(SkNEW(PictureJSONResultsWriter(
                FLAGS_jsonLog[0],
                FLAGS_builderName[0],
                FLAGS_buildNumber,
                FLAGS_timestamp,
                FLAGS_gitHash[0],
                FLAGS_gitNumber)));
        gWriter.add(jsonWriter.get());
    }
    gWriter.add(&gLogWriter);

#if SK_ENABLE_INST_COUNT
    gPrintInstCount = true;
#endif
    SkAutoGraphics ag;

    sk_tools::PictureBenchmark benchmark;

    setup_benchmark(&benchmark);

    int failures = 0;
    for (int i = 0; i < FLAGS_readPath.count(); ++i) {
        failures += process_input(FLAGS_readPath[i], benchmark);
    }

    if (failures != 0) {
        SkString err;
        err.printf("Failed to run %i benchmarks.\n", failures);
        gLogger.logError(err);
        return 1;
    }
#if SK_LAZY_CACHE_STATS
    if (FLAGS_trackDeferredCaching) {
        SkDebugf("Total cache hit rate: %f\n",
                 (double) gTotalCacheHits / (gTotalCacheHits + gTotalCacheMisses));
    }
#endif
    gWriter.end();
    return 0;
}
ContextInfo GrContextFactory::getContextInfo(ContextType type, ContextOptions options) {
    for (int i = 0; i < fContexts.count(); ++i) {
        Context& context = fContexts[i];
        if (context.fType == type &&
            context.fOptions == options &&
            !context.fAbandoned) {
            if (context.fGLContext) {
                context.fGLContext->makeCurrent();
            }
            return ContextInfo(context.fGrContext, context.fGLContext);
        }
    }
    SkAutoTDelete<GLTestContext> glCtx;
    sk_sp<GrContext> grCtx;
    GrBackendContext backendContext = 0;
    sk_sp<const GrGLInterface> glInterface;
#ifdef SK_VULKAN
    sk_sp<const GrVkBackendContext> vkBackend;
#endif
    GrBackend backend = ContextTypeBackend(type);
    switch (backend) {
        case kOpenGL_GrBackend:
            switch (type) {
                case kGL_ContextType:
                    glCtx.reset(CreatePlatformGLTestContext(kGL_GrGLStandard));
                    break;
                case kGLES_ContextType:
                    glCtx.reset(CreatePlatformGLTestContext(kGLES_GrGLStandard));
                    break;
#if SK_ANGLE
#   ifdef SK_BUILD_FOR_WIN
                case kANGLE_ContextType:
                    glCtx.reset(CreateANGLEDirect3DGLTestContext());
                    break;
#   endif
                case kANGLE_GL_ContextType:
                    glCtx.reset(CreateANGLEOpenGLGLTestContext());
                    break;
#endif
#if SK_COMMAND_BUFFER
                case kCommandBuffer_ContextType:
                    glCtx.reset(CommandBufferGLTestContext::Create());
                    break;
#endif
#if SK_MESA
                case kMESA_ContextType:
                    glCtx.reset(CreateMesaGLTestContext());
                    break;
#endif
                case kNullGL_ContextType:
                    glCtx.reset(CreateNullGLTestContext());
                    break;
                case kDebugGL_ContextType:
                    glCtx.reset(CreateDebugGLTestContext());
                    break;
                default:
                    return ContextInfo();
            }
            if (nullptr == glCtx.get()) {
                return ContextInfo();
            }
            glInterface.reset(SkRef(glCtx->gl()));
            // Block NVPR from non-NVPR types.
            if (!(kEnableNVPR_ContextOptions & options)) {
                glInterface.reset(GrGLInterfaceRemoveNVPR(glInterface.get()));
                if (!glInterface) {
                    return ContextInfo();
                }
            }
            backendContext = reinterpret_cast<GrBackendContext>(glInterface.get());
            glCtx->makeCurrent();
            break;
#ifdef SK_VULKAN
        case kVulkan_GrBackend:
            SkASSERT(kVulkan_ContextType == type);
            if ((kEnableNVPR_ContextOptions & options) ||
                (kRequireSRGBSupport_ContextOptions & options)) {
                return ContextInfo();
            }
            vkBackend.reset(GrVkBackendContext::Create());
            if (!vkBackend) {
                return ContextInfo();
            }
            backendContext = reinterpret_cast<GrBackendContext>(vkBackend.get());
            // There is some bug (either in Skia or the NV Vulkan driver) where VkDevice
            // destruction will hang occasionally. For some reason having an existing GL
            // context fixes this.
            if (!fSentinelGLContext) {
                fSentinelGLContext.reset(CreatePlatformGLTestContext(kGL_GrGLStandard));
                if (!fSentinelGLContext) {
                    fSentinelGLContext.reset(CreatePlatformGLTestContext(kGLES_GrGLStandard));
                }
            }
            break;
#endif
        default:
            return ContextInfo();
    }

    grCtx.reset(GrContext::Create(backend, backendContext, fGlobalOptions));
    if (!grCtx.get()) {
        return ContextInfo();
    }
    if (kEnableNVPR_ContextOptions & options) {
        if (!grCtx->caps()->shaderCaps()->pathRenderingSupport()) {
            return ContextInfo();
        }
    }
    if (kRequireSRGBSupport_ContextOptions & options) {
        if (!grCtx->caps()->srgbSupport()) {
            return ContextInfo();
        }
    }

    Context& context = fContexts.push_back();
    context.fGLContext = glCtx.release();
    context.fGrContext = SkRef(grCtx.get());
    context.fType = type;
    context.fOptions = options;
    context.fAbandoned = false;
    return ContextInfo(context.fGrContext, context.fGLContext);
}
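// Illustrative test-side sketch (not part of the original source; accessor and enum
// names on ContextInfo/ContextOptions are assumptions). The point is that callers
// treat an empty ContextInfo as "this backend is unavailable here" and skip instead
// of failing.
//
//     GrContextFactory factory;
//     ContextInfo info = factory.getContextInfo(GrContextFactory::kGL_ContextType,
//                                               GrContextFactory::ContextOptions(0));
//     if (!info.grContext()) {   // assumed accessor for the wrapped GrContext
//         return;                // requested backend not supported on this machine
//     }
//     info.grContext()->flush();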
static int filter_picture(const SkString& inFile, const SkString& outFile) {
    SkAutoTDelete<SkPicture> inPicture;

    SkFILEStream inStream(inFile.c_str());
    if (inStream.isValid()) {
        inPicture.reset(SkPicture::CreateFromStream(&inStream));
    }

    if (NULL == inPicture.get()) {
        SkDebugf("Could not read file %s\n", inFile.c_str());
        return -1;
    }

    int localCount[SK_ARRAY_COUNT(gOptTable)];
    memset(localCount, 0, sizeof(localCount));

    SkDebugCanvas debugCanvas(inPicture->width(), inPicture->height());
    debugCanvas.setBounds(inPicture->width(), inPicture->height());
    inPicture->draw(&debugCanvas);

    // delete the initial save and restore since replaying the commands will
    // re-add them
    if (debugCanvas.getSize() > 1) {
        debugCanvas.deleteDrawCommandAt(0);
        debugCanvas.deleteDrawCommandAt(debugCanvas.getSize() - 1);
    }

    bool changed = true;
    int numBefore = debugCanvas.getSize();

    while (changed) {
        changed = false;
        for (int i = 0; i < debugCanvas.getSize(); ++i) {
            for (size_t opt = 0; opt < SK_ARRAY_COUNT(gOptTable); ++opt) {
                if ((*gOptTable[opt].fCheck)(&debugCanvas, i)) {
                    (*gOptTable[opt].fApply)(&debugCanvas, i);

                    ++gOptTable[opt].fNumTimesApplied;
                    ++localCount[opt];

                    if (debugCanvas.getSize() == i) {
                        // the optimization removed all the remaining operations
                        break;
                    }

                    opt = 0;          // try all the opts all over again
                    changed = true;
                }
            }
        }
    }

    int numAfter = debugCanvas.getSize();

    if (!outFile.isEmpty()) {
        SkPicture outPicture;

        SkCanvas* canvas = outPicture.beginRecording(inPicture->width(), inPicture->height());
        debugCanvas.draw(canvas);
        outPicture.endRecording();

        SkFILEWStream outStream(outFile.c_str());

        outPicture.serialize(&outStream);
    }

    bool someOptFired = false;
    for (size_t opt = 0; opt < SK_ARRAY_COUNT(gOptTable); ++opt) {
        if (0 != localCount[opt]) {
            SkDebugf("%d: %d ", opt, localCount[opt]);
            someOptFired = true;
        }
    }

    if (!someOptFired) {
        SkDebugf("No opts fired\n");
    } else {
        SkDebugf("\t before: %d after: %d delta: %d\n",
                 numBefore, numAfter, numBefore - numAfter);
    }

    return 0;
}
bool GrGpuGL::programUnitTest(int maxStages) {

    GrTextureDesc dummyDesc;
    dummyDesc.fFlags = kRenderTarget_GrTextureFlagBit;
    dummyDesc.fConfig = kSkia8888_GrPixelConfig;
    dummyDesc.fWidth = 34;
    dummyDesc.fHeight = 18;
    SkAutoTUnref<GrTexture> dummyTexture1(this->createTexture(dummyDesc, NULL, 0));
    dummyDesc.fFlags = kNone_GrTextureFlags;
    dummyDesc.fConfig = kAlpha_8_GrPixelConfig;
    dummyDesc.fWidth = 16;
    dummyDesc.fHeight = 22;
    SkAutoTUnref<GrTexture> dummyTexture2(this->createTexture(dummyDesc, NULL, 0));

    if (!dummyTexture1 || !dummyTexture2) {
        return false;
    }

    static const int NUM_TESTS = 512;

    SkRandom random;
    for (int t = 0; t < NUM_TESTS; ++t) {

#if 0
        GrPrintf("\nTest Program %d\n-------------\n", t);
        static const int stop = -1;
        if (t == stop) {
            int breakpointhere = 9;
        }
#endif

        GrGLProgramDesc pdesc;

        int currAttribIndex = 1;  // we need to always leave room for position
        int currTextureCoordSet = 0;
        GrTexture* dummyTextures[] = {dummyTexture1.get(), dummyTexture2.get()};

        int numStages = random.nextULessThan(maxStages + 1);
        int numColorStages = random.nextULessThan(numStages + 1);
        int numCoverageStages = numStages - numColorStages;

        SkAutoSTMalloc<8, const GrFragmentStage*> stages(numStages);

        bool usePathRendering = this->glCaps().pathRenderingSupport() && random.nextBool();

        GrGpu::DrawType drawType = usePathRendering ? GrGpu::kDrawPath_DrawType :
                                                      GrGpu::kDrawPoints_DrawType;

        SkAutoTDelete<GrGeometryStage> geometryProcessor;
        bool hasGeometryProcessor = usePathRendering ? false : random.nextBool();
        if (hasGeometryProcessor) {
            while (true) {
                SkAutoTUnref<const GrGeometryProcessor> effect(
                        GrProcessorTestFactory<GrGeometryProcessor>::CreateStage(
                                &random, this->getContext(), *this->caps(), dummyTextures));
                SkASSERT(effect);
                // Only geometryProcessor can use vertex shader
                GrGeometryStage* stage = SkNEW_ARGS(GrGeometryStage, (effect.get()));
                geometryProcessor.reset(stage);

                // we have to set dummy vertex attribs
                const GrGeometryProcessor::VertexAttribArray& v = effect->getVertexAttribs();
                int numVertexAttribs = v.count();

                SkASSERT(GrGeometryProcessor::kMaxVertexAttribs == 2 &&
                         GrGeometryProcessor::kMaxVertexAttribs >= numVertexAttribs);
                size_t runningStride = GrVertexAttribTypeSize(genericVertexAttribs[0].fType);
                for (int i = 0; i < numVertexAttribs; i++) {
                    genericVertexAttribs[i + 1].fOffset = runningStride;
                    genericVertexAttribs[i + 1].fType =
                            convert_sltype_to_attribtype(v[i].getType());
                    runningStride += GrVertexAttribTypeSize(genericVertexAttribs[i + 1].fType);
                }

                // update the vertex attributes with the ds
                GrDrawState* ds = this->drawState();
                ds->setVertexAttribs<genericVertexAttribs>(numVertexAttribs + 1, runningStride);
                currAttribIndex = numVertexAttribs + 1;
                break;
            }
        }
        for (int s = 0; s < numStages;) {
            SkAutoTUnref<const GrFragmentProcessor> effect(
                    GrProcessorTestFactory<GrFragmentProcessor>::CreateStage(
                            &random, this->getContext(), *this->caps(), dummyTextures));
            SkASSERT(effect);

            // If adding this effect would exceed the max texture coord set count then generate a
            // new random effect.
            if (usePathRendering && this->glPathRendering()->texturingMode() ==
                                    GrGLPathRendering::FixedFunction_TexturingMode) {
                int numTransforms = effect->numTransforms();
                if (currTextureCoordSet + numTransforms >
                        this->glCaps().maxFixedFunctionTextureCoords()) {
                    continue;
                }
                currTextureCoordSet += numTransforms;
            }
            GrFragmentStage* stage = SkNEW_ARGS(GrFragmentStage, (effect.get()));

            stages[s] = stage;
            ++s;
        }
        const GrTexture* dstTexture = random.nextBool() ? dummyTextures[0] : dummyTextures[1];
        if (!pdesc.setRandom(&random,
                             this,
                             dummyTextures[0]->asRenderTarget(),
                             dstTexture,
                             geometryProcessor.get(),
                             stages.get(),
                             numColorStages,
                             numCoverageStages,
                             currAttribIndex,
                             drawType)) {
            return false;
        }

        SkAutoTUnref<GrOptDrawState> optState(GrOptDrawState::Create(this->getDrawState(),
                                                                     *this->caps(),
                                                                     drawType));
        SkAutoTUnref<GrGLProgram> program(
                GrGLProgramBuilder::CreateProgram(*optState, pdesc, drawType, geometryProcessor,
                                                  stages, stages + numColorStages, this));
        for (int s = 0; s < numStages; ++s) {
            SkDELETE(stages[s]);
        }

        if (NULL == program.get()) {
            return false;
        }

        // We have to reset the drawstate because we might have added a gp
        this->drawState()->reset();
    }
    return true;
}
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkCommandLineFlags::Parse(argc, argv);
#if SK_ENABLE_INST_COUNT
    if (FLAGS_leaks) {
        gPrintInstCount = true;
    }
#endif
    SkAutoGraphics ag;

    // First, parse some flags.
    BenchLogger logger;
    if (FLAGS_logFile.count()) {
        logger.SetLogFile(FLAGS_logFile[0]);
    }

    LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
    MultiResultsWriter writer;
    writer.add(&logWriter);

    SkAutoTDelete<JSONResultsWriter> jsonWriter;
    if (FLAGS_outResultsFile.count()) {
        jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
        writer.add(jsonWriter.get());
    }

    // Instantiate after all the writers have been added to writer so that we
    // call close() before their destructors are called on the way out.
    CallEnd<MultiResultsWriter> ender(writer);

    const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
    SkTriState::State dither = SkTriState::kDefault;
    for (size_t i = 0; i < 3; i++) {
        if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
            dither = static_cast<SkTriState::State>(i);
        }
    }

    BenchMode benchMode = kNormal_BenchMode;
    for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
        if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
            benchMode = static_cast<BenchMode>(i);
        }
    }

    SkTDArray<int> configs;
    bool runDefaultConfigs = false;
    // Try user-given configs first.
    for (int i = 0; i < FLAGS_config.count(); i++) {
        for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
            if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
                *configs.append() = j;
            } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
                runDefaultConfigs = true;
            }
        }
    }
    // If there weren't any, fill in with defaults.
    if (runDefaultConfigs) {
        for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
            if (gConfigs[i].runByDefault) {
                *configs.append() = i;
            }
        }
    }
    // Filter out things we can't run.
    if (kNormal_BenchMode != benchMode) {
        // Non-rendering configs only run in normal mode
        for (int i = 0; i < configs.count(); ++i) {
            const Config& config = gConfigs[configs[i]];
            if (Benchmark::kNonRendering_Backend == config.backend) {
                configs.remove(i, 1);
                --i;
            }
        }
    }

#if SK_SUPPORT_GPU
    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];

        if (Benchmark::kGPU_Backend == config.backend) {
            GrContext* context = gContextFactory.get(config.contextType);
            if (NULL == context) {
                SkDebugf("GrContext could not be created for config %s. Config will be skipped.\n",
                         config.name);
                configs.remove(i);
                --i;
                continue;
            }
            if (config.sampleCount > context->getMaxSampleCount()) {
                SkDebugf(
                    "Sample count (%d) for config %s is not supported. Config will be skipped.\n",
                    config.sampleCount, config.name);
                configs.remove(i);
                --i;
                continue;
            }
        }
    }
#endif

    // All flags should be parsed now. Report our settings.
    if (FLAGS_runOnce) {
        logger.logError("bench was run with --runOnce, so we're going to hide the times."
                        " It's for your own good!\n");
    }
    writer.option("mode", FLAGS_mode[0]);
    writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
    writer.option("antialias", SkStringPrintf("%d", FLAGS_forceAA).c_str());
    writer.option("filter", SkStringPrintf("%d", FLAGS_forceFilter).c_str());
    writer.option("dither", SkTriState::Name[dither]);

    writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
    writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
    writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());

#if defined(SK_BUILD_FOR_WIN32)
    writer.option("system", "WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    writer.option("system", "MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    writer.option("system", "ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    writer.option("system", "UNIX");
#else
    writer.option("system", "other");
#endif

#if defined(SK_DEBUG)
    writer.option("build", "DEBUG");
#else
    writer.option("build", "RELEASE");
#endif

    // Set texture cache limits if non-default.
    for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
        const Config& config = gConfigs[i];
        if (Benchmark::kGPU_Backend != config.backend) {
            continue;
        }
        GrContext* context = gContextFactory.get(config.contextType);
        if (NULL == context) {
            continue;
        }

        size_t bytes;
        int count;
        context->getResourceCacheLimits(&count, &bytes);
        if (-1 != FLAGS_gpuCacheBytes) {
            bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
        }
        if (-1 != FLAGS_gpuCacheCount) {
            count = FLAGS_gpuCacheCount;
        }
        context->setResourceCacheLimits(count, bytes);
#endif
    }

    // Run each bench in each configuration it supports and we asked for.
    Iter iter;
    Benchmark* bench;
    while ((bench = iter.next()) != NULL) {
        SkAutoTUnref<Benchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }

        bench->setForceAlpha(alpha);
        bench->setForceAA(FLAGS_forceAA);
        bench->setForceFilter(FLAGS_forceFilter);
        bench->setDither(dither);
        bench->preDraw();

        bool loggedBenchName = false;
        for (int i = 0; i < configs.count(); ++i) {
            const int configIndex = configs[i];
            const Config& config = gConfigs[configIndex];

            if (!bench->isSuitableFor(config.backend)) {
                continue;
            }

            GrContext* context = NULL;
#if SK_SUPPORT_GPU
            SkGLContextHelper* glContext = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                context = gContextFactory.get(config.contextType);
                if (NULL == context) {
                    continue;
                }
                glContext = gContextFactory.getGLContext(config.contextType);
            }
#endif

            SkAutoTUnref<SkCanvas> canvas;
            SkAutoTUnref<SkPicture> recordFrom;
            SkPictureRecorder recorderTo;
            const SkIPoint dim = bench->getSize();

            SkAutoTUnref<SkSurface> surface;
            if (Benchmark::kNonRendering_Backend != config.backend) {
                surface.reset(make_surface(config.fColorType,
                                           dim,
                                           config.backend,
                                           config.sampleCount,
                                           context));
                if (!surface.get()) {
                    logger.logError(SkStringPrintf(
                        "Device creation failure for config %s. Will skip.\n", config.name));
                    continue;
                }

                switch (benchMode) {
                    case kDeferredSilent_BenchMode:
                    case kDeferred_BenchMode:
                        canvas.reset(SkDeferredCanvas::Create(surface.get()));
                        break;
                    case kRecord_BenchMode:
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    case kPictureRecord_BenchMode: {
                        SkPictureRecorder recorderFrom;
                        bench->draw(1, recorderFrom.beginRecording(dim.fX, dim.fY));
                        recordFrom.reset(recorderFrom.endRecording());
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    }
                    case kNormal_BenchMode:
                        canvas.reset(SkRef(surface->getCanvas()));
                        break;
                    default:
                        SkASSERT(false);
                }
            }

            if (NULL != canvas) {
                canvas->clear(SK_ColorWHITE);
                if (FLAGS_clip) {
                    perform_clip(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_scale) {
                    perform_scale(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_rotate) {
                    perform_rotate(canvas, dim.fX, dim.fY);
                }
            }

            if (!loggedBenchName) {
                loggedBenchName = true;
                writer.bench(bench->getName(), dim.fX, dim.fY);
            }

#if SK_SUPPORT_GPU
            SkGLContextHelper* contextHelper = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                contextHelper = gContextFactory.getGLContext(config.contextType);
            }
            BenchTimer timer(contextHelper);
#else
            BenchTimer timer;
#endif

            double previous = std::numeric_limits<double>::infinity();
            bool converged = false;

            // variables used to compute loopsPerFrame
            double frameIntervalTime = 0.0f;
            int frameIntervalTotalLoops = 0;

            bool frameIntervalComputed = false;
            int loopsPerFrame = 0;
            int loopsPerIter = 0;
            if (FLAGS_verbose) {
                SkDebugf("%s %s: ", bench->getName(), config.name);
            }
            if (!FLAGS_dryRun) {
                do {
                    // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
                    loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
                    if (loopsPerIter >= (1 << 30) || timer.fWall > FLAGS_maxMs) {
                        // If you find it takes more than a billion loops to get up to 20ms of
                        // runtime, you've got a computer clocked at several THz or have a broken
                        // benchmark. ;)
                        // "1B ought to be enough for anybody."
                        logger.logError(SkStringPrintf(
                            "\nCan't get %s %s to converge in %dms (%d loops)",
                            bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
                        break;
                    }

                    if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
                        // Clear the recorded commands so that they do not accumulate.
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                    }

                    timer.start();
                    // Inner loop that allows us to break the run into smaller
                    // chunks (e.g. frames). This is especially useful for the GPU
                    // as we can flush and/or swap buffers to keep the GPU from
                    // queuing up too much work.
                    for (int loopCount = loopsPerIter; loopCount > 0; ) {
                        // Save and restore around each call to draw() to guarantee a
                        // pristine canvas.
                        SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);

                        int loops;
                        if (frameIntervalComputed && loopCount > loopsPerFrame) {
                            loops = loopsPerFrame;
                            loopCount -= loopsPerFrame;
                        } else {
                            loops = loopCount;
                            loopCount = 0;
                        }

                        if (benchMode == kPictureRecord_BenchMode) {
                            recordFrom->draw(canvas);
                        } else {
                            bench->draw(loops, canvas);
                        }

                        if (kDeferredSilent_BenchMode == benchMode) {
                            static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
                        } else if (NULL != canvas) {
                            canvas->flush();
                        }

#if SK_SUPPORT_GPU
                        // swap drawing buffers on each frame to prevent the GPU
                        // from queuing up too much work
                        if (NULL != glContext) {
                            glContext->swapBuffers();
                        }
#endif
                    }

                    // Stop truncated timers before GL calls complete, and stop the full
                    // timers after.
                    timer.truncatedEnd();
#if SK_SUPPORT_GPU
                    if (NULL != glContext) {
                        context->flush();
                        SK_GL(*glContext, Finish());
                    }
#endif
                    timer.end();

                    // setup the frame interval for subsequent iterations
                    if (!frameIntervalComputed) {
                        frameIntervalTime += timer.fWall;
                        frameIntervalTotalLoops += loopsPerIter;
                        if (frameIntervalTime >= FLAGS_minMs) {
                            frameIntervalComputed = true;
                            loopsPerFrame = (int)(((double)frameIntervalTotalLoops /
                                                   frameIntervalTime) * FLAGS_minMs);
                            if (loopsPerFrame < 1) {
                                loopsPerFrame = 1;
                            }
                            // SkDebugf("  %s has %d loops in %f ms (normalized to %d)\n",
                            //          bench->getName(), frameIntervalTotalLoops,
                            //          timer.fWall, loopsPerFrame);
                        }
                    }

                    const double current = timer.fWall / loopsPerIter;
                    if (FLAGS_verbose && current > previous) {
                        SkDebugf("↑");
                    }
                    if (FLAGS_verbose) {
                        SkDebugf("%.3g ", current);
                    }
                    converged = HasConverged(previous, current, timer.fWall);
                    previous = current;
                } while (!FLAGS_runOnce && !converged);
            }
            if (FLAGS_verbose) {
                SkDebugf("\n");
            }

            if (!FLAGS_dryRun && FLAGS_outDir.count() &&
                Benchmark::kNonRendering_Backend != config.backend) {
                SkAutoTUnref<SkImage> image(surface->newImageSnapshot());
                if (image.get()) {
                    saveFile(bench->getName(), config.name, FLAGS_outDir[0], image);
                }
            }

            if (FLAGS_runOnce) {
                // Let's not mislead ourselves by looking at Debug build or
                // single iteration bench times!
                continue;
            }

            // Normalize to ms per 1000 iterations.
            const double normalize = 1000.0 / loopsPerIter;
            const struct { char shortName; const char* longName; double ms; } times[] = {
                {'w', "msecs",  normalize * timer.fWall},
                {'W', "Wmsecs", normalize * timer.fTruncatedWall},
                {'c', "cmsecs", normalize * timer.fCpu},
                {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
                {'g', "gmsecs", normalize * timer.fGpu},
            };

            writer.config(config.name);
            for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
                if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
                    writer.timer(times[i].longName, times[i].ms);
                }
            }
        }
    }
#if SK_SUPPORT_GPU
    gContextFactory.destroyContexts();
#endif
    return 0;
}