static Sink* create_sink(const SkCommandLineConfig* config) {
#if SK_SUPPORT_GPU
    if (gpu_supported()) {
        if (const SkCommandLineConfigGpu* gpuConfig = config->asConfigGpu()) {
            GrContextFactory::ContextType contextType = gpuConfig->getContextType();
            GrContextFactory::ContextOptions contextOptions =
                    GrContextFactory::kNone_ContextOptions;
            if (gpuConfig->getUseNVPR()) {
                contextOptions = static_cast<GrContextFactory::ContextOptions>(
                        contextOptions | GrContextFactory::kEnableNVPR_ContextOptions);
            }
            if (SkColorAndColorSpaceAreGammaCorrect(gpuConfig->getColorType(),
                                                    gpuConfig->getColorSpace())) {
                contextOptions = static_cast<GrContextFactory::ContextOptions>(
                        contextOptions | GrContextFactory::kRequireSRGBSupport_ContextOptions);
            }
            GrContextFactory testFactory;
            if (!testFactory.get(contextType, contextOptions)) {
                info("WARNING: can not create GPU context for config '%s'. "
                     "GM tests will be skipped.\n", gpuConfig->getTag().c_str());
                return nullptr;
            }
            return new GPUSink(contextType, contextOptions, gpuConfig->getSamples(),
                               gpuConfig->getUseDIText(), gpuConfig->getColorType(),
                               sk_ref_sp(gpuConfig->getColorSpace()), FLAGS_gpu_threading);
        }
    }
#endif

#define SINK(t, sink, ...) if (config->getBackend().equals(t)) { return new sink(__VA_ARGS__); }

#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    SINK("hwui", HWUISink);
#endif

    if (FLAGS_cpu) {
        auto srgbColorSpace = SkColorSpace::NewNamed(SkColorSpace::kSRGB_Named);
        SINK("565",  RasterSink, kRGB_565_SkColorType);
        SINK("8888", RasterSink, kN32_SkColorType);
        SINK("srgb", RasterSink, kN32_SkColorType, srgbColorSpace);
        SINK("f16",  RasterSink, kRGBA_F16_SkColorType);
        SINK("pdf",  PDFSink);
        SINK("skp",  SKPSink);
        SINK("svg",  SVGSink);
        SINK("null", NullSink);
        SINK("xps",  XPSSink);
        SINK("pdfa", PDFSink, true);
    }
#undef SINK
    return nullptr;
}
static Sink* create_sink(const SkCommandLineConfig* config) {
#if SK_SUPPORT_GPU
    if (gpu_supported()) {
        if (const SkCommandLineConfigGpu* gpuConfig = config->asConfigGpu()) {
            GrContextFactory::GLContextType contextType = gpuConfig->getContextType();
            GrContextFactory::GLContextOptions contextOptions =
                    GrContextFactory::kNone_GLContextOptions;
            if (gpuConfig->getUseNVPR()) {
                contextOptions = static_cast<GrContextFactory::GLContextOptions>(
                        contextOptions | GrContextFactory::kEnableNVPR_GLContextOptions);
            }
            GrContextFactory testFactory;
            if (!testFactory.get(contextType, contextOptions)) {
                SkDebugf("WARNING: can not create GPU context for config '%s'. "
                         "GM tests will be skipped.\n", gpuConfig->getTag().c_str());
                return nullptr;
            }
            return new GPUSink(contextType, contextOptions, gpuConfig->getSamples(),
                               gpuConfig->getUseDIText(), FLAGS_gpu_threading);
        }
    }
#endif

#define SINK(t, sink, ...) if (config->getBackend().equals(t)) { return new sink(__VA_ARGS__); }

#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    SINK("hwui", HWUISink);
#endif

    if (FLAGS_cpu) {
        SINK("565",         RasterSink, kRGB_565_SkColorType);
        SINK("8888",        RasterSink, kN32_SkColorType);
        SINK("pdf",         PDFSink, "Pdfium");
        SINK("pdf_poppler", PDFSink, "Poppler");
        SINK("skp",         SKPSink);
        SINK("svg",         SVGSink);
        SINK("null",        NullSink);
        SINK("xps",         XPSSink);
    }
#undef SINK
    return nullptr;
}
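// A minimal, self-contained sketch (not Skia API) of the flag-combining pattern that both
// create_sink() variants above use with GrContextFactory::ContextOptions: OR'ing plain enum
// values yields an int, so the result has to be cast back to the enum type. Every name below
// (DemoOptions, add_option, ...) is made up purely for illustration.
#include <cstdio>

enum DemoOptions {
    kNone_DemoOption        = 0x0,
    kEnableNVPR_DemoOption  = 0x1,
    kRequireSRGB_DemoOption = 0x2,
};

static DemoOptions add_option(DemoOptions current, DemoOptions extra) {
    // operator| on unscoped enums promotes to int; the cast restores the enum type,
    // mirroring the static_cast<GrContextFactory::ContextOptions>(...) calls above.
    return static_cast<DemoOptions>(current | extra);
}

int main() {
    DemoOptions opts = kNone_DemoOption;
    opts = add_option(opts, kEnableNVPR_DemoOption);
    opts = add_option(opts, kRequireSRGB_DemoOption);
    std::printf("options bitmask: 0x%x\n", opts);  // prints 0x3
    return 0;
}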
void TestResult::testOne() {
    sk_sp<SkPicture> pic;
    {
        SkString d;
        d.printf(" {%d, \"%s\"},", fDirNo, fFilename);
        SkString path = make_filepath(fDirNo, IN_DIR, fFilename);
        SkFILEStream stream(path.c_str());
        if (!stream.isValid()) {
            SkDebugf("invalid stream %s\n", path.c_str());
            goto finish;
        }
        if (fTestStep == kEncodeFiles) {
            size_t length = stream.getLength();
            SkTArray<char, true> bytes;
            bytes.push_back_n(length);
            stream.read(&bytes[0], length);
            stream.rewind();
            SkString wPath = make_filepath(0, outSkpDir, fFilename);
            SkFILEWStream wStream(wPath.c_str());
            wStream.write(&bytes[0], length);
            wStream.flush();
        }
        pic = SkPicture::MakeFromStream(&stream);
        if (!pic) {
            SkDebugf("unable to decode %s\n", fFilename);
            goto finish;
        }
        int pWidth = pic->width();
        int pHeight = pic->height();
        int pLargerWH = SkTMax(pWidth, pHeight);
        GrContextFactory contextFactory;
#ifdef SK_BUILD_FOR_WIN
        GrContext* context = contextFactory.get(kAngle);
#else
        GrContext* context = contextFactory.get(kNative);
#endif
        if (nullptr == context) {
            SkDebugf("unable to allocate context for %s\n", fFilename);
            goto finish;
        }
        int maxWH = context->getMaxRenderTargetSize();
        int scale = 1;
        while (pLargerWH / scale > maxWH) {
            scale *= 2;
        }
        SkBitmap bitmap;
        SkIPoint dim;
        do {
            dim.fX = (pWidth + scale - 1) / scale;
            dim.fY = (pHeight + scale - 1) / scale;
            bool success = bitmap.allocN32Pixels(dim.fX, dim.fY);
            if (success) {
                break;
            }
            SkDebugf("-%d-", scale);
        } while ((scale *= 2) < 256);
        if (scale >= 256) {
            SkDebugf("unable to allocate bitmap for %s (w=%d h=%d) (sw=%d sh=%d)\n",
                     fFilename, pWidth, pHeight, dim.fX, dim.fY);
            return;
        }
        SkCanvas skCanvas(bitmap);
        drawPict(pic, &skCanvas, fScaleOversized ? scale : 1);
        GrTextureDesc desc;
        desc.fConfig = kRGBA_8888_GrPixelConfig;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = dim.fX;
        desc.fHeight = dim.fY;
        desc.fSampleCnt = 0;
        sk_sp<GrTexture> texture(context->createUncachedTexture(desc, nullptr, 0));
        if (!texture) {
            SkDebugf("unable to allocate texture for %s (w=%d h=%d)\n",
                     fFilename, dim.fX, dim.fY);
            return;
        }
        SkGpuDevice grDevice(context, texture.get());
        SkCanvas grCanvas(&grDevice);
        drawPict(pic.get(), &grCanvas, fScaleOversized ? scale : 1);
        SkBitmap grBitmap;
        grBitmap.allocPixels(grCanvas.imageInfo());
        grCanvas.readPixels(&grBitmap, 0, 0);
        if (fTestStep == kCompareBits) {
            fPixelError = similarBits(grBitmap, bitmap);
            SkMSec skTime = timePict(pic, &skCanvas);
            SkMSec grTime = timePict(pic, &grCanvas);
            fTime = skTime - grTime;
        } else if (fTestStep == kEncodeFiles) {
            SkString pngStr = make_png_name(fFilename);
            const char* pngName = pngStr.c_str();
            writePict(grBitmap, outGrDir, pngName);
            writePict(bitmap, outSkDir, pngName);
        }
    }
finish:
    ;  // jump target for the early-exit gotos above
}
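// A minimal, standalone sketch (assumptions, not Skia code) of the downscale loop in
// TestResult::testOne(): pick the smallest power-of-two divisor that makes the picture fit
// the maximum render-target size, then keep doubling it while allocation keeps failing.
// try_alloc() and kMaxScale are hypothetical stand-ins for bitmap.allocN32Pixels() and 256.
#include <cstdio>

static bool try_alloc(int w, int h) {
    // Stand-in for allocN32Pixels(); pretend anything over ~64M pixels fails.
    return (long long)w * h <= 64LL * 1024 * 1024;
}

static int choose_scale(int picW, int picH, int maxRenderTargetWH) {
    const int kMaxScale = 256;
    int largerWH = picW > picH ? picW : picH;
    int scale = 1;
    while (largerWH / scale > maxRenderTargetWH) {
        scale *= 2;                          // first, fit the larger dimension
    }
    while (scale < kMaxScale) {
        int w = (picW + scale - 1) / scale;  // round up, as testOne() does
        int h = (picH + scale - 1) / scale;
        if (try_alloc(w, h)) {
            return scale;
        }
        scale *= 2;                          // halve the resolution and retry
    }
    return -1;                               // give up, mirroring the scale >= 256 bail-out
}

int main() {
    std::printf("chosen scale: %d\n", choose_scale(40000, 30000, 8192));  // prints 8
    return 0;
}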
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkCommandLineFlags::Parse(argc, argv);
#if SK_ENABLE_INST_COUNT
    if (FLAGS_leaks) {
        gPrintInstCount = true;
    }
#endif
    SkAutoGraphics ag;

    // First, parse some flags.
    BenchLogger logger;
    if (FLAGS_logFile.count()) {
        logger.SetLogFile(FLAGS_logFile[0]);
    }

    LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
    MultiResultsWriter writer;
    writer.add(&logWriter);

    SkAutoTDelete<JSONResultsWriter> jsonWriter;
    if (FLAGS_outResultsFile.count()) {
        jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
        writer.add(jsonWriter.get());
    }
    // Instantiate after all the writers have been added to writer so that we
    // call close() before their destructors are called on the way out.
    CallEnd<MultiResultsWriter> ender(writer);

    const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
    SkTriState::State dither = SkTriState::kDefault;
    for (size_t i = 0; i < 3; i++) {
        if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
            dither = static_cast<SkTriState::State>(i);
        }
    }

    BenchMode benchMode = kNormal_BenchMode;
    for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
        if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
            benchMode = static_cast<BenchMode>(i);
        }
    }

    SkTDArray<int> configs;
    bool runDefaultConfigs = false;
    // Try user-given configs first.
    for (int i = 0; i < FLAGS_config.count(); i++) {
        for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
            if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
                *configs.append() = j;
            } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
                runDefaultConfigs = true;
            }
        }
    }
    // If there weren't any, fill in with defaults.
    if (runDefaultConfigs) {
        for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
            if (gConfigs[i].runByDefault) {
                *configs.append() = i;
            }
        }
    }
    // Filter out things we can't run.
    if (kNormal_BenchMode != benchMode) {
        // Non-rendering configs only run in normal mode.
        for (int i = 0; i < configs.count(); ++i) {
            const Config& config = gConfigs[configs[i]];
            if (Benchmark::kNonRendering_Backend == config.backend) {
                configs.remove(i, 1);
                --i;
            }
        }
    }

#if SK_SUPPORT_GPU
    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];
        if (Benchmark::kGPU_Backend == config.backend) {
            GrContext* context = gContextFactory.get(config.contextType);
            if (NULL == context) {
                SkDebugf("GrContext could not be created for config %s. Config will be skipped.\n",
                         config.name);
                configs.remove(i);
                --i;
                continue;
            }
            if (config.sampleCount > context->getMaxSampleCount()) {
                SkDebugf(
                    "Sample count (%d) for config %s is not supported. Config will be skipped.\n",
                    config.sampleCount, config.name);
                configs.remove(i);
                --i;
                continue;
            }
        }
    }
#endif

    // All flags should be parsed now.  Report our settings.
    if (FLAGS_runOnce) {
        logger.logError("bench was run with --runOnce, so we're going to hide the times."
                        " It's for your own good!\n");
    }
    writer.option("mode", FLAGS_mode[0]);
    writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
    writer.option("antialias", SkStringPrintf("%d", FLAGS_forceAA).c_str());
    writer.option("filter", SkStringPrintf("%d", FLAGS_forceFilter).c_str());
    writer.option("dither", SkTriState::Name[dither]);
    writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
    writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
    writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());

#if defined(SK_BUILD_FOR_WIN32)
    writer.option("system", "WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    writer.option("system", "MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    writer.option("system", "ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    writer.option("system", "UNIX");
#else
    writer.option("system", "other");
#endif

#if defined(SK_DEBUG)
    writer.option("build", "DEBUG");
#else
    writer.option("build", "RELEASE");
#endif

    // Set texture cache limits if non-default.
    for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
        const Config& config = gConfigs[i];
        if (Benchmark::kGPU_Backend != config.backend) {
            continue;
        }
        GrContext* context = gContextFactory.get(config.contextType);
        if (NULL == context) {
            continue;
        }

        size_t bytes;
        int count;
        context->getResourceCacheLimits(&count, &bytes);
        if (-1 != FLAGS_gpuCacheBytes) {
            bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
        }
        if (-1 != FLAGS_gpuCacheCount) {
            count = FLAGS_gpuCacheCount;
        }
        context->setResourceCacheLimits(count, bytes);
#endif
    }

    // Run each bench in each configuration it supports and we asked for.
    Iter iter;
    Benchmark* bench;
    while ((bench = iter.next()) != NULL) {
        SkAutoTUnref<Benchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }

        bench->setForceAlpha(alpha);
        bench->setForceAA(FLAGS_forceAA);
        bench->setForceFilter(FLAGS_forceFilter);
        bench->setDither(dither);
        bench->preDraw();

        bool loggedBenchName = false;
        for (int i = 0; i < configs.count(); ++i) {
            const int configIndex = configs[i];
            const Config& config = gConfigs[configIndex];

            if (!bench->isSuitableFor(config.backend)) {
                continue;
            }

            GrContext* context = NULL;
#if SK_SUPPORT_GPU
            SkGLContextHelper* glContext = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                context = gContextFactory.get(config.contextType);
                if (NULL == context) {
                    continue;
                }
                glContext = gContextFactory.getGLContext(config.contextType);
            }
#endif

            SkAutoTUnref<SkCanvas> canvas;
            SkAutoTUnref<SkPicture> recordFrom;
            SkPictureRecorder recorderTo;
            const SkIPoint dim = bench->getSize();

            SkAutoTUnref<SkSurface> surface;
            if (Benchmark::kNonRendering_Backend != config.backend) {
                surface.reset(make_surface(config.fColorType,
                                           dim,
                                           config.backend,
                                           config.sampleCount,
                                           context));
                if (!surface.get()) {
                    logger.logError(SkStringPrintf(
                        "Device creation failure for config %s. Will skip.\n", config.name));
                    continue;
                }

                switch (benchMode) {
                    case kDeferredSilent_BenchMode:
                    case kDeferred_BenchMode:
                        canvas.reset(SkDeferredCanvas::Create(surface.get()));
                        break;
                    case kRecord_BenchMode:
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    case kPictureRecord_BenchMode: {
                        SkPictureRecorder recorderFrom;
                        bench->draw(1, recorderFrom.beginRecording(dim.fX, dim.fY));
                        recordFrom.reset(recorderFrom.endRecording());
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    }
                    case kNormal_BenchMode:
                        canvas.reset(SkRef(surface->getCanvas()));
                        break;
                    default:
                        SkASSERT(false);
                }
            }

            if (NULL != canvas) {
                canvas->clear(SK_ColorWHITE);
                if (FLAGS_clip)   { perform_clip(canvas, dim.fX, dim.fY); }
                if (FLAGS_scale)  { perform_scale(canvas, dim.fX, dim.fY); }
                if (FLAGS_rotate) { perform_rotate(canvas, dim.fX, dim.fY); }
            }

            if (!loggedBenchName) {
                loggedBenchName = true;
                writer.bench(bench->getName(), dim.fX, dim.fY);
            }

#if SK_SUPPORT_GPU
            SkGLContextHelper* contextHelper = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                contextHelper = gContextFactory.getGLContext(config.contextType);
            }
            BenchTimer timer(contextHelper);
#else
            BenchTimer timer;
#endif

            double previous = std::numeric_limits<double>::infinity();
            bool converged = false;

            // variables used to compute loopsPerFrame
            double frameIntervalTime = 0.0f;
            int frameIntervalTotalLoops = 0;

            bool frameIntervalComputed = false;
            int loopsPerFrame = 0;
            int loopsPerIter = 0;
            if (FLAGS_verbose) {
                SkDebugf("%s %s: ", bench->getName(), config.name);
            }
            if (!FLAGS_dryRun) {
                do {
                    // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
                    loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
                    if (loopsPerIter >= (1<<30) || timer.fWall > FLAGS_maxMs) {
                        // If you find it takes more than a billion loops to get up to 20ms of
                        // runtime, you've got a computer clocked at several THz or have a broken
                        // benchmark.  ;)
                        // "1B ought to be enough for anybody."
                        logger.logError(SkStringPrintf(
                            "\nCan't get %s %s to converge in %dms (%d loops)",
                            bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
                        break;
                    }

                    if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
                        // Clear the recorded commands so that they do not accumulate.
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                    }

                    timer.start();
                    // Inner loop that allows us to break the run into smaller
                    // chunks (e.g. frames). This is especially useful for the GPU
                    // as we can flush and/or swap buffers to keep the GPU from
                    // queuing up too much work.
                    for (int loopCount = loopsPerIter; loopCount > 0; ) {
                        // Save and restore around each call to draw() to guarantee a pristine canvas.
                        SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);

                        int loops;
                        if (frameIntervalComputed && loopCount > loopsPerFrame) {
                            loops = loopsPerFrame;
                            loopCount -= loopsPerFrame;
                        } else {
                            loops = loopCount;
                            loopCount = 0;
                        }

                        if (benchMode == kPictureRecord_BenchMode) {
                            recordFrom->draw(canvas);
                        } else {
                            bench->draw(loops, canvas);
                        }

                        if (kDeferredSilent_BenchMode == benchMode) {
                            static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
                        } else if (NULL != canvas) {
                            canvas->flush();
                        }

#if SK_SUPPORT_GPU
                        // Swap drawing buffers on each frame to prevent the GPU
                        // from queuing up too much work.
                        if (NULL != glContext) {
                            glContext->swapBuffers();
                        }
#endif
                    }

                    // Stop truncated timers before GL calls complete, and stop the full timers after.
                    timer.truncatedEnd();
#if SK_SUPPORT_GPU
                    if (NULL != glContext) {
                        context->flush();
                        SK_GL(*glContext, Finish());
                    }
#endif
                    timer.end();

                    // Set up the frame interval for subsequent iterations.
                    if (!frameIntervalComputed) {
                        frameIntervalTime += timer.fWall;
                        frameIntervalTotalLoops += loopsPerIter;
                        if (frameIntervalTime >= FLAGS_minMs) {
                            frameIntervalComputed = true;
                            loopsPerFrame =
                                (int)(((double)frameIntervalTotalLoops / frameIntervalTime) * FLAGS_minMs);
                            if (loopsPerFrame < 1) {
                                loopsPerFrame = 1;
                            }
                            // SkDebugf("  %s has %d loops in %f ms (normalized to %d)\n",
                            //          bench->getName(), frameIntervalTotalLoops,
                            //          timer.fWall, loopsPerFrame);
                        }
                    }

                    const double current = timer.fWall / loopsPerIter;
                    if (FLAGS_verbose && current > previous) { SkDebugf("↑"); }
                    if (FLAGS_verbose) { SkDebugf("%.3g ", current); }
                    converged = HasConverged(previous, current, timer.fWall);
                    previous = current;
                } while (!FLAGS_runOnce && !converged);
            }
            if (FLAGS_verbose) { SkDebugf("\n"); }

            if (!FLAGS_dryRun && FLAGS_outDir.count() &&
                Benchmark::kNonRendering_Backend != config.backend) {
                SkAutoTUnref<SkImage> image(surface->newImageSnapshot());
                if (image.get()) {
                    saveFile(bench->getName(), config.name, FLAGS_outDir[0], image);
                }
            }

            if (FLAGS_runOnce) {
                // Let's not mislead ourselves by looking at Debug build or single iteration bench times!
                continue;
            }

            // Normalize to ms per 1000 iterations.
            const double normalize = 1000.0 / loopsPerIter;
            const struct { char shortName; const char* longName; double ms; } times[] = {
                {'w', "msecs",  normalize * timer.fWall},
                {'W', "Wmsecs", normalize * timer.fTruncatedWall},
                {'c', "cmsecs", normalize * timer.fCpu},
                {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
                {'g', "gmsecs", normalize * timer.fGpu},
            };

            writer.config(config.name);
            for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
                if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
                    writer.timer(times[i].longName, times[i].ms);
                }
            }
        }
    }
#if SK_SUPPORT_GPU
    gContextFactory.destroyContexts();
#endif
    return 0;
}
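// A minimal sketch (hypothetical helper names, not the bench harness itself) of the timing
// strategy tool_main() uses: double the loop count each iteration until the measured time per
// loop stops changing, with a hard cap so a broken benchmark can't spin forever. measure_ms()
// and the 1% convergence tolerance are assumptions made for this example only.
#include <chrono>
#include <cmath>
#include <cstdio>

static double measure_ms(int loops) {
    // Stand-in for timing bench->draw(loops, canvas) with BenchTimer.
    using clock = std::chrono::steady_clock;
    auto start = clock::now();
    volatile double sink = 0.0;
    for (int i = 0; i < loops; ++i) {
        sink = sink + std::sqrt((double)i);
    }
    (void)sink;
    return std::chrono::duration<double, std::milli>(clock::now() - start).count();
}

static bool has_converged(double prevPerLoop, double curPerLoop) {
    if (prevPerLoop <= 0.0) { return false; }
    return std::fabs(curPerLoop - prevPerLoop) / prevPerLoop < 0.01;  // within 1%
}

int main() {
    double previous = 0.0;
    for (int loops = 1; loops < (1 << 30); loops *= 2) {       // 1 -> 2 -> 4 -> ... ramp-up
        double wallMs  = measure_ms(loops);
        double perLoop = wallMs / loops;
        std::printf("%9d loops: %8.3f ms total, %.6f ms/loop\n", loops, wallMs, perLoop);
        if (has_converged(previous, perLoop) && wallMs > 20.0) {  // need enough runtime to trust it
            break;
        }
        previous = perLoop;
    }
    return 0;
}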
static bool render_page(const SkString& outputDir,
                        const SkString& inputFilename,
                        const SkPdfRenderer& renderer,
                        int page) {
    SkRect rect = renderer.MediaBox(page < 0 ? 0 : page);

    // Exercise all pdf codepaths as in normal rendering, but no actual bits are changed.
    if (!FLAGS_config.isEmpty() && strcmp(FLAGS_config[0], "nul") == 0) {
        SkBitmap bitmap;
        SkAutoTUnref<SkBaseDevice> device(SkNEW_ARGS(SkBitmapDevice, (bitmap)));
        SkNulCanvas canvas(device);
        renderer.renderPage(page < 0 ? 0 : page, &canvas, rect);
    } else {
        // 8888
        SkRect rect = renderer.MediaBox(page < 0 ? 0 : page);

        SkBitmap bitmap;
        SkScalar width  = SkScalarMul(rect.width(),  SkDoubleToScalar(FLAGS_DPI / 72.0));
        SkScalar height = SkScalarMul(rect.height(), SkDoubleToScalar(FLAGS_DPI / 72.0));

        rect = SkRect::MakeWH(width, height);

        SkColor background = FLAGS_transparentBackground ? SK_ColorTRANSPARENT : SK_ColorWHITE;

#ifdef PDF_DEBUG_3X
        setup_bitmap(&bitmap, 3 * (int)SkScalarToDouble(width), 3 * (int)SkScalarToDouble(height),
                     background);
#else
        setup_bitmap(&bitmap, (int)SkScalarToDouble(width), (int)SkScalarToDouble(height),
                     background);
#endif
        SkAutoTUnref<SkBaseDevice> device;
        if (strcmp(FLAGS_config[0], "8888") == 0) {
            device.reset(SkNEW_ARGS(SkBitmapDevice, (bitmap)));
        }
#if SK_SUPPORT_GPU
        else if (strcmp(FLAGS_config[0], "gpu") == 0) {
            SkAutoTUnref<GrSurface> target;
            GrContext* gr = gContextFactory.get(GrContextFactory::kNative_GLContextType);
            if (gr) {
                // Create a render target to back the device.
                GrTextureDesc desc;
                desc.fConfig = kSkia8888_GrPixelConfig;
                desc.fFlags = kRenderTarget_GrTextureFlagBit;
                desc.fWidth = SkScalarCeilToInt(width);
                desc.fHeight = SkScalarCeilToInt(height);
                desc.fSampleCnt = 0;
                target.reset(gr->createUncachedTexture(desc, NULL, 0));
            }
            if (NULL == target.get()) {
                SkASSERT(0);
                return false;
            }

            device.reset(SkGpuDevice::Create(target));
        }
#endif
        else {
            SkDebugf("unknown --config: %s\n", FLAGS_config[0]);
            return false;
        }
        SkCanvas canvas(device);

#ifdef PDF_TRACE_DIFF_IN_PNG
        gDumpBitmap = &bitmap;
        gDumpCanvas = &canvas;
#endif
        renderer.renderPage(page < 0 ? 0 : page, &canvas, rect);

        SkString outputPath;
        if (!make_output_filepath(&outputPath, outputDir, inputFilename, page)) {
            return false;
        }
        SkImageEncoder::EncodeFile(outputPath.c_str(), bitmap, SkImageEncoder::kPNG_Type, 100);

        if (FLAGS_showMemoryUsage) {
            SkDebugf("Memory usage after page %i rendered: %u\n",
                     page < 0 ? 0 : page, (unsigned int)renderer.bytesUsed());
        }
    }
    return true;
}
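// A small sketch of the DPI conversion render_page() performs: PDF media boxes are in points
// (72 per inch), so pixel dimensions are points * (dpi / 72). The helper name and the 300-DPI
// value below are assumptions for illustration; render_page() takes the DPI from FLAGS_DPI and
// mixes truncation with SkScalarCeilToInt, whereas this sketch simply rounds up.
#include <cmath>
#include <cstdio>

static void page_pixels(double widthPts, double heightPts, double dpi, int* outW, int* outH) {
    *outW = (int)std::ceil(widthPts  * dpi / 72.0);
    *outH = (int)std::ceil(heightPts * dpi / 72.0);
}

int main() {
    int w = 0, h = 0;
    page_pixels(612.0, 792.0, 300.0, &w, &h);                   // US Letter: 8.5in x 11in in points
    std::printf("612x792 pt at 300 DPI -> %dx%d px\n", w, h);   // 2550x3300
    return 0;
}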