DEF_GPUTEST_FOR_ALL_GL_CONTEXTS(VertexAttributeCount, reporter, ctxInfo) {
    GrContext* context = ctxInfo.fGrContext;

    // Note: kRenderTarget_GrSurfaceFlag belongs to GrSurfaceDesc, so that is the
    // descriptor type used here (the original said GrTextureDesc).
    GrSurfaceDesc desc;
    desc.fHeight = 1;
    desc.fWidth = 1;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    SkAutoTUnref<GrTexture> target(
            context->textureProvider()->createTexture(desc, SkBudgeted::kYes));
    if (!target) {
        ERRORF(reporter, "Could not create render target.");
        return;
    }
    SkAutoTUnref<GrDrawContext> dc(context->drawContext(target->asRenderTarget()));
    if (!dc) {
        ERRORF(reporter, "Could not create draw context.");
        return;
    }
    int attribCnt = context->caps()->maxVertexAttributes();
    if (!attribCnt) {
        ERRORF(reporter, "No attributes allowed?!");
        return;
    }
    context->flush();
    context->resetGpuStats();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 0);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 0);
#endif
    SkAutoTUnref<GrDrawBatch> batch;
    GrPipelineBuilder pb;
    pb.setRenderTarget(target->asRenderTarget());

    // This one should succeed.
    batch.reset(new Batch(attribCnt));
    dc->drawContextPriv().testingOnly_drawBatch(pb, batch);
    context->flush();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 1);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 0);
#endif

    context->resetGpuStats();
    // This one should fail.
    batch.reset(new Batch(attribCnt + 1));
    dc->drawContextPriv().testingOnly_drawBatch(pb, batch);
    context->flush();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 0);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 1);
#endif
}
// Tests that MIP maps are created and invalidated as expected when drawing to and from
// GrTextures.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrTextureMipMapInvalidationTest, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    if (!context->priv().caps()->mipMapSupport()) {
        return;
    }

    auto isMipped = [] (SkSurface* surf) {
        const GrTexture* texture = surf->makeImageSnapshot()->getTexture();
        return GrMipMapped::kYes == texture->texturePriv().mipMapped();
    };

    auto mipsAreDirty = [] (SkSurface* surf) {
        return surf->makeImageSnapshot()->getTexture()->texturePriv().mipMapsAreDirty();
    };

    auto info = SkImageInfo::MakeN32Premul(256, 256);
    for (auto allocateMips : {false, true}) {
        auto surf1 = SkSurface::MakeRenderTarget(context, SkBudgeted::kYes, info, 0,
                                                 kBottomLeft_GrSurfaceOrigin, nullptr,
                                                 allocateMips);
        auto surf2 = SkSurface::MakeRenderTarget(context, SkBudgeted::kYes, info);
        // Draw something just in case we ever had a solid color optimization
        surf1->getCanvas()->drawCircle(128, 128, 50, SkPaint());
        surf1->flush();

        // No mipmaps initially
        REPORTER_ASSERT(reporter, isMipped(surf1.get()) == allocateMips);

        // Painting with downscale and medium filter quality should result in mipmap creation.
        // Flush the context rather than the canvas as flushing the canvas triggers MIP level
        // generation.
        SkPaint paint;
        paint.setFilterQuality(kMedium_SkFilterQuality);
        surf2->getCanvas()->scale(0.2f, 0.2f);
        surf2->getCanvas()->drawImage(surf1->makeImageSnapshot(), 0, 0, &paint);
        context->flush();
        REPORTER_ASSERT(reporter, isMipped(surf1.get()) == allocateMips);
        REPORTER_ASSERT(reporter, !allocateMips || !mipsAreDirty(surf1.get()));

        // Changing the contents of the surface should invalidate the mipmap, but not de-allocate.
        surf1->getCanvas()->drawCircle(128, 128, 100, SkPaint());
        context->flush();
        REPORTER_ASSERT(reporter, isMipped(surf1.get()) == allocateMips);
        REPORTER_ASSERT(reporter, mipsAreDirty(surf1.get()));
    }
}
void GrContext::TextBlobCacheOverBudgetCB(void* data) {
    SkASSERT(data);
    // Unlike the GrResourceCache, TextBlobs are drawn at the SkGpuDevice level, therefore they
    // cannot use fFlushToReduceCacheSize because it uses AutoCheckFlush. The solution is to move
    // drawText calls to below the GrContext level, but this is not trivial because they call
    // drawPath on SkGpuDevice.
    GrContext* context = reinterpret_cast<GrContext*>(data);
    context->flush();
}
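// A minimal sketch of how such an over-budget callback is presumably registered during
// GrContext setup. This is an assumption, not shown in the excerpt above: the exact
// GrTextBlobCache constructor signature is inferred from the callback's (void* data)
// shape, which the callback casts back with reinterpret_cast.
void GrContext::initCommon() {
    // ...
    // Hand the cache the static callback plus a back-pointer to this GrContext.
    fTextBlobCache.reset(new GrTextBlobCache(TextBlobCacheOverBudgetCB, this));
}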
DEF_GPUTEST_FOR_ALL_CONTEXTS(VertexAttributeCount, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();

    sk_sp<GrDrawContext> drawContext(context->newDrawContext(SkBackingFit::kApprox,
                                                             1, 1, kRGBA_8888_GrPixelConfig));
    if (!drawContext) {
        ERRORF(reporter, "Could not create draw context.");
        return;
    }
    int attribCnt = context->caps()->maxVertexAttributes();
    if (!attribCnt) {
        ERRORF(reporter, "No attributes allowed?!");
        return;
    }
    context->flush();
    context->resetGpuStats();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 0);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 0);
#endif
    SkAutoTUnref<GrDrawBatch> batch;
    GrPaint grPaint;

    // This one should succeed.
    batch.reset(new Batch(attribCnt));
    drawContext->drawContextPriv().testingOnly_drawBatch(grPaint, batch);
    context->flush();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 1);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 0);
#endif

    context->resetGpuStats();
    // This one should fail.
    batch.reset(new Batch(attribCnt + 1));
    drawContext->drawContextPriv().testingOnly_drawBatch(grPaint, batch);
    context->flush();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 0);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 1);
#endif
}
RENDERTHREAD_SKIA_PIPELINE_TEST(CacheManager, trimMemory) {
    DisplayInfo displayInfo = renderThread.mainDisplayInfo();
    GrContext* grContext = renderThread.getGrContext();
    ASSERT_TRUE(grContext != nullptr);

    // Create pairs of offscreen render targets and images until we exceed the
    // backgroundCacheSizeLimit.
    std::vector<sk_sp<SkSurface>> surfaces;
    while (getCacheUsage(grContext) <= renderThread.cacheManager().getBackgroundCacheSize()) {
        SkImageInfo info = SkImageInfo::MakeA8(displayInfo.w, displayInfo.h);
        sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(grContext, SkBudgeted::kYes, info);
        surface->getCanvas()->drawColor(SK_AlphaTRANSPARENT);

        grContext->flush();

        surfaces.push_back(surface);
    }

    // Create an image and pin it so that we have something with a unique key in the cache.
    sk_sp<Bitmap> bitmap =
            Bitmap::allocateHeapBitmap(SkImageInfo::MakeA8(displayInfo.w, displayInfo.h));
    sk_sp<SkColorFilter> filter;
    sk_sp<SkImage> image = bitmap->makeImage(&filter);
    ASSERT_TRUE(SkImage_pinAsTexture(image.get(), grContext));

    // Attempt to trim all memory while we still hold strong refs.
    renderThread.cacheManager().trimMemory(CacheManager::TrimMemoryMode::Complete);
    ASSERT_TRUE(0 == grContext->getResourceCachePurgeableBytes());

    // Free the surfaces.
    for (size_t i = 0; i < surfaces.size(); i++) {
        ASSERT_TRUE(surfaces[i]->unique());
        surfaces[i].reset();
    }

    // Unpin the image, which should add a unique purgeable key to the cache.
    SkImage_unpinAsTexture(image.get(), grContext);

    // Verify that we have enough purgeable bytes.
    const size_t purgeableBytes = grContext->getResourceCachePurgeableBytes();
    ASSERT_TRUE(renderThread.cacheManager().getBackgroundCacheSize() < purgeableBytes);

    // UI hidden: make sure only some resources got purged (the unique key should remain).
    renderThread.cacheManager().trimMemory(CacheManager::TrimMemoryMode::UiHidden);
    ASSERT_TRUE(0 < grContext->getResourceCachePurgeableBytes());
    ASSERT_TRUE(renderThread.cacheManager().getBackgroundCacheSize() > getCacheUsage(grContext));

    // Complete: make sure everything gets purged.
    renderThread.cacheManager().trimMemory(CacheManager::TrimMemoryMode::Complete);
    ASSERT_TRUE(0 == grContext->getResourceCachePurgeableBytes());
}
DEF_GPUTEST_FOR_ALL_CONTEXTS(VertexAttributeCount, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();

    sk_sp<GrRenderTargetContext> renderTargetContext(context->makeDeferredRenderTargetContext(
            SkBackingFit::kApprox, 1, 1, kRGBA_8888_GrPixelConfig, nullptr));
    if (!renderTargetContext) {
        ERRORF(reporter, "Could not create render target context.");
        return;
    }
    int attribCnt = context->caps()->maxVertexAttributes();
    if (!attribCnt) {
        ERRORF(reporter, "No attributes allowed?!");
        return;
    }
    context->flush();
    context->resetGpuStats();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 0);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 0);
#endif
    GrPaint grPaint;

    // This one should succeed.
    renderTargetContext->priv().testingOnly_addDrawOp(Op::Make(attribCnt));
    context->flush();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 1);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 0);
#endif

    context->resetGpuStats();
    // This one should fail.
    renderTargetContext->priv().testingOnly_addDrawOp(Op::Make(attribCnt + 1));
    context->flush();
#if GR_GPU_STATS
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numDraws() == 0);
    REPORTER_ASSERT(reporter, context->getGpu()->stats()->numFailedDraws() == 1);
#endif
}
DEF_GPUTEST_FOR_ALL_CONTEXTS(TessellatingPathRendererTests, reporter, ctxInfo) {
    GrContext* ctx = ctxInfo.grContext();
    sk_sp<GrRenderTargetContext> rtc(ctx->makeDeferredRenderTargetContext(
            SkBackingFit::kApprox, 800, 800, kRGBA_8888_GrPixelConfig, nullptr, 1,
            GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin));
    if (!rtc) {
        return;
    }

    ctx->flush();
    // Adding discard to appease vulkan validation warning about loading uninitialized data on
    // draw.
    rtc->discard();

    test_path(ctx, rtc.get(), create_path_0());
    test_path(ctx, rtc.get(), create_path_1());
    test_path(ctx, rtc.get(), create_path_2());
    test_path(ctx, rtc.get(), create_path_3());
    test_path(ctx, rtc.get(), create_path_4());
    test_path(ctx, rtc.get(), create_path_5());
    test_path(ctx, rtc.get(), create_path_6());
    test_path(ctx, rtc.get(), create_path_7());
    test_path(ctx, rtc.get(), create_path_8());
    test_path(ctx, rtc.get(), create_path_9());
    test_path(ctx, rtc.get(), create_path_10());
    test_path(ctx, rtc.get(), create_path_11());
    test_path(ctx, rtc.get(), create_path_12());
    test_path(ctx, rtc.get(), create_path_13());
    test_path(ctx, rtc.get(), create_path_14());
    test_path(ctx, rtc.get(), create_path_15());
    test_path(ctx, rtc.get(), create_path_16());
    SkMatrix nonInvertibleMatrix = SkMatrix::MakeScale(0, 0);
    std::unique_ptr<GrFragmentProcessor> fp(create_linear_gradient_processor(ctx));
    test_path(ctx, rtc.get(), create_path_17(), nonInvertibleMatrix, GrAAType::kCoverage,
              std::move(fp));
    test_path(ctx, rtc.get(), create_path_18());
    test_path(ctx, rtc.get(), create_path_19());
    test_path(ctx, rtc.get(), create_path_20(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_21(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_22());
    test_path(ctx, rtc.get(), create_path_23());
    test_path(ctx, rtc.get(), create_path_24());
    test_path(ctx, rtc.get(), create_path_25(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_26(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_27(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_28(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_29());
}
void SKPBench::getGpuStats(SkCanvas* canvas, SkTArray<SkString>* keys, SkTArray<double>* values) {
#if SK_SUPPORT_GPU
    // we do a special single draw and then dump the key / value pairs
    GrContext* context = canvas->getGrContext();
    if (!context) {
        return;
    }

    // TODO refactor this out if we want to test other subclasses of skpbench
    context->flush();
    context->freeGpuResources();
    context->resetContext();
    context->getGpu()->resetShaderCacheForTesting();
    draw_pic_for_stats(canvas, context, fPic, keys, values, "first_frame");

    // draw second frame
    draw_pic_for_stats(canvas, context, fPic, keys, values, "second_frame");
#endif
}
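// draw_pic_for_stats() is referenced above but not defined in this excerpt. A minimal
// sketch of what such a helper plausibly does, under assumption (the exact helper in
// Skia's bench code may differ): reset the GPU stats, draw the picture once, flush,
// then dump the stats as tag-prefixed key/value pairs.
static void draw_pic_for_stats(SkCanvas* canvas, GrContext* context, const SkPicture* pic,
                               SkTArray<SkString>* keys, SkTArray<double>* values,
                               const char* tag) {
    context->resetGpuStats();
    canvas->drawPicture(pic);
    canvas->flush();

    int firstKey = keys->count();
    context->dumpGpuStatsKeyValuePairs(keys, values);  // assumed stats-dumping entry point
    // Prefix each newly added key with the frame tag, e.g. "first_frame_<key>".
    for (int i = firstKey; i < keys->count(); ++i) {
        (*keys)[i] = SkStringPrintf("%s_%s", tag, (*keys)[i].c_str());
    }
}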
DEF_GPUTEST_FOR_ALL_CONTEXTS(TessellatingPathRendererTests, reporter, ctxInfo) {
    GrContext* ctx = ctxInfo.grContext();
    sk_sp<GrRenderTargetContext> rtc(ctx->makeDeferredRenderTargetContext(
            SkBackingFit::kApprox, 800, 800, kRGBA_8888_GrPixelConfig, nullptr, 0,
            kTopLeft_GrSurfaceOrigin));
    if (!rtc) {
        return;
    }

    ctx->flush();
    test_path(ctx, rtc.get(), create_path_0());
    test_path(ctx, rtc.get(), create_path_1());
    test_path(ctx, rtc.get(), create_path_2());
    test_path(ctx, rtc.get(), create_path_3());
    test_path(ctx, rtc.get(), create_path_4());
    test_path(ctx, rtc.get(), create_path_5());
    test_path(ctx, rtc.get(), create_path_6());
    test_path(ctx, rtc.get(), create_path_7());
    test_path(ctx, rtc.get(), create_path_8());
    test_path(ctx, rtc.get(), create_path_9());
    test_path(ctx, rtc.get(), create_path_10());
    test_path(ctx, rtc.get(), create_path_11());
    test_path(ctx, rtc.get(), create_path_12());
    test_path(ctx, rtc.get(), create_path_13());
    test_path(ctx, rtc.get(), create_path_14());
    test_path(ctx, rtc.get(), create_path_15());
    test_path(ctx, rtc.get(), create_path_16());
    SkMatrix nonInvertibleMatrix = SkMatrix::MakeScale(0, 0);
    sk_sp<GrFragmentProcessor> fp(create_linear_gradient_processor(ctx));
    test_path(ctx, rtc.get(), create_path_17(), nonInvertibleMatrix, GrAAType::kCoverage, fp);
    test_path(ctx, rtc.get(), create_path_18());
    test_path(ctx, rtc.get(), create_path_19());
    test_path(ctx, rtc.get(), create_path_20(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_21(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_22());
}
void LayerTextureUpdaterSkPicture::updateTextureRect(GraphicsContext3D* compositorContext,
                                                     TextureAllocator* allocator,
                                                     ManagedTexture* texture,
                                                     const IntRect& sourceRect,
                                                     const IntRect& destRect)
{
    ASSERT(!m_context || m_context == compositorContext);
    m_context = compositorContext;

    if (m_createFrameBuffer) {
        deleteFrameBuffer();
        createFrameBuffer();
        m_createFrameBuffer = false;
    }
    if (!m_fbo)
        return;

    // Bind texture.
    context()->bindFramebuffer(GraphicsContext3D::FRAMEBUFFER, m_fbo);
    texture->framebufferTexture2D(context(), allocator);
    ASSERT(context()->checkFramebufferStatus(GraphicsContext3D::FRAMEBUFFER)
           == GraphicsContext3D::FRAMEBUFFER_COMPLETE);

    // Make sure SKIA uses the correct GL context.
    context()->makeContextCurrent();
    GrContext* skiaContext = m_context->grContext();
    // Notify SKIA to sync its internal GL state.
    skiaContext->resetContext();

    m_canvas->save();
    m_canvas->clipRect(SkRect(destRect));
    // Translate the origin of contentRect to that of destRect.
    // Note that destRect is defined relative to sourceRect.
    m_canvas->translate(contentRect().x() - sourceRect.x() + destRect.x(),
                        contentRect().y() - sourceRect.y() + destRect.y());
    m_canvas->drawPicture(m_picture);
    m_canvas->restore();

    // Flush SKIA context so that all the rendered stuff appears on the texture.
    skiaContext->flush();

    // Unbind texture.
    context()->framebufferTexture2D(GraphicsContext3D::FRAMEBUFFER,
                                    GraphicsContext3D::COLOR_ATTACHMENT0,
                                    GraphicsContext3D::TEXTURE_2D, 0, 0);
    context()->bindFramebuffer(GraphicsContext3D::FRAMEBUFFER, 0);
}
bool Canvas2DLayerBridge::prepareMailbox(WebExternalTextureMailbox* outMailbox, WebExternalBitmap* bitmap)
{
    ASSERT(isAccelerated());
    if (m_destructionInProgress) {
        // It can be hit in the following sequence.
        // 1. Canvas draws something.
        // 2. The compositor begins the frame.
        // 3. Javascript makes a context be lost.
        // 4. Here.
        return false;
    }
    if (bitmap) {
        // Using accelerated 2d canvas with software renderer, which
        // should only happen in tests that use fake graphics contexts
        // or in Android WebView in software mode. In this case, we do
        // not care about producing any results for this canvas.
        skipQueuedDrawCommands();
        m_lastImageId = 0;
        return false;
    }
    if (!checkSurfaceValid())
        return false;

    WebGraphicsContext3D* webContext = context();
    RefPtr<SkImage> image = newImageSnapshot(PreferAcceleration);

    // Early exit if canvas was not drawn to since last prepareMailbox.
    GLenum filter = m_filterQuality == kNone_SkFilterQuality ? GL_NEAREST : GL_LINEAR;
    if (image->uniqueID() == m_lastImageId && filter == m_lastFilter)
        return false;
    m_lastImageId = image->uniqueID();
    m_lastFilter = filter;

    {
        MailboxInfo tmp;
        tmp.m_image = image;
        tmp.m_parentLayerBridge = this;
        m_mailboxes.prepend(tmp);
    }
    MailboxInfo& mailboxInfo = m_mailboxes.first();
    mailboxInfo.m_mailbox.nearestNeighbor = filter == GL_NEAREST;

    GrContext* grContext = m_contextProvider->grContext();
    if (!grContext)
        return true; // for testing: skip gl stuff when using a mock graphics context.

    // Need to flush skia's internal queue because the texture is about to be accessed directly.
    grContext->flush();
    ASSERT(image->getTexture());

    // Because of texture sharing with the compositor, we must invalidate
    // the state cached in skia so that the deferred copy on write
    // in SkSurface_Gpu does not make any false assumptions.
    mailboxInfo.m_image->getTexture()->textureParamsModified();
    webContext->bindTexture(GL_TEXTURE_2D, mailboxInfo.m_image->getTexture()->getTextureHandle());
    webContext->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, filter);
    webContext->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, filter);
    webContext->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    webContext->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    // Re-use the texture's existing mailbox, if there is one.
    if (image->getTexture()->getCustomData()) {
        ASSERT(image->getTexture()->getCustomData()->size() == sizeof(mailboxInfo.m_mailbox.name));
        memcpy(&mailboxInfo.m_mailbox.name[0], image->getTexture()->getCustomData()->data(),
               sizeof(mailboxInfo.m_mailbox.name));
    } else {
        context()->genMailboxCHROMIUM(mailboxInfo.m_mailbox.name);
        RefPtr<SkData> mailboxNameData = adoptRef(SkData::NewWithCopy(
            &mailboxInfo.m_mailbox.name[0], sizeof(mailboxInfo.m_mailbox.name)));
        image->getTexture()->setCustomData(mailboxNameData.get());
        webContext->produceTextureCHROMIUM(GL_TEXTURE_2D, mailboxInfo.m_mailbox.name);
    }

    if (isHidden()) {
        // With hidden canvases, we release the SkImage immediately because
        // there is no need for animations to be double buffered.
        mailboxInfo.m_image.clear();
    } else {
        // FIXME: We'd rather insert a syncpoint than perform a flush here,
        // but currently the canvas will flicker if we don't flush here.
        webContext->flush();
        // mailboxInfo.m_mailbox.syncPoint = webContext->insertSyncPoint();
    }
    webContext->bindTexture(GL_TEXTURE_2D, 0);

    // Because we are changing the texture binding without going through skia,
    // we must dirty the context.
    grContext->resetContext(kTextureBinding_GrGLBackendState);
    *outMailbox = mailboxInfo.m_mailbox;
    return true;
}
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkCommandLineFlags::Parse(argc, argv);
#if SK_ENABLE_INST_COUNT
    if (FLAGS_leaks) {
        gPrintInstCount = true;
    }
#endif
    SkAutoGraphics ag;

    // First, parse some flags.
    BenchLogger logger;
    if (FLAGS_logFile.count()) {
        logger.SetLogFile(FLAGS_logFile[0]);
    }

    LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
    MultiResultsWriter writer;
    writer.add(&logWriter);

    SkAutoTDelete<JSONResultsWriter> jsonWriter;
    if (FLAGS_outResultsFile.count()) {
        jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
        writer.add(jsonWriter.get());
    }

    // Instantiate after all the writers have been added to writer so that we
    // call close() before their destructors are called on the way out.
    CallEnd<MultiResultsWriter> ender(writer);

    const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
    SkTriState::State dither = SkTriState::kDefault;
    for (size_t i = 0; i < 3; i++) {
        if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
            dither = static_cast<SkTriState::State>(i);
        }
    }

    BenchMode benchMode = kNormal_BenchMode;
    for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
        if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
            benchMode = static_cast<BenchMode>(i);
        }
    }

    SkTDArray<int> configs;
    bool runDefaultConfigs = false;
    // Try user-given configs first.
    for (int i = 0; i < FLAGS_config.count(); i++) {
        for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
            if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
                *configs.append() = j;
            } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
                runDefaultConfigs = true;
            }
        }
    }
    // If there weren't any, fill in with defaults.
    if (runDefaultConfigs) {
        for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
            if (gConfigs[i].runByDefault) {
                *configs.append() = i;
            }
        }
    }
    // Filter out things we can't run.
    if (kNormal_BenchMode != benchMode) {
        // Non-rendering configs only run in normal mode
        for (int i = 0; i < configs.count(); ++i) {
            const Config& config = gConfigs[configs[i]];
            if (Benchmark::kNonRendering_Backend == config.backend) {
                configs.remove(i, 1);
                --i;
            }
        }
    }

#if SK_SUPPORT_GPU
    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];
        if (Benchmark::kGPU_Backend == config.backend) {
            GrContext* context = gContextFactory.get(config.contextType);
            if (NULL == context) {
                SkDebugf("GrContext could not be created for config %s. Config will be skipped.\n",
                         config.name);
                configs.remove(i);
                --i;
                continue;
            }
            if (config.sampleCount > context->getMaxSampleCount()) {
                SkDebugf("Sample count (%d) for config %s is not supported."
                         " Config will be skipped.\n",
                         config.sampleCount, config.name);
                configs.remove(i);
                --i;
                continue;
            }
        }
    }
#endif

    // All flags should be parsed now. Report our settings.
    if (FLAGS_runOnce) {
        logger.logError("bench was run with --runOnce, so we're going to hide the times."
                        " It's for your own good!\n");
    }
    writer.option("mode", FLAGS_mode[0]);
    writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
    writer.option("antialias", SkStringPrintf("%d", FLAGS_forceAA).c_str());
    writer.option("filter", SkStringPrintf("%d", FLAGS_forceFilter).c_str());
    writer.option("dither", SkTriState::Name[dither]);
    writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
    writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
    writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());

#if defined(SK_BUILD_FOR_WIN32)
    writer.option("system", "WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    writer.option("system", "MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    writer.option("system", "ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    writer.option("system", "UNIX");
#else
    writer.option("system", "other");
#endif

#if defined(SK_DEBUG)
    writer.option("build", "DEBUG");
#else
    writer.option("build", "RELEASE");
#endif

    // Set texture cache limits if non-default.
    for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
        const Config& config = gConfigs[i];
        if (Benchmark::kGPU_Backend != config.backend) {
            continue;
        }
        GrContext* context = gContextFactory.get(config.contextType);
        if (NULL == context) {
            continue;
        }

        size_t bytes;
        int count;
        context->getResourceCacheLimits(&count, &bytes);
        if (-1 != FLAGS_gpuCacheBytes) {
            bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
        }
        if (-1 != FLAGS_gpuCacheCount) {
            count = FLAGS_gpuCacheCount;
        }
        context->setResourceCacheLimits(count, bytes);
#endif
    }

    // Run each bench in each configuration it supports and we asked for.
    Iter iter;
    Benchmark* bench;
    while ((bench = iter.next()) != NULL) {
        SkAutoTUnref<Benchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }

        bench->setForceAlpha(alpha);
        bench->setForceAA(FLAGS_forceAA);
        bench->setForceFilter(FLAGS_forceFilter);
        bench->setDither(dither);
        bench->preDraw();

        bool loggedBenchName = false;
        for (int i = 0; i < configs.count(); ++i) {
            const int configIndex = configs[i];
            const Config& config = gConfigs[configIndex];
            if (!bench->isSuitableFor(config.backend)) {
                continue;
            }

            GrContext* context = NULL;
#if SK_SUPPORT_GPU
            SkGLContextHelper* glContext = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                context = gContextFactory.get(config.contextType);
                if (NULL == context) {
                    continue;
                }
                glContext = gContextFactory.getGLContext(config.contextType);
            }
#endif

            SkAutoTUnref<SkCanvas> canvas;
            SkAutoTUnref<SkPicture> recordFrom;
            SkPictureRecorder recorderTo;
            const SkIPoint dim = bench->getSize();

            SkAutoTUnref<SkSurface> surface;
            if (Benchmark::kNonRendering_Backend != config.backend) {
                surface.reset(make_surface(config.fColorType,
                                           dim,
                                           config.backend,
                                           config.sampleCount,
                                           context));
                if (!surface.get()) {
                    logger.logError(SkStringPrintf(
                        "Device creation failure for config %s. Will skip.\n", config.name));
                    continue;
                }

                switch (benchMode) {
                    case kDeferredSilent_BenchMode:
                    case kDeferred_BenchMode:
                        canvas.reset(SkDeferredCanvas::Create(surface.get()));
                        break;
                    case kRecord_BenchMode:
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    case kPictureRecord_BenchMode: {
                        SkPictureRecorder recorderFrom;
                        bench->draw(1, recorderFrom.beginRecording(dim.fX, dim.fY));
                        recordFrom.reset(recorderFrom.endRecording());
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    }
                    case kNormal_BenchMode:
                        canvas.reset(SkRef(surface->getCanvas()));
                        break;
                    default:
                        SkASSERT(false);
                }
            }

            if (NULL != canvas) {
                canvas->clear(SK_ColorWHITE);
                if (FLAGS_clip)   { perform_clip(canvas, dim.fX, dim.fY); }
                if (FLAGS_scale)  { perform_scale(canvas, dim.fX, dim.fY); }
                if (FLAGS_rotate) { perform_rotate(canvas, dim.fX, dim.fY); }
            }

            if (!loggedBenchName) {
                loggedBenchName = true;
                writer.bench(bench->getName(), dim.fX, dim.fY);
            }

#if SK_SUPPORT_GPU
            SkGLContextHelper* contextHelper = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                contextHelper = gContextFactory.getGLContext(config.contextType);
            }
            BenchTimer timer(contextHelper);
#else
            BenchTimer timer;
#endif

            double previous = std::numeric_limits<double>::infinity();
            bool converged = false;

            // variables used to compute loopsPerFrame
            double frameIntervalTime = 0.0f;
            int frameIntervalTotalLoops = 0;

            bool frameIntervalComputed = false;
            int loopsPerFrame = 0;
            int loopsPerIter = 0;
            if (FLAGS_verbose) { SkDebugf("%s %s: ", bench->getName(), config.name); }
            if (!FLAGS_dryRun) {
                do {
                    // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
                    loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
                    if (loopsPerIter >= (1<<30) || timer.fWall > FLAGS_maxMs) {
                        // If you find it takes more than a billion loops to get up to 20ms of
                        // runtime, you've got a computer clocked at several THz or have a broken
                        // benchmark.  ;)
                        //     "1B ought to be enough for anybody."
                        logger.logError(SkStringPrintf(
                            "\nCan't get %s %s to converge in %dms (%d loops)",
                            bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
                        break;
                    }

                    if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
                        // Clear the recorded commands so that they do not accumulate.
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                    }

                    timer.start();
                    // Inner loop that allows us to break the run into smaller
                    // chunks (e.g. frames). This is especially useful for the GPU
                    // as we can flush and/or swap buffers to keep the GPU from
                    // queuing up too much work.
                    for (int loopCount = loopsPerIter; loopCount > 0; ) {
                        // Save and restore around each call to draw() to guarantee a pristine
                        // canvas.
                        SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);

                        int loops;
                        if (frameIntervalComputed && loopCount > loopsPerFrame) {
                            loops = loopsPerFrame;
                            loopCount -= loopsPerFrame;
                        } else {
                            loops = loopCount;
                            loopCount = 0;
                        }

                        if (benchMode == kPictureRecord_BenchMode) {
                            recordFrom->draw(canvas);
                        } else {
                            bench->draw(loops, canvas);
                        }

                        if (kDeferredSilent_BenchMode == benchMode) {
                            static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
                        } else if (NULL != canvas) {
                            canvas->flush();
                        }

#if SK_SUPPORT_GPU
                        // swap drawing buffers on each frame to prevent the GPU
                        // from queuing up too much work
                        if (NULL != glContext) {
                            glContext->swapBuffers();
                        }
#endif
                    }

                    // Stop truncated timers before GL calls complete, and stop the full timers
                    // after.
                    timer.truncatedEnd();
#if SK_SUPPORT_GPU
                    if (NULL != glContext) {
                        context->flush();
                        SK_GL(*glContext, Finish());
                    }
#endif
                    timer.end();

                    // setup the frame interval for subsequent iterations
                    if (!frameIntervalComputed) {
                        frameIntervalTime += timer.fWall;
                        frameIntervalTotalLoops += loopsPerIter;
                        if (frameIntervalTime >= FLAGS_minMs) {
                            frameIntervalComputed = true;
                            loopsPerFrame = (int)(((double)frameIntervalTotalLoops /
                                                   frameIntervalTime) * FLAGS_minMs);
                            if (loopsPerFrame < 1) {
                                loopsPerFrame = 1;
                            }
                            // SkDebugf("  %s has %d loops in %f ms (normalized to %d)\n",
                            //          bench->getName(), frameIntervalTotalLoops,
                            //          timer.fWall, loopsPerFrame);
                        }
                    }

                    const double current = timer.fWall / loopsPerIter;
                    if (FLAGS_verbose && current > previous) { SkDebugf("↑"); }
                    if (FLAGS_verbose) { SkDebugf("%.3g ", current); }
                    converged = HasConverged(previous, current, timer.fWall);
                    previous = current;
                } while (!FLAGS_runOnce && !converged);
            }
            if (FLAGS_verbose) { SkDebugf("\n"); }

            if (!FLAGS_dryRun && FLAGS_outDir.count() &&
                Benchmark::kNonRendering_Backend != config.backend) {
                SkAutoTUnref<SkImage> image(surface->newImageSnapshot());
                if (image.get()) {
                    saveFile(bench->getName(), config.name, FLAGS_outDir[0], image);
                }
            }

            if (FLAGS_runOnce) {
                // Let's not mislead ourselves by looking at Debug build or single iteration bench
                // times!
                continue;
            }

            // Normalize to ms per 1000 iterations.
            const double normalize = 1000.0 / loopsPerIter;
            const struct { char shortName; const char* longName; double ms; } times[] = {
                {'w', "msecs",  normalize * timer.fWall},
                {'W', "Wmsecs", normalize * timer.fTruncatedWall},
                {'c', "cmsecs", normalize * timer.fCpu},
                {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
                {'g', "gmsecs", normalize * timer.fGpu},
            };

            writer.config(config.name);
            for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
                if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
                    writer.timer(times[i].longName, times[i].ms);
                }
            }
        }
    }
#if SK_SUPPORT_GPU
    gContextFactory.destroyContexts();
#endif
    return 0;
}
void flush() { context->flush(); }
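// A minimal sketch (a hypothetical helper, not taken from any of the sources above) of
// the pattern these snippets share: GrContext::flush() submits Skia's pending draws to
// the GPU before the same resources are touched from outside Skia, and resetContext()
// afterwards tells Skia that its cached GL state may be stale. Compare updateTextureRect
// and prepareMailbox above, which do exactly this around their raw GL calls.
void drawThenUseRawGL(GrContext* context) {
    // ... record Skia draws into a surface backed by `context` ...
    context->flush();        // drain Skia's deferred work so the backing texture is up to date
    // ... issue raw GL calls against the texture Skia rendered into ...
    context->resetContext(); // dirty Skia's cached GL state after the external GL calls
}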