// Returns how many effect stages the GLPrograms test may chain together on this
// context without exceeding driver/HW limits on the various platforms.
static int get_glprograms_max_stages(const sk_gpu_test::ContextInfo& ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    GrGLGpu* gpu = static_cast<GrGLGpu*>(context->contextPriv().getGpu());
    // Default budget for desktop GL.
    int maxStages = 6;
    if (gpu->glStandard() == kGLES_GrGLStandard) {
        // We've had issues with driver crashes and HW limits being exceeded with many effects on
        // Android devices. We have passes on ARM devices with the default number of stages.
        // TODO When we run ES 3.00 GLSL in more places, test again
#ifdef SK_BUILD_FOR_ANDROID
        if (gpu->ctxInfo().vendor() != kARM_GrGLVendor) {
            maxStages = 1;
        }
#endif
        // On iOS we can exceed the maximum number of varyings. http://skbug.com/6627.
#ifdef SK_BUILD_FOR_IOS
        maxStages = 3;
#endif
    }
    const auto contextType = ctxInfo.type();
    if (contextType == sk_gpu_test::GrContextFactory::kANGLE_D3D9_ES2_ContextType ||
        contextType == sk_gpu_test::GrContextFactory::kANGLE_D3D11_ES2_ContextType) {
        // On Angle D3D we will hit a limit of out variables if we use too many stages.
        maxStages = 3;
    }
    return maxStages;
}
// Draws the YUV->RGB conversion effect once per SkYUVColorSpace, stacked vertically.
// GPU-only: shows the "GPU only" message when no render target context is available.
void onDraw(SkCanvas* canvas) override {
    GrRenderTargetContext* rtc = canvas->internal_private_accessTopLayerRenderTargetContext();
    if (!rtc) {
        skiagm::GM::DrawGpuOnlyMessage(canvas);
        return;
    }

    GrContext* context = canvas->getGrContext();
    if (!context) {
        return;
    }

    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();

    // Plane 0 (Y) comes from fBmp[0]; fBmp[1] is reused for both U and V planes.
    sk_sp<GrTextureProxy> planes[3];
    for (int i = 0; i < 3; ++i) {
        const int index = i ? 1 : 0;

        GrSurfaceDesc desc;
        desc.fWidth = fBmp[index].width();
        desc.fHeight = fBmp[index].height();
        desc.fConfig = SkImageInfo2GrPixelConfig(fBmp[index].info(), *context->caps());
        SkASSERT(kUnknown_GrPixelConfig != desc.fConfig);

        planes[i] = proxyProvider->createTextureProxy(desc, SkBudgeted::kYes,
                                                      fBmp[index].getPixels(),
                                                      fBmp[index].rowBytes());
        if (!planes[i]) {
            return;
        }
    }

    constexpr SkScalar kDrawPad = 10.f;
    constexpr SkScalar kTestPad = 10.f;
    constexpr SkScalar kColorSpaceOffset = 36.f;
    SkISize sizes[3] = {{YSIZE, YSIZE}, {USIZE, USIZE}, {VSIZE, VSIZE}};

    for (int space = kJPEG_SkYUVColorSpace; space <= kLastEnum_SkYUVColorSpace; ++space) {
        SkRect renderRect = SkRect::MakeWH(SkIntToScalar(fBmp[0].width()),
                                           SkIntToScalar(fBmp[0].height()));
        renderRect.outset(kDrawPad, kDrawPad);

        // Each color space gets its own row.
        const SkScalar y = kDrawPad + kTestPad + space * kColorSpaceOffset;
        const SkScalar x = kDrawPad + kTestPad;

        GrPaint grPaint;
        grPaint.setXPFactory(GrPorterDuffXPFactory::Get(SkBlendMode::kSrc));
        auto fp = GrYUVtoRGBEffect::Make(planes[0], planes[1], planes[2], sizes,
                                         static_cast<SkYUVColorSpace>(space), true);
        if (!fp) {
            continue;
        }

        SkMatrix viewMatrix;
        viewMatrix.setTranslate(x, y);
        grPaint.addColorFragmentProcessor(std::move(fp));
        std::unique_ptr<GrDrawOp> op(GrRectOpFactory::MakeNonAAFill(
                std::move(grPaint), viewMatrix, renderRect, GrAAType::kNone));
        rtc->priv().testingOnly_addDrawOp(std::move(op));
    }
}
// Replays a recorded deferred display list into this surface's render target.
// Returns false when the DDL is null or was recorded against an incompatible
// surface characterization.
bool SkSurface_Gpu::onDraw(const SkDeferredDisplayList* ddl) {
    if (!ddl) {
        return false;
    }
    if (!this->isCompatible(ddl->characterization())) {
        return false;
    }
    GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
    fDevice->context()->contextPriv().copyOpListsFromDDL(ddl, rtc->asRenderTargetProxy());
    return true;
}
void GrRenderTarget::discard() { // go through context so that all necessary flushing occurs GrContext* context = this->getContext(); if (!context) { return; } sk_sp<GrDrawContext> drawContext(context->contextPriv().makeWrappedDrawContext(sk_ref_sp(this), nullptr)); if (!drawContext) { return; } drawContext->discard(); }
// Runs the tessellating path renderer over a collection of stress-test paths,
// a few with explicit view matrices, coverage AA, or a fragment processor.
DEF_GPUTEST_FOR_ALL_CONTEXTS(TessellatingPathRendererTests, reporter, ctxInfo) {
    GrContext* ctx = ctxInfo.grContext();
    sk_sp<GrRenderTargetContext> rtc(ctx->contextPriv().makeDeferredRenderTargetContext(
            SkBackingFit::kApprox, 800, 800, kRGBA_8888_GrPixelConfig, nullptr, 1,
            GrMipMapped::kNo, kTopLeft_GrSurfaceOrigin));
    if (!rtc) {
        return;
    }

    ctx->flush();
    // Adding discard to appease vulkan validation warning about loading uninitialized data on draw
    rtc->discard();

    // Paths 0-16 all run with the default matrix, AA type, and fragment processor.
    SkPath (*const kPlainPathMakers[])() = {
            create_path_0,  create_path_1,  create_path_2,  create_path_3,  create_path_4,
            create_path_5,  create_path_6,  create_path_7,  create_path_8,  create_path_9,
            create_path_10, create_path_11, create_path_12, create_path_13, create_path_14,
            create_path_15, create_path_16,
    };
    for (auto maker : kPlainPathMakers) {
        test_path(ctx, rtc.get(), maker());
    }

    // Path 17 exercises a non-invertible view matrix together with a fragment processor.
    SkMatrix nonInvertibleMatrix = SkMatrix::MakeScale(0, 0);
    std::unique_ptr<GrFragmentProcessor> fp(create_linear_gradient_processor(ctx));
    test_path(ctx, rtc.get(), create_path_17(), nonInvertibleMatrix, GrAAType::kCoverage,
              std::move(fp));

    test_path(ctx, rtc.get(), create_path_18());
    test_path(ctx, rtc.get(), create_path_19());
    test_path(ctx, rtc.get(), create_path_20(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_21(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_22());
    test_path(ctx, rtc.get(), create_path_23());
    test_path(ctx, rtc.get(), create_path_24());
    test_path(ctx, rtc.get(), create_path_25(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_26(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_27(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_28(), SkMatrix(), GrAAType::kCoverage);
    test_path(ctx, rtc.get(), create_path_29());
}
bool SkSurface_Gpu::onDraw(const SkDeferredDisplayList* ddl) { if (!ddl || !this->isCompatible(ddl->characterization())) { return false; } #ifdef SK_RASTER_RECORDER_IMPLEMENTATION // Ultimately need to pass opLists from the DeferredDisplayList on to the // SkGpuDevice's renderTargetContext. return ddl->draw(this); #else GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext(); GrContext* ctx = fDevice->context(); ctx->contextPriv().copyOpListsFromDDL(ddl, rtc->asRenderTargetProxy()); return true; #endif }
// Verifies that flushing a surface with no pending work still produces a valid, signalable
// semaphore: on GL by checking the sync object, on Vulkan by submitting a command buffer that
// waits on the semaphore and confirming the wait completes.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(EmptySurfaceSemaphoreTest, reporter, ctxInfo) {
    GrContext* ctx = ctxInfo.grContext();
    if (!ctx->caps()->fenceSyncSupport()) {
        return;
    }

    const SkImageInfo ii = SkImageInfo::Make(MAIN_W, MAIN_H, kRGBA_8888_SkColorType,
                                             kPremul_SkAlphaType);

    sk_sp<SkSurface> mainSurface(SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo,
                                                             ii, 0, kTopLeft_GrSurfaceOrigin,
                                                             nullptr));

    // Flush surface once without semaphores to make sure there is no pending IO for it.
    mainSurface->flush();

    GrBackendSemaphore semaphore;
    GrSemaphoresSubmitted submitted = mainSurface->flushAndSignalSemaphores(1, &semaphore);
    REPORTER_ASSERT(reporter, GrSemaphoresSubmitted::kYes == submitted);

    if (kOpenGL_GrBackend == ctxInfo.backend()) {
        GrGLGpu* gpu = static_cast<GrGLGpu*>(ctx->contextPriv().getGpu());
        const GrGLInterface* interface = gpu->glInterface();
        GrGLsync sync = semaphore.glSync();
        REPORTER_ASSERT(reporter, sync);
        // The signaled semaphore must be backed by a real GL sync object.
        bool result;
        GR_GL_CALL_RET(interface, result, IsSync(sync));
        REPORTER_ASSERT(reporter, result);
    }

#ifdef SK_VULKAN
    if (kVulkan_GrBackend == ctxInfo.backend()) {
        GrVkGpu* gpu = static_cast<GrVkGpu*>(ctx->contextPriv().getGpu());
        const GrVkInterface* interface = gpu->vkInterface();
        VkDevice device = gpu->device();
        VkQueue queue = gpu->queue();
        VkCommandPool cmdPool = gpu->cmdPool();
        VkCommandBuffer cmdBuffer;

        // Create Command Buffer
        const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
            nullptr,                                          // pNext
            cmdPool,                                          // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
            1                                                 // bufferCount
        };

        VkResult err = GR_VK_CALL(interface, AllocateCommandBuffers(device, &cmdInfo,
                                                                    &cmdBuffer));
        if (err) {
            return;
        }

        // Record an empty command buffer; all we need is something to submit that waits on
        // the semaphore signaled above.
        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = nullptr;
        GR_VK_CALL_ERRCHECK(interface, BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
        GR_VK_CALL_ERRCHECK(interface, EndCommandBuffer(cmdBuffer));

        // Fence so we can tell when the submitted work (and thus the semaphore wait) finished.
        VkFenceCreateInfo fenceInfo;
        VkFence fence;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(interface, CreateFence(device, &fenceInfo, nullptr, &fence));
        SkASSERT(!err);

        // Submit the empty command buffer, waiting on the test semaphore.
        VkPipelineStageFlags waitStages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        VkSubmitInfo submitInfo;
        memset(&submitInfo, 0, sizeof(VkSubmitInfo));
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;
        submitInfo.waitSemaphoreCount = 1;
        VkSemaphore vkSem = semaphore.vkSemaphore();
        submitInfo.pWaitSemaphores = &vkSem;
        submitInfo.pWaitDstStageMask = &waitStages;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &cmdBuffer;
        submitInfo.signalSemaphoreCount = 0;
        submitInfo.pSignalSemaphores = nullptr;
        GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));

        // 3-second timeout (nanoseconds): the semaphore must have been signaled.
        err = GR_VK_CALL(interface, WaitForFences(device, 1, &fence, true, 3000000000));

        REPORTER_ASSERT(reporter, err != VK_TIMEOUT);

        GR_VK_CALL(interface, DestroyFence(device, fence, nullptr));
        GR_VK_CALL(interface, DestroySemaphore(device, vkSem, nullptr));
        // If the above test fails the wait semaphore will never be signaled which can cause the
        // device to hang when tearing down (even if just tearing down GL). So we Fail here to
        // kill things.
        if (err == VK_TIMEOUT) {
            SK_ABORT("Waiting on semaphore indefinitely");
        }
    }
#endif
}
// Draws into a main surface, flushes it signaling two semaphores (either via the context or the
// surface, per flushContext), then has two child contexts each draw from the main surface's
// backend texture while waiting on one of the semaphores.
void surface_semaphore_test(skiatest::Reporter* reporter,
                            const sk_gpu_test::ContextInfo& mainInfo,
                            const sk_gpu_test::ContextInfo& childInfo1,
                            const sk_gpu_test::ContextInfo& childInfo2,
                            bool flushContext) {
    GrContext* mainCtx = mainInfo.grContext();
    if (!mainCtx->caps()->fenceSyncSupport()) {
        return;
    }

    const SkImageInfo ii = SkImageInfo::Make(MAIN_W, MAIN_H, kRGBA_8888_SkColorType,
                                             kPremul_SkAlphaType);

    sk_sp<SkSurface> mainSurface(SkSurface::MakeRenderTarget(mainCtx, SkBudgeted::kNo,
                                                             ii, 0, kTopLeft_GrSurfaceOrigin,
                                                             nullptr));
    SkCanvas* mainCanvas = mainSurface->getCanvas();
    mainCanvas->clear(SK_ColorBLUE);

    SkAutoTArray<GrBackendSemaphore> semaphores(2);
#ifdef SK_VULKAN
    if (kVulkan_GrBackend == mainInfo.backend()) {
        // Initialize the secondary semaphore instead of having Ganesh create one internally
        GrVkGpu* gpu = static_cast<GrVkGpu*>(mainCtx->contextPriv().getGpu());
        const GrVkInterface* interface = gpu->vkInterface();
        VkDevice device = gpu->device();

        VkSemaphore vkSem;

        VkSemaphoreCreateInfo createInfo;
        createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        GR_VK_CALL_ERRCHECK(interface, CreateSemaphore(device, &createInfo, nullptr, &vkSem));

        semaphores[1].initVulkan(vkSem);
    }
#endif

    if (flushContext) {
        mainCtx->flushAndSignalSemaphores(2, semaphores.get());
    } else {
        mainSurface->flushAndSignalSemaphores(2, semaphores.get());
    }

    sk_sp<SkImage> mainImage = mainSurface->makeImageSnapshot();
    GrBackendTexture backendTexture = mainImage->getBackendTexture(false);

    draw_child(reporter, childInfo1, backendTexture, semaphores[0]);

#ifdef SK_VULKAN
    if (kVulkan_GrBackend == mainInfo.backend()) {
        // In Vulkan we need to make sure we are sending the correct VkImageLayout in with the
        // backendImage. After the first child draw the layout gets changed to SHADER_READ, so
        // we just manually set that here.
        GrVkImageInfo vkInfo;
        SkAssertResult(backendTexture.getVkImageInfo(&vkInfo));
        // NOTE(review): vkInfo looks like a local copy filled in by getVkImageInfo(), so this
        // updateImageLayout() may not propagate back to backendTexture as the comment above
        // intends — confirm against GrBackendTexture's implementation.
        vkInfo.updateImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
    }
#endif

    draw_child(reporter, childInfo2, backendTexture, semaphores[1]);
}
// Exhaustively tests GrSurfaceContext::copy over every combination of src/dst origin,
// src/dst render-target-ness, copy rect, and dst offset — computing the expected success
// and expected pixel results on the CPU and validating the GPU readback against them.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(CopySurface, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    static const int kW = 10;
    static const int kH = 10;
    static const size_t kRowBytes = sizeof(uint32_t) * kW;

    // Src pixels hold their own index; dst pixels hold the bitwise complement, so
    // copied and un-copied dst pixels are both detectable after readback.
    SkAutoTMalloc<uint32_t> srcPixels(kW * kH);
    for (int i = 0; i < kW * kH; ++i) {
        srcPixels.get()[i] = i;
    }
    SkAutoTMalloc<uint32_t> dstPixels(kW * kH);
    for (int i = 0; i < kW * kH; ++i) {
        dstPixels.get()[i] = ~i;
    }

    // Full surface, overhanging all sides, inset one pixel, and a small interior rect.
    static const SkIRect kSrcRects[] {
        { 0,  0, kW  , kH  },
        {-1, -1, kW+1, kH+1},
        { 1,  1, kW-1, kH-1},
        { 5,  5, 6   , 6   },
    };

    // Dst offsets that land inside, on the edge of, and fully outside the dst surface.
    static const SkIPoint kDstPoints[] {
        { 0   ,  0   },
        { 1   ,  1   },
        { kW/2,  kH/4},
        { kW-1,  kH-1},
        { kW  ,  kH  },
        { kW+1,  kH+2},
        {-1   , -1   },
    };

    const SkImageInfo ii = SkImageInfo::Make(kW, kH, kRGBA_8888_SkColorType, kPremul_SkAlphaType);

    SkAutoTMalloc<uint32_t> read(kW * kH);

    for (auto sOrigin : {kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin}) {
        for (auto dOrigin : {kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin}) {
            for (auto sRT : {true, false}) {
                for (auto dRT : {true, false}) {
                    for (auto srcRect : kSrcRects) {
                        for (auto dstPoint : kDstPoints) {
                            auto src = sk_gpu_test::MakeTextureProxyFromData(
                                    context, sRT, kW, kH, ii.colorType(), sOrigin,
                                    srcPixels.get(), kRowBytes);
                            auto dst = sk_gpu_test::MakeTextureProxyFromData(
                                    context, dRT, kW, kH, ii.colorType(), dOrigin,
                                    dstPixels.get(), kRowBytes);
                            if (!src || !dst) {
                                ERRORF(reporter,
                                       "Could not create surfaces for copy surface test.");
                                continue;
                            }

                            sk_sp<GrSurfaceContext> dstContext =
                                    context->contextPriv().makeWrappedSurfaceContext(
                                            std::move(dst));

                            bool result = dstContext->copy(src.get(), srcRect, dstPoint);

                            // Compute the expected outcome on the CPU: clip the src rect to the
                            // src bounds, shift the dst rect accordingly, then clip the dst rect
                            // to the dst bounds. The copy should fail iff nothing survives.
                            bool expectedResult = true;
                            SkIPoint dstOffset = { dstPoint.fX - srcRect.fLeft,
                                                   dstPoint.fY - srcRect.fTop };
                            SkIRect copiedDstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                                                      srcRect.width(),
                                                                      srcRect.height());

                            SkIRect copiedSrcRect;
                            if (!copiedSrcRect.intersect(srcRect, SkIRect::MakeWH(kW, kH))) {
                                expectedResult = false;
                            } else {
                                // If the src rect was clipped, apply same clipping to each side
                                // of copied dst rect.
                                copiedDstRect.fLeft += copiedSrcRect.fLeft - srcRect.fLeft;
                                copiedDstRect.fTop += copiedSrcRect.fTop - srcRect.fTop;
                                copiedDstRect.fRight -= copiedSrcRect.fRight - srcRect.fRight;
                                copiedDstRect.fBottom -= copiedSrcRect.fBottom - srcRect.fBottom;
                            }
                            if (copiedDstRect.isEmpty() ||
                                !copiedDstRect.intersect(SkIRect::MakeWH(kW, kH))) {
                                expectedResult = false;
                            }
                            // To make the copied src rect correct we would apply any dst clipping
                            // back to the src rect, but we don't use it again so don't bother.
                            if (expectedResult != result) {
                                ERRORF(reporter, "Expected return value %d from copySurface, got "
                                       "%d.", expectedResult, result);
                                continue;
                            }

                            if (!expectedResult || !result) {
                                continue;
                            }

                            sk_memset32(read.get(), 0, kW * kH);
                            if (!dstContext->readPixels(ii, read.get(), kRowBytes, 0, 0)) {
                                ERRORF(reporter, "Error calling readPixels");
                                continue;
                            }

                            bool abort = false;
                            // Validate that pixels inside copiedDstRect received the correct
                            // value from src and that those outside were not modified.
                            for (int y = 0; y < kH && !abort; ++y) {
                                for (int x = 0; x < kW; ++x) {
                                    uint32_t r = read.get()[y * kW + x];
                                    if (copiedDstRect.contains(x, y)) {
                                        int sx = x - dstOffset.fX;
                                        int sy = y - dstOffset.fY;
                                        uint32_t s = srcPixels.get()[sy * kW + sx];
                                        if (s != r) {
                                            ERRORF(reporter, "Expected dst %d,%d to contain "
                                                   "0x%08x copied from src location %d,%d. Got "
                                                   "0x%08x", x, y, s, sx, sy, r);
                                            abort = true;
                                            break;
                                        }
                                    } else {
                                        uint32_t d = dstPixels.get()[y * kW + x];
                                        if (d != r) {
                                            ERRORF(reporter,
                                                   "Expected dst %d,%d to be unmodified ("
                                                   "0x%08x). Got 0x%08x", x, y, d, r);
                                            abort = true;
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// Creates a GL_TEXTURE_RECTANGLE texture directly through GL, wraps it in a Ganesh proxy,
// and runs draw-as-src, copy, read, write, and clear tests against it for both surface origins.
DEF_GPUTEST_FOR_GL_RENDERING_CONTEXTS(RectangleTexture, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
    sk_gpu_test::GLTestContext* glContext = ctxInfo.glContext();
    static const int kWidth = 13;
    static const int kHeight = 13;

    // Fill with a distinctive per-pixel value (its own index).
    GrColor pixels[kWidth * kHeight];
    for (int y = 0; y < kHeight; ++y) {
        for (int x = 0; x < kWidth; ++x) {
            pixels[y * kWidth + x] = y * kWidth + x;
        }
    }

    for (auto origin : { kBottomLeft_GrSurfaceOrigin, kTopLeft_GrSurfaceOrigin }) {
        bool useBLOrigin = kBottomLeft_GrSurfaceOrigin == origin;

        GrGLuint rectTexID = glContext->createTextureRectangle(kWidth, kHeight, GR_GL_RGBA,
                                                               GR_GL_RGBA, GR_GL_UNSIGNED_BYTE,
                                                               pixels);

        if (!rectTexID) {
            return;
        }

        // Let GrContext know that we messed with the GL context directly.
        context->resetContext();

        // Wrap the rectangle texture ID in a GrTexture
        GrGLTextureInfo rectangleInfo;
        rectangleInfo.fID = rectTexID;
        rectangleInfo.fTarget = GR_GL_TEXTURE_RECTANGLE;

        GrBackendTexture rectangleTex(kWidth, kHeight, kRGBA_8888_GrPixelConfig, rectangleInfo);

        // Reference pixels are flipped vertically when using a bottom-left origin.
        GrColor refPixels[kWidth * kHeight];
        for (int y = 0; y < kHeight; ++y) {
            for (int x = 0; x < kWidth; ++x) {
                int y0 = useBLOrigin ? kHeight - y - 1 : y;
                refPixels[y * kWidth + x] = pixels[y0 * kWidth + x];
            }
        }

        sk_sp<GrTextureProxy> rectProxy = proxyProvider->wrapBackendTexture(rectangleTex, origin);
        if (!rectProxy) {
            ERRORF(reporter, "Error creating proxy for rectangle texture.");
            GR_GL_CALL(glContext->gl(), DeleteTextures(1, &rectTexID));
            continue;
        }

        // Rectangle textures report no mip support and clamp-only tiling; the proxy and the
        // instantiated texture must agree on both.
        SkASSERT(rectProxy->texPriv().doesNotSupportMipMaps());
        SkASSERT(rectProxy->priv().peekTexture()->surfacePriv().doesNotSupportMipMaps());

        SkASSERT(rectProxy->texPriv().isClampOnly());
        SkASSERT(rectProxy->priv().peekTexture()->surfacePriv().isClampOnly());

        test_basic_draw_as_src(reporter, context, rectProxy, refPixels);

        // Test copy to both a texture and RT
        test_copy_from_surface(reporter, context, rectProxy.get(), refPixels,
                               false, "RectangleTexture-copy-from");

        sk_sp<GrSurfaceContext> rectContext = context->contextPriv().makeWrappedSurfaceContext(
                std::move(rectProxy));
        SkASSERT(rectContext);

        test_read_pixels(reporter, rectContext.get(), refPixels, "RectangleTexture-read");

        test_copy_to_surface(reporter, context->contextPriv().proxyProvider(),
                             rectContext.get(), "RectangleTexture-copy-to");

        test_write_pixels(reporter, rectContext.get(), true, "RectangleTexture-write");

        test_clear(reporter, rectContext.get());

        GR_GL_CALL(glContext->gl(), DeleteTextures(1, &rectTexID));
    }
}
// Tests alpha-channel reads and writes: first round-trips an A8 texture through
// writePixels/readPixels (with several rowBytes values), then reads just the alpha
// channel back out of RGBA/BGRA/sRGBA textures and render targets.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ReadWriteAlpha, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    unsigned char alphaData[X_SIZE * Y_SIZE];

    static const int kClearValue = 0x2;

    bool match;
    // rowBytes of 0 means tightly packed; the others exercise padded readback strides.
    static const size_t kRowBytes[] = {0, X_SIZE, X_SIZE + 1, 2 * X_SIZE - 1};

    {
        GrSurfaceDesc desc;
        desc.fFlags = kNone_GrSurfaceFlags;
        desc.fConfig = kAlpha_8_GrPixelConfig;    // it is a single channel texture
        desc.fWidth = X_SIZE;
        desc.fHeight = Y_SIZE;

        // We are initializing the texture with zeros here
        memset(alphaData, 0, X_SIZE * Y_SIZE);

        sk_sp<GrTextureProxy> proxy(GrSurfaceProxy::MakeDeferred(context->resourceProvider(),
                                                                 desc, SkBudgeted::kNo, alphaData,
                                                                 0));
        if (!proxy) {
            ERRORF(reporter, "Could not create alpha texture.");
            return;
        }
        sk_sp<GrSurfaceContext> sContext(context->contextPriv().makeWrappedSurfaceContext(
                std::move(proxy), nullptr));

        const SkImageInfo ii = SkImageInfo::MakeA8(X_SIZE, Y_SIZE);
        sk_sp<SkSurface> surf(SkSurface::MakeRenderTarget(context, SkBudgeted::kNo, ii));

        // create a distinctive texture
        for (int y = 0; y < Y_SIZE; ++y) {
            for (int x = 0; x < X_SIZE; ++x) {
                alphaData[y * X_SIZE + x] = y*X_SIZE+x;
            }
        }

        for (auto rowBytes : kRowBytes) {
            // upload the texture (do per-rowbytes iteration because we may overwrite below).
            bool result = sContext->writePixels(ii, alphaData, 0, 0, 0);
            REPORTER_ASSERT_MESSAGE(reporter, result, "Initial A8 writePixels failed");

            size_t nonZeroRowBytes = rowBytes ? rowBytes : X_SIZE;
            std::unique_ptr<uint8_t[]> readback(new uint8_t[nonZeroRowBytes * Y_SIZE]);
            // clear readback to something non-zero so we can detect readback failures
            memset(readback.get(), kClearValue, nonZeroRowBytes * Y_SIZE);

            // read the texture back
            result = sContext->readPixels(ii, readback.get(), rowBytes, 0, 0);
            REPORTER_ASSERT_MESSAGE(reporter, result, "Initial A8 readPixels failed");

            // make sure the original & read back versions match
            SkString msg;
            msg.printf("rb:%d A8", SkToU32(rowBytes));
            validate_alpha_data(reporter, X_SIZE, Y_SIZE, readback.get(), nonZeroRowBytes,
                                alphaData, msg);

            // Now try writing to a single channel surface (if we could create one).
            if (surf) {
                SkCanvas* canvas = surf->getCanvas();

                SkPaint paint;

                // Rect larger than the surface so the draw covers every pixel.
                const SkRect rect = SkRect::MakeLTRB(-10, -10, X_SIZE + 10, Y_SIZE + 10);

                paint.setColor(SK_ColorWHITE);

                canvas->drawRect(rect, paint);

                memset(readback.get(), kClearValue, nonZeroRowBytes * Y_SIZE);
                result = surf->readPixels(ii, readback.get(), nonZeroRowBytes, 0, 0);
                REPORTER_ASSERT_MESSAGE(reporter, result, "A8 readPixels after clear failed");

                // A white fill should leave every alpha value at 0xFF.
                match = true;
                for (int y = 0; y < Y_SIZE && match; ++y) {
                    for (int x = 0; x < X_SIZE && match; ++x) {
                        uint8_t rbValue = readback.get()[y * nonZeroRowBytes + x];
                        if (0xFF != rbValue) {
                            ERRORF(reporter,
                                   "Failed alpha readback after clear. Expected: 0xFF, Got: 0x%02x"
                                   " at (%d,%d), rb:%d", rbValue, x, y, SkToU32(rowBytes));
                            match = false;
                        }
                    }
                }
            }
        }
    }

    static const GrPixelConfig kRGBAConfigs[] {
        kRGBA_8888_GrPixelConfig,
        kBGRA_8888_GrPixelConfig,
        kSRGBA_8888_GrPixelConfig
    };

    for (int y = 0; y < Y_SIZE; ++y) {
        for (int x = 0; x < X_SIZE; ++x) {
            alphaData[y * X_SIZE + x] = y*X_SIZE+x;
        }
    }

    const SkImageInfo dstInfo = SkImageInfo::Make(X_SIZE, Y_SIZE,
                                                  kAlpha_8_SkColorType,
                                                  kPremul_SkAlphaType);

    // Attempt to read back just alpha from a RGBA/BGRA texture. Once with a texture-only src and
    // once with a render target.
    for (auto config : kRGBAConfigs) {
        for (int rt = 0; rt < 2; ++rt) {
            GrSurfaceDesc desc;
            desc.fFlags = rt ? kRenderTarget_GrSurfaceFlag : kNone_GrSurfaceFlags;
            desc.fConfig = config;
            desc.fWidth = X_SIZE;
            desc.fHeight = Y_SIZE;

            uint32_t rgbaData[X_SIZE * Y_SIZE];
            // Make the alpha channel of the rgba texture come from alphaData.
            for (int y = 0; y < Y_SIZE; ++y) {
                for (int x = 0; x < X_SIZE; ++x) {
                    rgbaData[y * X_SIZE + x] = GrColorPackRGBA(6, 7, 8,
                                                               alphaData[y * X_SIZE + x]);
                }
            }
            sk_sp<GrTextureProxy> proxy =
                GrSurfaceProxy::MakeDeferred(context->resourceProvider(), desc, SkBudgeted::kNo,
                                             rgbaData, 0);
            if (!proxy) {
                // We always expect to be able to create a RGBA texture
                if (!rt && kRGBA_8888_GrPixelConfig == desc.fConfig) {
                    ERRORF(reporter, "Failed to create RGBA texture.");
                }
                continue;
            }

            sk_sp<GrSurfaceContext> sContext = context->contextPriv().makeWrappedSurfaceContext(
                    std::move(proxy), nullptr);

            for (auto rowBytes : kRowBytes) {
                size_t nonZeroRowBytes = rowBytes ? rowBytes : X_SIZE;

                std::unique_ptr<uint8_t[]> readback(new uint8_t[nonZeroRowBytes * Y_SIZE]);
                // Clear so we don't accidentally see values from previous iteration.
                memset(readback.get(), kClearValue, nonZeroRowBytes * Y_SIZE);

                // read the texture back
                bool result = sContext->readPixels(dstInfo, readback.get(), rowBytes, 0, 0);
                REPORTER_ASSERT_MESSAGE(reporter, result, "8888 readPixels failed");

                // make sure the original & read back versions match
                SkString msg;
                msg.printf("rt:%d, rb:%d 8888", rt, SkToU32(rowBytes));
                validate_alpha_data(reporter, X_SIZE, Y_SIZE, readback.get(), nonZeroRowBytes,
                                    alphaData, msg);
            }
        }
    }
}
// Exercises the promise-image Fulfill/Release/Done callback accounting across a sequence of
// draws and flushes, for both orders of releasing the image (before vs. after the final flush).
// On Vulkan, release is deferred until the GPU is done, so several checks are relaxed there.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(PromiseImageTest, reporter, ctxInfo) {
    const int kWidth = 10;
    const int kHeight = 10;

    GrContext* ctx = ctxInfo.grContext();
    GrGpu* gpu = ctx->contextPriv().getGpu();

    for (bool releaseImageEarly : {true, false}) {
        GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
                nullptr, kWidth, kHeight, kRGBA_8888_GrPixelConfig, true, GrMipMapped::kNo);
        REPORTER_ASSERT(reporter, backendTex.isValid());

        GrBackendFormat backendFormat = backendTex.format();
        REPORTER_ASSERT(reporter, backendFormat.isValid());

        // promiseChecker records every Fulfill/Release/Done callback it receives.
        PromiseTextureChecker promiseChecker(backendTex);
        GrSurfaceOrigin texOrigin = kTopLeft_GrSurfaceOrigin;
        sk_sp<SkImage> refImg(
                SkImage_Gpu::MakePromiseTexture(ctx, backendFormat, kWidth, kHeight,
                                                GrMipMapped::kNo, texOrigin,
                                                kRGBA_8888_SkColorType, kPremul_SkAlphaType,
                                                nullptr,
                                                PromiseTextureChecker::Fulfill,
                                                PromiseTextureChecker::Release,
                                                PromiseTextureChecker::Done,
                                                &promiseChecker));

        SkImageInfo info = SkImageInfo::MakeN32Premul(kWidth, kHeight);
        sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo, info);
        SkCanvas* canvas = surface->getCanvas();

        int expectedFulfillCnt = 0;
        int expectedReleaseCnt = 0;
        int expectedDoneCnt = 0;

        // Recording a draw alone must not trigger any callbacks.
        canvas->drawImage(refImg, 0, 0);
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 true,
                                                                 expectedDoneCnt,
                                                                 reporter));

        bool isVulkan = kVulkan_GrBackend == ctx->contextPriv().getBackend();
        // Flushing triggers one fulfill and, once the GPU is done, one release.
        canvas->flush();
        expectedFulfillCnt++;
        expectedReleaseCnt++;
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, !isVulkan,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 !isVulkan,
                                                                 expectedDoneCnt,
                                                                 reporter));

        gpu->testingOnly_flushGpuAndSync();
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 true,
                                                                 expectedDoneCnt,
                                                                 reporter));

        // Two draws in one flush still cost only one fulfill/release pair.
        canvas->drawImage(refImg, 0, 0);
        canvas->drawImage(refImg, 0, 0);

        canvas->flush();
        expectedFulfillCnt++;
        expectedReleaseCnt++;

        gpu->testingOnly_flushGpuAndSync();
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 true,
                                                                 expectedDoneCnt,
                                                                 reporter));

        // Now test code path on Vulkan where we released the texture, but the GPU isn't done with
        // resource yet and we do another draw. We should only call fulfill on the first draw and
        // use the cached GrBackendTexture on the second. Release should only be called after the
        // second draw is finished.
        canvas->drawImage(refImg, 0, 0);
        canvas->flush();
        expectedFulfillCnt++;
        expectedReleaseCnt++;
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, !isVulkan,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 !isVulkan,
                                                                 expectedDoneCnt,
                                                                 reporter));

        canvas->drawImage(refImg, 0, 0);

        if (releaseImageEarly) {
            refImg.reset();
        }

        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, !isVulkan,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 !isVulkan,
                                                                 expectedDoneCnt,
                                                                 reporter));

        canvas->flush();
        expectedFulfillCnt++;

        gpu->testingOnly_flushGpuAndSync();
        expectedReleaseCnt++;
        if (releaseImageEarly) {
            // The image was dropped before the flush, so Done fires once the GPU finishes.
            expectedDoneCnt++;
        }
        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 !isVulkan,
                                                                 expectedDoneCnt,
                                                                 reporter));
        // Re-baseline from the checker's actual counts before the final check.
        expectedFulfillCnt = promiseChecker.fFulfillCount;
        expectedReleaseCnt = promiseChecker.fReleaseCount;

        if (!releaseImageEarly) {
            refImg.reset();
            expectedDoneCnt++;
        }

        REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker, true,
                                                                 expectedFulfillCnt,
                                                                 expectedReleaseCnt,
                                                                 true,
                                                                 expectedDoneCnt,
                                                                 reporter));
        gpu->deleteTestingOnlyBackendTexture(backendTex);
    }
}
// Verifies that the tracked VkImageLayout of a backend texture is shared between all of its
// views: copies of the GrBackendTexture, the wrapping SkImage's GrVkTexture, and (legacy) the
// raw GrVkImageInfo handle. Also smoke-tests GrBackendTexture assignment/ref-counting.
DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkImageLayoutTest, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->contextPriv().getGpu());

    GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(nullptr, 1, 1,
                                                                       kRGBA_8888_GrPixelConfig,
                                                                       false,
                                                                       GrMipMapped::kNo);
    REPORTER_ASSERT(reporter, backendTex.isValid());

    GrVkImageInfo info;
    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    VkImageLayout initLayout = info.fImageLayout;

    // Verify that setting that layout via a copy of a backendTexture is reflected in all the
    // backendTextures.
    GrBackendTexture backendTexCopy = backendTex;
    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    backendTexCopy.setVkImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == info.fImageLayout);

    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == info.fImageLayout);

    // Setting back the layout since we didn't actually change it
    backendTex.setVkImageLayout(initLayout);

    sk_sp<SkImage> wrappedImage = SkImage::MakeFromTexture(context, backendTex,
                                                           kTopLeft_GrSurfaceOrigin,
                                                           kRGBA_8888_SkColorType,
                                                           kPremul_SkAlphaType, nullptr);
    REPORTER_ASSERT(reporter, wrappedImage.get());

    sk_sp<GrTextureProxy> texProxy = as_IB(wrappedImage)->asTextureProxyRef();
    REPORTER_ASSERT(reporter, texProxy.get());
    REPORTER_ASSERT(reporter, texProxy->priv().isInstantiated());
    GrTexture* texture = texProxy->priv().peekTexture();
    REPORTER_ASSERT(reporter, texture);

    // Verify that modifying the layout via the GrVkTexture is reflected in the GrBackendTexture
    GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
    REPORTER_ASSERT(reporter, initLayout == vkTexture->currentLayout());
    vkTexture->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == info.fImageLayout);

    GrBackendTexture backendTexImage = wrappedImage->getBackendTexture(false);
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == info.fImageLayout);

    // Verify that modifying the layout via the GrBackendTexture is reflected in the GrVkTexture
    backendTexImage.setVkImageLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == vkTexture->currentLayout());

#ifdef SK_SUPPORT_LEGACY_BACKEND_OBJECTS
    // Verify that modifying the layout via the old textureHandle still works and is reflected
    // in the GrVkTexture and GrBackendTexture.
    GrVkImageInfo* backendInfo = (GrVkImageInfo*)wrappedImage->getTextureHandle(false);
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == backendInfo->fImageLayout);

    backendInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
    REPORTER_ASSERT(reporter,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == vkTexture->currentLayout());
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == info.fImageLayout);
#endif

    // Restore the original layout; every view must observe the restore.
    vkTexture->updateImageLayout(initLayout);

    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);
    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    // Check that we can do things like assigning the backend texture to an invalid one,
    // assigning an invalid one, assigning a backend texture to itself etc. Success here is
    // that we don't hit any of our ref counting asserts.
    REPORTER_ASSERT(reporter, GrBackendTexture::TestingOnly_Equals(backendTex, backendTexCopy));

    GrBackendTexture invalidTexture;
    REPORTER_ASSERT(reporter, !invalidTexture.isValid());
    REPORTER_ASSERT(reporter,
                    !GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTexCopy));

    backendTexCopy = invalidTexture;
    REPORTER_ASSERT(reporter, !backendTexCopy.isValid());
    REPORTER_ASSERT(reporter,
                    !GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTexCopy));

    invalidTexture = backendTex;
    REPORTER_ASSERT(reporter, invalidTexture.isValid());
    REPORTER_ASSERT(reporter, GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTex));

    // Self-assignment; the cast is just to avoid a self-assign compiler warning.
    invalidTexture = static_cast<decltype(invalidTexture)&>(invalidTexture);
    REPORTER_ASSERT(reporter, invalidTexture.isValid());
    REPORTER_ASSERT(reporter,
                    GrBackendTexture::TestingOnly_Equals(invalidTexture, invalidTexture));

    gpu->deleteTestingOnlyBackendTexture(backendTex);
}