/** Clears a 5x5 render target to several colors and verifies every pixel via readback. */
void basic_clear_test(skiatest::Reporter* reporter, GrContext* context, GrPixelConfig config) {
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
    gpu->discard(nullptr);
    SkAutoTMalloc<GrColor> buffer(25);

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    surfDesc.fOrigin = kTopLeft_GrSurfaceOrigin;
    surfDesc.fWidth = 5;
    surfDesc.fHeight = 5;
    surfDesc.fConfig = config;
    surfDesc.fSampleCnt = 0;
    GrTexture* tex = gpu->createTexture(surfDesc, false, nullptr, 0);
    SkASSERT(tex);
    SkASSERT(tex->asRenderTarget());

    SkIRect rect = SkIRect::MakeWH(5, 5);

    // Clear to transparent black and verify.
    gpu->clear(rect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget());
    gpu->readPixels(tex, 0, 0, 5, 5, config, (void*)buffer.get(), 0);
    REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(),
                                                                     GrColor_TRANSPARENT_BLACK,
                                                                     config, 5, 5));

    // Clear to opaque white and verify.
    gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget());
    gpu->readPixels(tex, 0, 0, 5, 5, config, (void*)buffer.get(), 0);
    REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(), GrColor_WHITE,
                                                                     config, 5, 5));

    // Clear to an arbitrary non-trivial color and verify.
    GrColor myColor = GrColorPackRGBA(0xFF, 0x7F, 0x40, 0x20);
    gpu->clear(rect, myColor, tex->asRenderTarget());
    gpu->readPixels(tex, 0, 0, 5, 5, config, (void*)buffer.get(), 0);
    REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(), myColor,
                                                                     config, 5, 5));

    // BUGFIX: the texture created above was never released, leaking the GrTexture ref (and its
    // backing Vulkan resources) on every run of this test.
    tex->unref();
}
/** Validates wrapBackendRenderTarget() (legacy GrBackendObject path): a valid handle wraps,
    a null VkImage is rejected, a null alloc is allowed only when borrowing, and adoption of a
    fully valid handle succeeds. */
void wrap_rt_test(skiatest::Reporter* reporter, GrContext* context) {
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());

    GrBackendObject backendObj = gpu->createTestingOnlyBackendTexture(nullptr, kW, kH,
                                                                      kPixelConfig, true);
    const GrVkImageInfo* backendTex = reinterpret_cast<const GrVkImageInfo*>(backendObj);

    // check basic borrowed creation
    GrBackendRenderTargetDesc desc;
    desc.fWidth = kW;
    desc.fHeight = kH;
    desc.fConfig = kPixelConfig;
    desc.fOrigin = kTopLeft_GrSurfaceOrigin;
    desc.fSampleCnt = 0;
    desc.fStencilBits = 0;
    desc.fRenderTargetHandle = backendObj;
    GrRenderTarget* rt = gpu->wrapBackendRenderTarget(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, rt);
    rt->unref();

    // image is null
    GrVkImageInfo backendCopy = *backendTex;
    backendCopy.fImage = VK_NULL_HANDLE;
    desc.fRenderTargetHandle = (GrBackendObject)&backendCopy;
    rt = gpu->wrapBackendRenderTarget(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !rt);
    rt = gpu->wrapBackendRenderTarget(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !rt);

    // alloc is null
    backendCopy.fImage = backendTex->fImage;
    backendCopy.fAlloc = { VK_NULL_HANDLE, 0, 0 };
    // can wrap null alloc if borrowing
    rt = gpu->wrapBackendRenderTarget(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, rt);
    // BUGFIX: this successfully borrowed render target was previously leaked when the pointer
    // was reassigned by the adopt attempt below.
    rt->unref();
    // but not if adopting
    rt = gpu->wrapBackendRenderTarget(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !rt);

    // check adopt creation
    backendCopy.fAlloc = backendTex->fAlloc;
    rt = gpu->wrapBackendRenderTarget(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, rt);
    rt->unref();

    gpu->deleteTestingOnlyBackendTexture(backendObj, true);
}
/** Exercises wrapBackendTexture() with a valid texture, an image-less copy, an alloc-less copy,
    and finally an adopted valid copy. Wrapping must fail for the broken inputs. */
void wrap_tex_test(skiatest::Reporter* reporter, GrContext* context) {
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->contextPriv().getGpu());

    GrBackendTexture origBackendTex = gpu->createTestingOnlyBackendTexture(nullptr, kW, kH,
                                                                           kPixelConfig, false,
                                                                           GrMipMapped::kNo);
    const GrVkImageInfo* srcInfo = origBackendTex.getVkImageInfo();

    // A fully valid texture wraps successfully when borrowed.
    sk_sp<GrTexture> wrapped = gpu->wrapBackendTexture(origBackendTex, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, wrapped);

    // A null VkImage must be rejected under both ownership modes.
    {
        GrVkImageInfo infoCopy = *srcInfo;
        infoCopy.fImage = VK_NULL_HANDLE;
        GrBackendTexture brokenTex = GrBackendTexture(kW, kH, infoCopy);
        wrapped = gpu->wrapBackendTexture(brokenTex, kBorrow_GrWrapOwnership);
        REPORTER_ASSERT(reporter, !wrapped);
        wrapped = gpu->wrapBackendTexture(brokenTex, kAdopt_GrWrapOwnership);
        REPORTER_ASSERT(reporter, !wrapped);
    }

    // A default (null) allocation must likewise be rejected under both ownership modes.
    {
        GrVkImageInfo infoCopy = *srcInfo;
        infoCopy.fAlloc = GrVkAlloc();
        GrBackendTexture brokenTex = GrBackendTexture(kW, kH, infoCopy);
        wrapped = gpu->wrapBackendTexture(brokenTex, kBorrow_GrWrapOwnership);
        REPORTER_ASSERT(reporter, !wrapped);
        wrapped = gpu->wrapBackendTexture(brokenTex, kAdopt_GrWrapOwnership);
        REPORTER_ASSERT(reporter, !wrapped);
    }

    // Adopting an intact copy succeeds.
    {
        GrVkImageInfo infoCopy = *srcInfo;
        GrBackendTexture adoptedTex = GrBackendTexture(kW, kH, infoCopy);
        wrapped = gpu->wrapBackendTexture(adoptedTex, kAdopt_GrWrapOwnership);
        REPORTER_ASSERT(reporter, wrapped);
    }

    gpu->deleteTestingOnlyBackendTexture(&origBackendTex, true);
}
/** Legacy GrBackendObject variant of the wrapBackendTexture() validation test: valid handle
    wraps; null image or null alloc is rejected for both borrow and adopt; adoption of a
    restored handle succeeds. */
void wrap_tex_test(skiatest::Reporter* reporter, GrContext* context) {
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());

    GrBackendObject handle = gpu->createTestingOnlyBackendTexture(nullptr, kW, kH,
                                                                  kPixelConfig, false);
    const GrVkImageInfo* srcInfo = reinterpret_cast<const GrVkImageInfo*>(handle);

    // Basic borrowed creation from the untouched handle.
    GrBackendTextureDesc desc;
    desc.fConfig = kPixelConfig;
    desc.fWidth = kW;
    desc.fHeight = kH;
    desc.fTextureHandle = handle;
    GrTexture* texture = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, texture);
    texture->unref();

    // Null VkImage: both ownership modes must refuse to wrap.
    GrVkImageInfo infoCopy = *srcInfo;
    infoCopy.fImage = VK_NULL_HANDLE;
    desc.fTextureHandle = (GrBackendObject)&infoCopy;
    texture = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !texture);
    texture = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !texture);

    // Null allocation: both ownership modes must refuse to wrap.
    infoCopy.fImage = srcInfo->fImage;
    infoCopy.fAlloc = { VK_NULL_HANDLE, 0, 0 };
    texture = gpu->wrapBackendTexture(desc, kBorrow_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !texture);
    texture = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, !texture);

    // Restore the alloc and verify adoption succeeds.
    infoCopy.fAlloc = srcInfo->fAlloc;
    texture = gpu->wrapBackendTexture(desc, kAdopt_GrWrapOwnership);
    REPORTER_ASSERT(reporter, texture);
    texture->unref();

    gpu->deleteTestingOnlyBackendTexture(handle, true);
}
/** Exercises wrapBackendRenderTarget(): a valid target wraps, a null VkImage is rejected, and
    a null alloc is accepted because render-target wrapping is always borrowed. */
void wrap_rt_test(skiatest::Reporter* reporter, GrContext* context) {
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->contextPriv().getGpu());

    GrBackendTexture origBackendTex = gpu->createTestingOnlyBackendTexture(nullptr, kW, kH,
                                                                           kPixelConfig, true,
                                                                           GrMipMapped::kNo);
    const GrVkImageInfo* srcInfo = origBackendTex.getVkImageInfo();
    GrBackendRenderTarget origBackendRT(kW, kH, 1, 0, *srcInfo);

    // A fully valid render target wraps successfully.
    sk_sp<GrRenderTarget> wrappedRT = gpu->wrapBackendRenderTarget(origBackendRT);
    REPORTER_ASSERT(reporter, wrappedRT);

    // A null VkImage must be rejected.
    {
        GrVkImageInfo infoCopy = *srcInfo;
        infoCopy.fImage = VK_NULL_HANDLE;
        GrBackendRenderTarget brokenRT(kW, kH, 1, 0, infoCopy);
        wrappedRT = gpu->wrapBackendRenderTarget(brokenRT);
        REPORTER_ASSERT(reporter, !wrappedRT);
    }

    // A null alloc is acceptable here since the wrap never takes ownership of the memory.
    {
        GrVkImageInfo infoCopy = *srcInfo;
        infoCopy.fAlloc = GrVkAlloc();
        GrBackendRenderTarget noAllocRT(kW, kH, 1, 0, infoCopy);
        wrappedRT = gpu->wrapBackendRenderTarget(noAllocRT);
        REPORTER_ASSERT(reporter, wrappedRT);
    }

    // When we wrapBackendRenderTarget it is always borrowed, so we must make sure to free the
    // resource when we're done.
    gpu->deleteTestingOnlyBackendTexture(&origBackendTex);
}
void sub_clear_test(skiatest::Reporter* reporter, GrContext* context, GrPixelConfig config) { const int width = 10; const int height = 10; const int subWidth = width/2; const int subHeight = height/2; GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu()); gpu->discard(NULL); SkAutoTMalloc<GrColor> buffer(width * height); SkAutoTMalloc<GrColor> subBuffer(subWidth * subHeight); GrSurfaceDesc surfDesc; surfDesc.fFlags = kRenderTarget_GrSurfaceFlag; surfDesc.fOrigin = kTopLeft_GrSurfaceOrigin; surfDesc.fWidth = width; surfDesc.fHeight = height; surfDesc.fConfig = config; surfDesc.fSampleCnt = 0; GrTexture* tex = gpu->createTexture(surfDesc, false, nullptr, 0); SkASSERT(tex); SkASSERT(tex->asRenderTarget()); SkIRect fullRect = SkIRect::MakeWH(10, 10); gpu->clear(fullRect, GrColor_TRANSPARENT_BLACK, tex->asRenderTarget()); gpu->readPixels(tex, 0, 0, width, height, config, (void*)buffer.get(), 0); REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(), GrColor_TRANSPARENT_BLACK, config, width, height)); SkIRect rect; rect = SkIRect::MakeXYWH(0, 0, subWidth, subHeight); gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget()); rect = SkIRect::MakeXYWH(subWidth, 0, subWidth, subHeight); gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget()); rect = SkIRect::MakeXYWH(0, subHeight, subWidth, subHeight); gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget()); // Should fail since bottom right sub area has not been cleared to white gpu->readPixels(tex, 0, 0, width, height, config, (void*)buffer.get(), 0); REPORTER_ASSERT(reporter, !does_full_buffer_contain_correct_color(buffer.get(), GrColor_WHITE, config, width, height)); rect = SkIRect::MakeXYWH(subWidth, subHeight, subWidth, subHeight); gpu->clear(rect, GrColor_WHITE, tex->asRenderTarget()); gpu->readPixels(tex, 0, 0, width, height, config, (void*)buffer.get(), 0); REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(buffer.get(), GrColor_WHITE, config, width, height)); // Try different 
colors and that each sub area has correct color GrColor subColor1 = GrColorPackRGBA(0xFF, 0x00, 0x00, 0xFF); GrColor subColor2 = GrColorPackRGBA(0x00, 0xFF, 0x00, 0xFF); GrColor subColor3 = GrColorPackRGBA(0x00, 0x00, 0xFF, 0xFF); GrColor subColor4 = GrColorPackRGBA(0xFF, 0xFF, 0x00, 0xFF); rect = SkIRect::MakeXYWH(0, 0, subWidth, subHeight); gpu->clear(rect, subColor1, tex->asRenderTarget()); rect = SkIRect::MakeXYWH(subWidth, 0, subWidth, subHeight); gpu->clear(rect, subColor2, tex->asRenderTarget()); rect = SkIRect::MakeXYWH(0, subHeight, subWidth, subHeight); gpu->clear(rect, subColor3, tex->asRenderTarget()); rect = SkIRect::MakeXYWH(subWidth, subHeight, subWidth, subHeight); gpu->clear(rect, subColor4, tex->asRenderTarget()); gpu->readPixels(tex, 0, 0, subWidth, subHeight, config, (void*)subBuffer.get(), 0); REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(), subColor1, config, subWidth, subHeight)); gpu->readPixels(tex, subWidth, 0, subWidth, subHeight, config, (void*)subBuffer.get(), 0); REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(), subColor2, config, subWidth, subHeight)); gpu->readPixels(tex, 0, subHeight, subWidth, subHeight, config, (void*)subBuffer.get(), 0); REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(), subColor3, config, subWidth, subHeight)); gpu->readPixels(tex, subWidth, subHeight, subWidth, subHeight, config, (void*)subBuffer.get(), 0); REPORTER_ASSERT(reporter, does_full_buffer_contain_correct_color(subBuffer.get(), subColor4, config, subWidth, subHeight)); }
// Verifies that flushing a surface with no pending work still submits and signals a usable
// backend semaphore: on GL the sync object must be valid; on Vulkan an empty command buffer
// submitted with a wait on the semaphore must complete (fence signals within the timeout).
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(EmptySurfaceSemaphoreTest, reporter, ctxInfo) {
    GrContext* ctx = ctxInfo.grContext();
    if (!ctx->caps()->fenceSyncSupport()) {
        return;
    }

    const SkImageInfo ii = SkImageInfo::Make(MAIN_W, MAIN_H, kRGBA_8888_SkColorType,
                                             kPremul_SkAlphaType);

    sk_sp<SkSurface> mainSurface(SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo,
                                                             ii, 0, kTopLeft_GrSurfaceOrigin,
                                                             nullptr));

    // Flush surface once without semaphores to make sure there is no pending IO for it.
    mainSurface->flush();

    GrBackendSemaphore semaphore;
    GrSemaphoresSubmitted submitted = mainSurface->flushAndSignalSemaphores(1, &semaphore);
    REPORTER_ASSERT(reporter, GrSemaphoresSubmitted::kYes == submitted);

    if (kOpenGL_GrBackend == ctxInfo.backend()) {
        GrGLGpu* gpu = static_cast<GrGLGpu*>(ctx->contextPriv().getGpu());
        const GrGLInterface* interface = gpu->glInterface();
        GrGLsync sync = semaphore.glSync();
        REPORTER_ASSERT(reporter, sync);
        bool result;
        GR_GL_CALL_RET(interface, result, IsSync(sync));
        REPORTER_ASSERT(reporter, result);
    }

#ifdef SK_VULKAN
    if (kVulkan_GrBackend == ctxInfo.backend()) {
        GrVkGpu* gpu = static_cast<GrVkGpu*>(ctx->contextPriv().getGpu());
        const GrVkInterface* interface = gpu->vkInterface();
        VkDevice device = gpu->device();
        VkQueue queue = gpu->queue();
        VkCommandPool cmdPool = gpu->cmdPool();
        VkCommandBuffer cmdBuffer;

        // Create Command Buffer
        const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
            nullptr,                                          // pNext
            cmdPool,                                          // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
            1                                                 // bufferCount
        };

        VkResult err = GR_VK_CALL(interface, AllocateCommandBuffers(device, &cmdInfo,
                                                                    &cmdBuffer));
        if (err) {
            return;
        }

        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = nullptr;

        // Record an empty command buffer whose only purpose is to wait on the semaphore.
        GR_VK_CALL_ERRCHECK(interface, BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
        GR_VK_CALL_ERRCHECK(interface, EndCommandBuffer(cmdBuffer));

        VkFenceCreateInfo fenceInfo;
        VkFence fence;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(interface, CreateFence(device, &fenceInfo, nullptr, &fence));
        SkASSERT(!err);

        VkPipelineStageFlags waitStages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        VkSubmitInfo submitInfo;
        memset(&submitInfo, 0, sizeof(VkSubmitInfo));
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;
        submitInfo.waitSemaphoreCount = 1;
        VkSemaphore vkSem = semaphore.vkSemaphore();
        submitInfo.pWaitSemaphores = &vkSem;
        submitInfo.pWaitDstStageMask = &waitStages;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &cmdBuffer;
        submitInfo.signalSemaphoreCount = 0;
        submitInfo.pSignalSemaphores = nullptr;
        GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));

        err = GR_VK_CALL(interface, WaitForFences(device, 1, &fence, true, 3000000000));
        REPORTER_ASSERT(reporter, err != VK_TIMEOUT);

        // BUGFIX: return the allocated command buffer to its pool instead of leaking it until
        // the pool is destroyed.
        GR_VK_CALL(interface, FreeCommandBuffers(device, cmdPool, 1, &cmdBuffer));
        GR_VK_CALL(interface, DestroyFence(device, fence, nullptr));
        GR_VK_CALL(interface, DestroySemaphore(device, vkSem, nullptr));
        // If the above test fails the wait semaphore will never be signaled which can cause the
        // device to hang when tearing down (even if just tearing down GL). So we Fail here to
        // kill things.
        if (err == VK_TIMEOUT) {
            SK_ABORT("Waiting on semaphore indefinitely");
        }
    }
#endif
}
void surface_semaphore_test(skiatest::Reporter* reporter, const sk_gpu_test::ContextInfo& mainInfo, const sk_gpu_test::ContextInfo& childInfo1, const sk_gpu_test::ContextInfo& childInfo2, bool flushContext) { GrContext* mainCtx = mainInfo.grContext(); if (!mainCtx->caps()->fenceSyncSupport()) { return; } const SkImageInfo ii = SkImageInfo::Make(MAIN_W, MAIN_H, kRGBA_8888_SkColorType, kPremul_SkAlphaType); sk_sp<SkSurface> mainSurface(SkSurface::MakeRenderTarget(mainCtx, SkBudgeted::kNo, ii, 0, kTopLeft_GrSurfaceOrigin, nullptr)); SkCanvas* mainCanvas = mainSurface->getCanvas(); mainCanvas->clear(SK_ColorBLUE); SkAutoTArray<GrBackendSemaphore> semaphores(2); #ifdef SK_VULKAN if (kVulkan_GrBackend == mainInfo.backend()) { // Initialize the secondary semaphore instead of having Ganesh create one internally GrVkGpu* gpu = static_cast<GrVkGpu*>(mainCtx->contextPriv().getGpu()); const GrVkInterface* interface = gpu->vkInterface(); VkDevice device = gpu->device(); VkSemaphore vkSem; VkSemaphoreCreateInfo createInfo; createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; createInfo.pNext = nullptr; createInfo.flags = 0; GR_VK_CALL_ERRCHECK(interface, CreateSemaphore(device, &createInfo, nullptr, &vkSem)); semaphores[1].initVulkan(vkSem); } #endif if (flushContext) { mainCtx->flushAndSignalSemaphores(2, semaphores.get()); } else { mainSurface->flushAndSignalSemaphores(2, semaphores.get()); } sk_sp<SkImage> mainImage = mainSurface->makeImageSnapshot(); GrBackendTexture backendTexture = mainImage->getBackendTexture(false); draw_child(reporter, childInfo1, backendTexture, semaphores[0]); #ifdef SK_VULKAN if (kVulkan_GrBackend == mainInfo.backend()) { // In Vulkan we need to make sure we are sending the correct VkImageLayout in with the // backendImage. After the first child draw the layout gets changed to SHADER_READ, so // we just manually set that here. 
GrVkImageInfo vkInfo; SkAssertResult(backendTexture.getVkImageInfo(&vkInfo)); vkInfo.updateImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); } #endif draw_child(reporter, childInfo2, backendTexture, semaphores[1]); }
// Imports an AHardwareBuffer into Vulkan as a dedicated-allocation VkImage and wraps it in a
// GrBackendTexture. Returns an invalid (default) GrBackendTexture on any failure, destroying
// whatever Vulkan objects were created up to that point.
//
// deleteProc/deleteCtx are out-params: on success they receive the cleanup callback and context
// the caller must use to destroy the VkImage and free the imported VkDeviceMemory.
// NOTE(review): isProtectedContent and config are not referenced in this body — confirm whether
// protected-memory handling / config validation happens in the caller.
static GrBackendTexture make_vk_backend_texture(
        GrContext* context, AHardwareBuffer* hardwareBuffer,
        int width, int height, GrPixelConfig config,
        GrAHardwareBufferImageGenerator::DeleteImageProc* deleteProc,
        GrAHardwareBufferImageGenerator::DeleteImageCtx* deleteCtx,
        bool isProtectedContent,
        const GrBackendFormat& backendFormat) {
    SkASSERT(context->contextPriv().getBackend() == kVulkan_GrBackend);
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->contextPriv().getGpu());
    VkPhysicalDevice physicalDevice = gpu->physicalDevice();
    VkDevice device = gpu->device();

    SkASSERT(gpu);
    if (!gpu->vkCaps().supportsAndroidHWBExternalMemory()) {
        return GrBackendTexture();
    }

    SkASSERT(backendFormat.getVkFormat());
    VkFormat format = *backendFormat.getVkFormat();

    VkResult err;

    // Query the AHardwareBuffer's Vulkan properties; the format-properties struct is chained
    // off the general properties struct via pNext.
    VkAndroidHardwareBufferFormatPropertiesANDROID hwbFormatProps;
    hwbFormatProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
    hwbFormatProps.pNext = nullptr;

    VkAndroidHardwareBufferPropertiesANDROID hwbProps;
    hwbProps.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
    hwbProps.pNext = &hwbFormatProps;

    err = VK_CALL(GetAndroidHardwareBufferProperties(device, hardwareBuffer, &hwbProps));
    if (VK_SUCCESS != err) {
        return GrBackendTexture();
    }

    // The buffer's reported format must match the requested backend format, and it must be
    // sampleable and copyable in both directions.
    SkASSERT(format == hwbFormatProps.format);
    SkASSERT(SkToBool(VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT & hwbFormatProps.formatFeatures) &&
             SkToBool(VK_FORMAT_FEATURE_TRANSFER_SRC_BIT & hwbFormatProps.formatFeatures) &&
             SkToBool(VK_FORMAT_FEATURE_TRANSFER_DST_BIT & hwbFormatProps.formatFeatures));

    // The image must be created with the AHB external-memory handle type so it can later be
    // bound to memory imported from the hardware buffer.
    const VkExternalMemoryImageCreateInfo externalMemoryImageInfo {
        VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, // sType
        nullptr, // pNext
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, // handleTypes
    };
    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                   VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // TODO: Check the supported tilings vkGetPhysicalDeviceImageFormatProperties2 to see if we
    // have to use linear. Add better linear support throughout Ganesh.
    VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        &externalMemoryImageInfo,                    // pNext
        0,                                           // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                            // VkImageType
        format,                                      // VkFormat
        { (uint32_t)width, (uint32_t)height, 1 },    // VkExtent3D
        1,                                           // mipLevels
        1,                                           // arrayLayers
        VK_SAMPLE_COUNT_1_BIT,                       // samples
        tiling,                                      // VkImageTiling
        usageFlags,                                  // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyCount
        0,                                           // pQueueFamilyIndices
        VK_IMAGE_LAYOUT_UNDEFINED,                   // initialLayout
    };

    VkImage image;
    err = VK_CALL(CreateImage(device, &imageCreateInfo, nullptr, &image));
    if (VK_SUCCESS != err) {
        return GrBackendTexture();
    }

    // AHardwareBuffer import requires a dedicated allocation; confirm the driver agrees.
    VkImageMemoryRequirementsInfo2 memReqsInfo;
    memReqsInfo.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
    memReqsInfo.pNext = nullptr;
    memReqsInfo.image = image;

    VkMemoryDedicatedRequirements dedicatedMemReqs;
    dedicatedMemReqs.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
    dedicatedMemReqs.pNext = nullptr;

    VkMemoryRequirements2 memReqs;
    memReqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
    memReqs.pNext = &dedicatedMemReqs;

    VK_CALL(GetImageMemoryRequirements2(device, &memReqsInfo, &memReqs));
    SkASSERT(VK_TRUE == dedicatedMemReqs.requiresDedicatedAllocation);

    // Find a device-local memory type that the AHardwareBuffer's memoryTypeBits permits.
    VkPhysicalDeviceMemoryProperties2 phyDevMemProps;
    phyDevMemProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
    phyDevMemProps.pNext = nullptr;

    uint32_t typeIndex = 0;
    uint32_t heapIndex = 0;  // recorded alongside typeIndex; not read again below
    bool foundHeap = false;
    VK_CALL(GetPhysicalDeviceMemoryProperties2(physicalDevice, &phyDevMemProps));
    uint32_t memTypeCnt = phyDevMemProps.memoryProperties.memoryTypeCount;
    for (uint32_t i = 0; i < memTypeCnt && !foundHeap; ++i) {
        if (hwbProps.memoryTypeBits & (1 << i)) {
            const VkPhysicalDeviceMemoryProperties& pdmp = phyDevMemProps.memoryProperties;
            uint32_t supportedFlags = pdmp.memoryTypes[i].propertyFlags &
                                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            if (supportedFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
                typeIndex = i;
                heapIndex = pdmp.memoryTypes[i].heapIndex;
                foundHeap = true;
            }
        }
    }
    if (!foundHeap) {
        VK_CALL(DestroyImage(device, image, nullptr));
        return GrBackendTexture();
    }

    // Import the AHardwareBuffer's memory as a dedicated allocation tied to this image.
    VkImportAndroidHardwareBufferInfoANDROID hwbImportInfo;
    hwbImportInfo.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
    hwbImportInfo.pNext = nullptr;
    hwbImportInfo.buffer = hardwareBuffer;

    VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
    dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
    dedicatedAllocInfo.pNext = &hwbImportInfo;
    dedicatedAllocInfo.image = image;
    dedicatedAllocInfo.buffer = VK_NULL_HANDLE;

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
        &dedicatedAllocInfo,                         // pNext
        hwbProps.allocationSize,                     // allocationSize
        typeIndex,                                   // memoryTypeIndex
    };

    VkDeviceMemory memory;
    err = VK_CALL(AllocateMemory(device, &allocInfo, nullptr, &memory));
    if (VK_SUCCESS != err) {
        VK_CALL(DestroyImage(device, image, nullptr));
        return GrBackendTexture();
    }

    VkBindImageMemoryInfo bindImageInfo;
    bindImageInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
    bindImageInfo.pNext = nullptr;
    bindImageInfo.image = image;
    bindImageInfo.memory = memory;
    bindImageInfo.memoryOffset = 0;
    err = VK_CALL(BindImageMemory2(device, 1, &bindImageInfo));
    if (VK_SUCCESS != err) {
        VK_CALL(DestroyImage(device, image, nullptr));
        VK_CALL(FreeMemory(device, memory, nullptr));
        return GrBackendTexture();
    }

    GrVkImageInfo imageInfo;
    imageInfo.fImage = image;
    imageInfo.fAlloc = GrVkAlloc(memory, 0, hwbProps.allocationSize, 0);
    imageInfo.fImageTiling = tiling;
    imageInfo.fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageInfo.fFormat = format;
    imageInfo.fLevelCount = 1;
    // TODO: This should possibly be VK_QUEUE_FAMILY_FOREIGN_EXT but current Adreno devices do
    // not support that extension. Or if we know the source of the AHardwareBuffer is not from a
    // "foreign" device we can leave them as external.
    imageInfo.fCurrentQueueFamily = VK_QUEUE_FAMILY_EXTERNAL;

    // Hand the caller the cleanup hooks for the image + memory created above.
    *deleteProc = GrAHardwareBufferImageGenerator::DeleteVkImage;
    *deleteCtx = new VulkanCleanupHelper(gpu, image, memory);

    return GrBackendTexture(width, height, imageInfo);
}
// Verifies that the VkImageLayout tracked by a GrBackendTexture is shared state: changing the
// layout via any copy of the backend texture, via the GrVkTexture, or (legacy) via the raw
// GrVkImageInfo handle must be visible through all of the others. Also sanity-checks backend
// texture assignment/self-assignment against the internal ref-counting asserts.
DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkImageLayoutTest, reporter, ctxInfo) {
    GrContext* context = ctxInfo.grContext();
    GrVkGpu* gpu = static_cast<GrVkGpu*>(context->contextPriv().getGpu());
    GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(nullptr, 1, 1,
                                                                       kRGBA_8888_GrPixelConfig,
                                                                       false, GrMipMapped::kNo);
    REPORTER_ASSERT(reporter, backendTex.isValid());

    GrVkImageInfo info;
    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    VkImageLayout initLayout = info.fImageLayout;

    // Verify that setting that layout via a copy of a backendTexture is reflected in all the
    // backendTextures.
    GrBackendTexture backendTexCopy = backendTex;
    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    backendTexCopy.setVkImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == info.fImageLayout);
    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == info.fImageLayout);

    // Setting back the layout since we didn't actually change it
    backendTex.setVkImageLayout(initLayout);

    sk_sp<SkImage> wrappedImage = SkImage::MakeFromTexture(context, backendTex,
                                                           kTopLeft_GrSurfaceOrigin,
                                                           kRGBA_8888_SkColorType,
                                                           kPremul_SkAlphaType, nullptr);
    REPORTER_ASSERT(reporter, wrappedImage.get());

    sk_sp<GrTextureProxy> texProxy = as_IB(wrappedImage)->asTextureProxyRef();
    REPORTER_ASSERT(reporter, texProxy.get());
    REPORTER_ASSERT(reporter, texProxy->priv().isInstantiated());
    GrTexture* texture = texProxy->priv().peekTexture();
    REPORTER_ASSERT(reporter, texture);

    // Verify that modifying the layout via the GrVkTexture is reflected in the GrBackendTexture
    GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
    REPORTER_ASSERT(reporter, initLayout == vkTexture->currentLayout());
    vkTexture->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == info.fImageLayout);

    GrBackendTexture backendTexImage = wrappedImage->getBackendTexture(false);
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == info.fImageLayout);

    // Verify that modifying the layout via the GrBackendTexture is reflected in the GrVkTexture
    backendTexImage.setVkImageLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    REPORTER_ASSERT(reporter,
                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == vkTexture->currentLayout());

#ifdef SK_SUPPORT_LEGACY_BACKEND_OBJECTS
    // Verify that modifying the layout via the old textureHandle still works and is reflected
    // in the GrVkTexture and GrBackendTexture.
    GrVkImageInfo* backendInfo = (GrVkImageInfo*)wrappedImage->getTextureHandle(false);
    REPORTER_ASSERT(reporter,
                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == backendInfo->fImageLayout);

    backendInfo->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
    REPORTER_ASSERT(reporter,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == vkTexture->currentLayout());
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == info.fImageLayout);
#endif

    // Restore the original layout and confirm every handle observes it.
    vkTexture->updateImageLayout(initLayout);
    REPORTER_ASSERT(reporter, backendTex.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);
    REPORTER_ASSERT(reporter, backendTexCopy.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);
    REPORTER_ASSERT(reporter, backendTexImage.getVkImageInfo(&info));
    REPORTER_ASSERT(reporter, initLayout == info.fImageLayout);

    // Check that we can do things like assigning the backend texture to an invalid one, assign
    // an invalid one, assign a backend texture to itself etc. Success here is that we don't hit
    // any of our ref counting asserts.
    REPORTER_ASSERT(reporter, GrBackendTexture::TestingOnly_Equals(backendTex, backendTexCopy));

    GrBackendTexture invalidTexture;
    REPORTER_ASSERT(reporter, !invalidTexture.isValid());
    REPORTER_ASSERT(reporter,
                    !GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTexCopy));

    backendTexCopy = invalidTexture;
    REPORTER_ASSERT(reporter, !backendTexCopy.isValid());
    REPORTER_ASSERT(reporter,
                    !GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTexCopy));

    invalidTexture = backendTex;
    REPORTER_ASSERT(reporter, invalidTexture.isValid());
    REPORTER_ASSERT(reporter, GrBackendTexture::TestingOnly_Equals(invalidTexture, backendTex));

    // Self-assignment; the cast is presumably there to sidestep self-assign warnings while
    // still exercising operator=.
    invalidTexture = static_cast<decltype(invalidTexture)&>(invalidTexture);
    REPORTER_ASSERT(reporter, invalidTexture.isValid());
    REPORTER_ASSERT(reporter,
                    GrBackendTexture::TestingOnly_Equals(invalidTexture, invalidTexture));

    gpu->deleteTestingOnlyBackendTexture(backendTex);
}