bool GrGLRenderTarget::completeStencilAttachment() {
    GrGLGpu* gpu = this->getGLGpu();
    const GrGLInterface* interface = gpu->glInterface();
    GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
    if (nullptr == stencil) {
        GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                      GR_GL_STENCIL_ATTACHMENT,
                                                      GR_GL_RENDERBUFFER, 0));
        GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                      GR_GL_DEPTH_ATTACHMENT,
                                                      GR_GL_RENDERBUFFER, 0));
#ifdef SK_DEBUG
        if (kChromium_GrGLDriver != gpu->glContext().driver()) {
            // This check can cause problems in Chromium if the context has been asynchronously
            // abandoned (see skbug.com/5200)
            GrGLenum status;
            GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
        }
#endif
        return true;
    } else {
        const GrGLStencilAttachment* glStencil =
                static_cast<const GrGLStencilAttachment*>(stencil);
        GrGLuint rb = glStencil->renderbufferID();

        gpu->invalidateBoundRenderTarget();
        gpu->stats()->incRenderTargetBinds();
        GR_GL_CALL(interface, BindFramebuffer(GR_GL_FRAMEBUFFER, this->renderFBOID()));
        GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                      GR_GL_STENCIL_ATTACHMENT,
                                                      GR_GL_RENDERBUFFER, rb));
        if (glStencil->format().fPacked) {
            GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                          GR_GL_DEPTH_ATTACHMENT,
                                                          GR_GL_RENDERBUFFER, rb));
        } else {
            GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                          GR_GL_DEPTH_ATTACHMENT,
                                                          GR_GL_RENDERBUFFER, 0));
        }
#ifdef SK_DEBUG
        if (kChromium_GrGLDriver != gpu->glContext().driver()) {
            // This check can cause problems in Chromium if the context has been asynchronously
            // abandoned (see skbug.com/5200)
            GrGLenum status;
            GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
        }
#endif
        return true;
    }
}
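// The SK_DEBUG completeness check above is duplicated verbatim in both branches.
// A minimal sketch of hoisting it into a shared helper follows; the helper name
// check_fbo_complete is hypothetical, not Skia API, and the placement assumes it
// lives in the same file so GrGLGpu and the GL driver enums are visible.
#ifdef SK_DEBUG
static void check_fbo_complete(GrGLGpu* gpu, const GrGLInterface* interface) {
    // Skip the query on Chromium's driver, where the context may have been
    // asynchronously abandoned (see skbug.com/5200).
    if (kChromium_GrGLDriver != gpu->glContext().driver()) {
        GrGLenum status;
        GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
    }
}
#endif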
void GrGLPathRange::onInitPath(int index, const SkPath& skPath) const {
    GrGLGpu* gpu = static_cast<GrGLGpu*>(this->getGpu());
    if (NULL == gpu) {
        return;
    }

    // Make sure the path at this index hasn't been initted already.
    SkDEBUGCODE(
        GrGLboolean isPath;
        GR_GL_CALL_RET(gpu->glInterface(), isPath, IsPath(fBasePathID + index)));
    SkASSERT(GR_GL_FALSE == isPath);

    GrGLPath::InitPathObject(gpu, fBasePathID + index, skPath, this->getStroke());

    // TODO: Use a better approximation for the individual path sizes.
    fGpuMemorySize += 100;
}
void GrGLPathRange::onInitPath(int index, const SkPath& origSkPath) const {
    GrGLGpu* gpu = static_cast<GrGLGpu*>(this->getGpu());
    if (NULL == gpu) {
        return;
    }

    // Make sure the path at this index hasn't been initted already.
    SkDEBUGCODE(
        GrGLboolean isPath;
        GR_GL_CALL_RET(gpu->glInterface(), isPath, IsPath(fBasePathID + index)));
    SkASSERT(GR_GL_FALSE == isPath);

    const SkPath* skPath = &origSkPath;
    SkTLazy<SkPath> tmpPath;
    const GrStrokeInfo* stroke = &fStroke;
    GrStrokeInfo tmpStroke(SkStrokeRec::kFill_InitStyle);

    // Dashing must be applied to the path. However, if dashing is present,
    // we must convert all the paths to fills. GrStrokeInfo::applyDashToPath
    // leaves simple paths as strokes but converts other paths to fills.
    // Thus we must stroke the strokes here, so that all paths in the
    // path range use the same style.
    if (fStroke.isDashed()) {
        if (!stroke->applyDashToPath(tmpPath.init(), &tmpStroke, *skPath)) {
            return;
        }
        skPath = tmpPath.get();
        stroke = &tmpStroke;
        if (tmpStroke.needToApply()) {
            if (!tmpStroke.applyToPath(tmpPath.get(), *tmpPath.get())) {
                return;
            }
            tmpStroke.setFillStyle();
        }
    }

    GrGLPath::InitPathObject(gpu, fBasePathID + index, *skPath, *stroke);

    // TODO: Use a better approximation for the individual path sizes.
    fGpuMemorySize += 100;
}
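// The dash-then-stroke conversion above reduces every styled path to a fill
// before it reaches InitPathObject(). A minimal standalone sketch of the same
// pipeline, using only the calls seen above; the function name and variable
// names are illustrative, not Skia API.
static bool convert_dashed_to_fill(const GrStrokeInfo& dashedStroke, const SkPath& input,
                                   SkPath* fill) {
    GrStrokeInfo remaining(SkStrokeRec::kFill_InitStyle);
    // Step 1: expand the dash intervals into geometry; 'remaining' receives
    // whatever stroke styling is still left to apply afterward.
    if (!dashedStroke.applyDashToPath(fill, &remaining, input)) {
        return false;
    }
    // Step 2: if a stroke is still pending, outline it into a fill in place
    // (the original code does the same dst == src call).
    if (remaining.needToApply() && !remaining.applyToPath(fill, *fill)) {
        return false;
    }
    // 'fill' is now meant to be drawn with fill style only.
    return true;
}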
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(EmptySurfaceSemaphoreTest, reporter, ctxInfo) {
    GrContext* ctx = ctxInfo.grContext();
    if (!ctx->caps()->fenceSyncSupport()) {
        return;
    }

    const SkImageInfo ii = SkImageInfo::Make(MAIN_W, MAIN_H, kRGBA_8888_SkColorType,
                                             kPremul_SkAlphaType);

    sk_sp<SkSurface> mainSurface(SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo,
                                                             ii, 0, kTopLeft_GrSurfaceOrigin,
                                                             nullptr));

    // Flush surface once without semaphores to make sure there is no pending IO for it.
    mainSurface->flush();

    GrBackendSemaphore semaphore;
    GrSemaphoresSubmitted submitted = mainSurface->flushAndSignalSemaphores(1, &semaphore);
    REPORTER_ASSERT(reporter, GrSemaphoresSubmitted::kYes == submitted);

    if (kOpenGL_GrBackend == ctxInfo.backend()) {
        GrGLGpu* gpu = static_cast<GrGLGpu*>(ctx->contextPriv().getGpu());
        const GrGLInterface* interface = gpu->glInterface();
        GrGLsync sync = semaphore.glSync();
        REPORTER_ASSERT(reporter, sync);
        bool result;
        GR_GL_CALL_RET(interface, result, IsSync(sync));
        REPORTER_ASSERT(reporter, result);
    }

#ifdef SK_VULKAN
    if (kVulkan_GrBackend == ctxInfo.backend()) {
        GrVkGpu* gpu = static_cast<GrVkGpu*>(ctx->contextPriv().getGpu());
        const GrVkInterface* interface = gpu->vkInterface();
        VkDevice device = gpu->device();
        VkQueue queue = gpu->queue();
        VkCommandPool cmdPool = gpu->cmdPool();
        VkCommandBuffer cmdBuffer;

        // Create Command Buffer
        const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
            nullptr,                                          // pNext
            cmdPool,                                          // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
            1                                                 // bufferCount
        };

        VkResult err = GR_VK_CALL(interface, AllocateCommandBuffers(device, &cmdInfo, &cmdBuffer));
        if (err) {
            return;
        }

        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = nullptr;

        GR_VK_CALL_ERRCHECK(interface, BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
        GR_VK_CALL_ERRCHECK(interface, EndCommandBuffer(cmdBuffer));

        VkFenceCreateInfo fenceInfo;
        VkFence fence;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(interface, CreateFence(device, &fenceInfo, nullptr, &fence));
        SkASSERT(!err);

        VkPipelineStageFlags waitStages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        VkSubmitInfo submitInfo;
        memset(&submitInfo, 0, sizeof(VkSubmitInfo));
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;
        submitInfo.waitSemaphoreCount = 1;
        VkSemaphore vkSem = semaphore.vkSemaphore();
        submitInfo.pWaitSemaphores = &vkSem;
        submitInfo.pWaitDstStageMask = &waitStages;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &cmdBuffer;
        submitInfo.signalSemaphoreCount = 0;
        submitInfo.pSignalSemaphores = nullptr;
        GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));

        err = GR_VK_CALL(interface, WaitForFences(device, 1, &fence, true, 3000000000));
        REPORTER_ASSERT(reporter, err != VK_TIMEOUT);

        GR_VK_CALL(interface, DestroyFence(device, fence, nullptr));
        GR_VK_CALL(interface, DestroySemaphore(device, vkSem, nullptr));
        // If the above test fails, the wait semaphore will never be signaled, which can
        // cause the device to hang when tearing down (even if just tearing down GL). So
        // we fail hard here to kill things.
        if (err == VK_TIMEOUT) {
            SK_ABORT("Waiting on semaphore indefinitely");
        }
    }
#endif
}
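// The create-fence / submit / wait / destroy sequence at the end of the Vulkan
// branch is a self-contained pattern; a minimal sketch of it factored into a
// helper follows, using only the calls seen above. The helper name
// submit_and_wait is hypothetical, not Skia API.
#ifdef SK_VULKAN
static VkResult submit_and_wait(const GrVkInterface* interface, VkDevice device,
                                VkQueue queue, const VkSubmitInfo& submitInfo) {
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    VkFence fence;
    VkResult err = GR_VK_CALL(interface, CreateFence(device, &fenceInfo, nullptr, &fence));
    if (err) {
        return err;
    }
    GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));
    // Block for up to 3 seconds; VK_TIMEOUT here means the wait semaphore in
    // submitInfo was never signaled.
    err = GR_VK_CALL(interface, WaitForFences(device, 1, &fence, true, 3000000000));
    GR_VK_CALL(interface, DestroyFence(device, fence, nullptr));
    return err;
}
#endif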
GrContext* GrContextFactory::get(GLContextType type, GrGLStandard forcedGpuAPI) {
    for (int i = 0; i < fContexts.count(); ++i) {
        if (forcedGpuAPI != kNone_GrGLStandard &&
            forcedGpuAPI != fContexts[i].fGLContext->gl()->fStandard)
            continue;

        if (fContexts[i].fType == type) {
            fContexts[i].fGLContext->makeCurrent();
            return fContexts[i].fGrContext;
        }
    }
    SkAutoTUnref<SkGLContext> glCtx;
    SkAutoTUnref<GrContext> grCtx;
    switch (type) {
        case kNVPR_GLContextType: // fallthru
        case kNative_GLContextType:
            glCtx.reset(SkCreatePlatformGLContext(forcedGpuAPI));
            break;
#ifdef SK_ANGLE
        case kANGLE_GLContextType:
            glCtx.reset(SkANGLEGLContext::Create(forcedGpuAPI));
            break;
#endif
#ifdef SK_MESA
        case kMESA_GLContextType:
            glCtx.reset(SkMesaGLContext::Create(forcedGpuAPI));
            break;
#endif
        case kNull_GLContextType:
            glCtx.reset(SkNullGLContext::Create(forcedGpuAPI));
            break;
        case kDebug_GLContextType:
            glCtx.reset(SkDebugGLContext::Create(forcedGpuAPI));
            break;
    }
    if (NULL == glCtx.get()) {
        return NULL;
    }

    SkASSERT(glCtx->isValid());

    // Block NVPR from non-NVPR types.
    SkAutoTUnref<const GrGLInterface> glInterface(SkRef(glCtx->gl()));
    if (kNVPR_GLContextType != type) {
        glInterface.reset(GrGLInterfaceRemoveNVPR(glInterface));
        if (!glInterface) {
            return NULL;
        }
    } else {
        if (!glInterface->hasExtension("GL_NV_path_rendering")) {
            return NULL;
        }
    }

    glCtx->makeCurrent();
    GrBackendContext p3dctx = reinterpret_cast<GrBackendContext>(glInterface.get());
    grCtx.reset(GrContext::Create(kOpenGL_GrBackend, p3dctx, fGlobalOptions));
    if (!grCtx.get()) {
        return NULL;
    }
    // Warn if path rendering support is not available for the NVPR type.
    if (kNVPR_GLContextType == type) {
        if (!grCtx->caps()->shaderCaps()->pathRenderingSupport()) {
            GrGLGpu* gpu = static_cast<GrGLGpu*>(grCtx->getGpu());
            const GrGLubyte* verUByte;
            GR_GL_CALL_RET(gpu->glInterface(), verUByte, GetString(GR_GL_VERSION));
            const char* ver = reinterpret_cast<const char*>(verUByte);
            SkDebugf("\nWARNING: nvprmsaa config requested, but driver path rendering support not"
                     " available. Maybe update the driver? Your driver version string: \"%s\"\n",
                     ver);
        }
    }

    GPUContext& ctx = fContexts.push_back();
    ctx.fGLContext = glCtx.get();
    ctx.fGLContext->ref();
    ctx.fGrContext = grCtx.get();
    ctx.fGrContext->ref();
    ctx.fType = type;
    return ctx.fGrContext;
}
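// A minimal usage sketch of the factory from a caller's point of view; the
// function name example and the surrounding setup are illustrative, not part
// of this file.
static void example(GrContextFactory* factory) {
    // Request a native context with a forced GLES API; NULL means the
    // platform could not satisfy the request.
    GrContext* context = factory->get(GrContextFactory::kNative_GLContextType,
                                      kGLES_GrGLStandard);
    if (NULL == context) {
        return;
    }
    // The returned context is owned (and ref'd) by the factory, and its GL
    // context has already been made current by get().
}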