// Maps the buffer for CPU write access and returns a writable pointer, or NULL
// when this GL has no mapping support at all. The caller must not already hold
// a mapping (asserted below).
void* GrGLBufferImpl::map(GrGpuGL* gpu) {
    VALIDATE();
    SkASSERT(!this->isMapped());
    if (0 == fDesc.fID) {
        // Buffer ID 0 means this buffer is CPU-backed; "mapping" is just
        // handing out the heap allocation.
        fMapPtr = fCPUData;
    } else {
        switch (gpu->glCaps().mapBufferType()) {
            case GrGLCaps::kNone_MapBufferType:
                // No MapBuffer-style entry point available; caller must fall
                // back to updating via BufferData/BufferSubData.
                VALIDATE();
                return NULL;
            case GrGLCaps::kMapBuffer_MapBufferType:
                this->bind(gpu);
                // Let driver know it can discard the old data
                if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
                               MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
                break;
            case GrGLCaps::kMapBufferRange_MapBufferType: {
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                // INVALIDATE_BUFFER tells the driver the old contents are dead so it
                // can return fresh storage rather than stall on in-flight draws.
                static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
                                                    GR_GL_MAP_WRITE_BIT;
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
                               MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
                break;
            }
            case GrGLCaps::kChromium_MapBufferType:
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                // Chromium command-buffer variant of MapBuffer.
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
                               MapBufferSubData(fBufferType, 0, fGLSizeInBytes,
                                                GR_GL_WRITE_ONLY));
                break;
        }
    }
    VALIDATE();
    return fMapPtr;
}
bool GrGLRenderTarget::completeStencilAttachment() { GrGLGpu* gpu = this->getGLGpu(); const GrGLInterface* interface = gpu->glInterface(); GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment(); if (nullptr == stencil) { GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER, 0)); GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, GR_GL_RENDERBUFFER, 0)); #ifdef SK_DEBUG if (kChromium_GrGLDriver != gpu->glContext().driver()) { // This check can cause problems in Chromium if the context has been asynchronously // abandoned (see skbug.com/5200) GrGLenum status; GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status); } #endif return true; } else { const GrGLStencilAttachment* glStencil = static_cast<const GrGLStencilAttachment*>(stencil); GrGLuint rb = glStencil->renderbufferID(); gpu->invalidateBoundRenderTarget(); gpu->stats()->incRenderTargetBinds(); GR_GL_CALL(interface, BindFramebuffer(GR_GL_FRAMEBUFFER, this->renderFBOID())); GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER, rb)); if (glStencil->format().fPacked) { GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, GR_GL_RENDERBUFFER, rb)); } else { GR_GL_CALL(interface, FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, GR_GL_RENDERBUFFER, 0)); } #ifdef SK_DEBUG if (kChromium_GrGLDriver != gpu->glContext().driver()) { // This check can cause problems in Chromium if the context has been asynchronously // abandoned (see skbug.com/5200) GrGLenum status; GR_GL_CALL_RET(interface, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status); } #endif return true; } }
bool GrGLContextInfo::initialize(const GrGLInterface* interface) { this->reset(); // We haven't validated the GrGLInterface yet, so check for GetString // function pointer if (interface->fFunctions.fGetString) { const GrGLubyte* verUByte; GR_GL_CALL_RET(interface, verUByte, GetString(GR_GL_VERSION)); const char* ver = reinterpret_cast<const char*>(verUByte); const GrGLubyte* rendererUByte; GR_GL_CALL_RET(interface, rendererUByte, GetString(GR_GL_RENDERER)); const char* renderer = reinterpret_cast<const char*>(rendererUByte); if (interface->validate()) { fGLVersion = GrGLGetVersionFromString(ver); if (GR_GL_INVALID_VER == fGLVersion) { return false; } if (!GrGetGLSLGeneration(interface, &fGLSLGeneration)) { return false; } fVendor = GrGLGetVendor(interface); /* * Qualcomm drivers have a horrendous bug with some drivers. Though they claim to * support GLES 3.00, some perfectly valid GLSL300 shaders will only compile with * #version 100, and will fail to compile with #version 300 es. In the long term, we * need to lock this down to a specific driver version. */ if (kQualcomm_GrGLVendor == fVendor) { fGLSLGeneration = k110_GrGLSLGeneration; } fRenderer = GrGLGetRendererFromString(renderer); fIsMesa = GrGLIsMesaFromVersionString(ver); fIsChromium = GrGLIsChromiumFromRendererString(renderer); // This must occur before caps init. fInterface.reset(SkRef(interface)); return fGLCaps->init(*this, interface); } } return false; }
bool GrGLContextInfo::initialize(const GrGLInterface* interface) { this->reset(); // We haven't validated the GrGLInterface yet, so check for GetString // function pointer if (interface->fGetString) { const GrGLubyte* verUByte; GR_GL_CALL_RET(interface, verUByte, GetString(GR_GL_VERSION)); const char* ver = reinterpret_cast<const char*>(verUByte); GrGLBinding binding = GrGLGetBindingInUseFromString(ver); if (0 != binding && interface->validate(binding) && fExtensions.init(binding, interface)) { fBindingInUse = binding; fGLVersion = GrGLGetVersionFromString(ver); fGLSLGeneration = GrGetGLSLGeneration(fBindingInUse, interface); fVendor = GrGLGetVendor(interface); fIsMesa = GrGLIsMesaFromVersionString(ver); fGLCaps->init(*this, interface); return true; } } return false; }
// Compiles a GL shader and attaches it to a program. Returns the shader ID if // successful, or 0 if not. static GrGLuint attach_shader(const GrGLContext& glCtx, GrGLuint programId, GrGLenum type, const SkString& shaderSrc) { const GrGLInterface* gli = glCtx.interface(); GrGLuint shaderId; GR_GL_CALL_RET(gli, shaderId, CreateShader(type)); if (0 == shaderId) { return 0; } const GrGLchar* sourceStr = shaderSrc.c_str(); GrGLint sourceLength = static_cast<GrGLint>(shaderSrc.size()); GR_GL_CALL(gli, ShaderSource(shaderId, 1, &sourceStr, &sourceLength)); GR_GL_CALL(gli, CompileShader(shaderId)); // Calling GetShaderiv in Chromium is quite expensive. Assume success in release builds. bool checkCompiled = !glCtx.isChromium(); #ifdef SK_DEBUG checkCompiled = true; #endif if (checkCompiled) { GrGLint compiled = GR_GL_INIT_ZERO; GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_COMPILE_STATUS, &compiled)); if (!compiled) { GrGLint infoLen = GR_GL_INIT_ZERO; GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_INFO_LOG_LENGTH, &infoLen)); SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger if (infoLen > 0) { // retrieve length even though we don't need it to workaround bug in Chromium cmd // buffer param validation. GrGLsizei length = GR_GL_INIT_ZERO; GR_GL_CALL(gli, GetShaderInfoLog(shaderId, infoLen+1, &length, (char*)log.get())); GrPrintf(shaderSrc.c_str()); GrPrintf("\n%s", log.get()); } SkDEBUGFAIL("Shader compilation failed!"); GR_GL_CALL(gli, DeleteShader(shaderId)); return 0; } } TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "skia_gpu::GLShader", TRACE_EVENT_SCOPE_THREAD, "shader", TRACE_STR_COPY(shaderSrc.c_str())); if (c_PrintShaders) { GrPrintf(shaderSrc.c_str()); GrPrintf("\n"); } // Attach the shader, but defer deletion until after we have linked the program. 
// This works around a bug in the Android emulator's GLES2 wrapper which // will immediately delete the shader object and free its memory even though it's // attached to a program, which then causes glLinkProgram to fail. GR_GL_CALL(gli, AttachShader(programId, shaderId)); return shaderId; }
// Creates a GLFenceSync helper when the context supports fence syncs:
// core GL 3.2+ / GL_ARB_sync on desktop, GL_APPLE_sync on ES. Returns
// nullptr when the feature (or any required proc) is unavailable.
GLTestContext::GLFenceSync* GLTestContext::GLFenceSync::CreateIfSupported(const GLTestContext* ctx) {
    SkAutoTDelete<GLFenceSync> fenceSync(new GLFenceSync);

    // Pick the proc names for this standard, bailing if sync isn't supported.
    const char* fenceSyncName;
    const char* clientWaitSyncName;
    const char* deleteSyncName;
    if (kGL_GrGLStandard == ctx->gl()->fStandard) {
        const GrGLubyte* versionStr;
        GR_GL_CALL_RET(ctx->gl(), versionStr, GetString(GR_GL_VERSION));
        GrGLVersion version =
                GrGLGetVersionFromString(reinterpret_cast<const char*>(versionStr));
        if (version < GR_GL_VER(3,2) && !ctx->gl()->hasExtension("GL_ARB_sync")) {
            return nullptr;
        }
        fenceSyncName = "glFenceSync";
        clientWaitSyncName = "glClientWaitSync";
        deleteSyncName = "glDeleteSync";
    } else {
        if (!ctx->gl()->hasExtension("GL_APPLE_sync")) {
            return nullptr;
        }
        fenceSyncName = "glFenceSyncAPPLE";
        clientWaitSyncName = "glClientWaitSyncAPPLE";
        deleteSyncName = "glDeleteSyncAPPLE";
    }

    fenceSync->fGLFenceSync = reinterpret_cast<GLFenceSyncProc>(
        ctx->onPlatformGetProcAddress(fenceSyncName));
    fenceSync->fGLClientWaitSync = reinterpret_cast<GLClientWaitSyncProc>(
        ctx->onPlatformGetProcAddress(clientWaitSyncName));
    fenceSync->fGLDeleteSync = reinterpret_cast<GLDeleteSyncProc>(
        ctx->onPlatformGetProcAddress(deleteSyncName));

    // All three entry points must resolve for the helper to be usable.
    if (!fenceSync->fGLFenceSync || !fenceSync->fGLClientWaitSync ||
        !fenceSync->fGLDeleteSync) {
        return nullptr;
    }

    return fenceSync.release();
}
// C-linkage helper: reports whether the interface's GL version string parses
// to a valid version that is >= major.minor.
extern "C" bool SkiaGrGLInterfaceGLVersionGreaterThanOrEqualTo(SkiaGrGLInterfaceRef aGrGLInterface,
                                                               int32_t major, int32_t minor) {
    const GrGLInterface* interface = static_cast<const GrGLInterface*>(aGrGLInterface);

    const GrGLubyte* versionUByte;
    GR_GL_CALL_RET(interface, versionUByte, GetString(GR_GL_VERSION));

    GrGLVersion glVersion =
            GrGLGetVersionFromString(reinterpret_cast<const char*>(versionUByte));
    // An unparseable version string counts as "not at least major.minor".
    if (GR_GL_INVALID_VER == glVersion) {
        return false;
    }
    return glVersion >= GR_GL_VER(major, minor);
}
// Queries GL for the fragment-input location of every separable varying in
// this processor and caches it, so later path-rendering calls can bind them.
void GrGLNormalPathProcessor::resolveSeparableVaryings(GrGLGpu* gpu, GrGLuint programId) {
    const int varyingCount = fSeparableVaryingInfos.count();
    for (int idx = 0; idx < varyingCount; ++idx) {
        GrGLint resolvedLocation;
        GR_GL_CALL_RET(gpu->glInterface(),
                       resolvedLocation,
                       GetProgramResourceLocation(
                           programId,
                           GR_GL_FRAGMENT_INPUT,
                           fSeparableVaryingInfos[idx].fVariable.c_str()));
        fSeparableVaryingInfos[idx].fLocation = resolvedLocation;
    }
}
// Compiles a GL shader, attaches it to a program, and releases the shader's reference. // (That way there's no need to hang on to the GL shader id and delete it later.) static bool attach_shader(const GrGLContext& glCtx, GrGLuint programId, GrGLenum type, const SkString& shaderSrc) { const GrGLInterface* gli = glCtx.interface(); GrGLuint shaderId; GR_GL_CALL_RET(gli, shaderId, CreateShader(type)); if (0 == shaderId) { return false; } const GrGLchar* sourceStr = shaderSrc.c_str(); GrGLint sourceLength = static_cast<GrGLint>(shaderSrc.size()); GR_GL_CALL(gli, ShaderSource(shaderId, 1, &sourceStr, &sourceLength)); GR_GL_CALL(gli, CompileShader(shaderId)); // Calling GetShaderiv in Chromium is quite expensive. Assume success in release builds. bool checkCompiled = !glCtx.info().isChromium(); #ifdef SK_DEBUG checkCompiled = true; #endif if (checkCompiled) { GrGLint compiled = GR_GL_INIT_ZERO; GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_COMPILE_STATUS, &compiled)); if (!compiled) { GrGLint infoLen = GR_GL_INIT_ZERO; GR_GL_CALL(gli, GetShaderiv(shaderId, GR_GL_INFO_LOG_LENGTH, &infoLen)); SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger if (infoLen > 0) { // retrieve length even though we don't need it to workaround bug in Chromium cmd // buffer param validation. GrGLsizei length = GR_GL_INIT_ZERO; GR_GL_CALL(gli, GetShaderInfoLog(shaderId, infoLen+1, &length, (char*)log.get())); GrPrintf(shaderSrc.c_str()); GrPrintf("\n%s", log.get()); } SkDEBUGFAIL("Shader compilation failed!"); GR_GL_CALL(gli, DeleteShader(shaderId)); return false; } } if (c_PrintShaders) { GrPrintf(shaderSrc.c_str()); GrPrintf("\n"); } GR_GL_CALL(gli, AttachShader(programId, shaderId)); GR_GL_CALL(gli, DeleteShader(shaderId)); return true; }
// Wraps a GL 2D texture in an EGLImage via EGL_KHR_gl_texture_2D_image.
// Returns GR_EGL_NO_IMAGE when the extension is missing or creation fails.
GrEGLImage SkANGLEGLContext::texture2DToEGLImage(GrGLuint texID) const {
    if (!this->gl()->hasExtension("EGL_KHR_gl_texture_2D_image")) {
        return GR_EGL_NO_IMAGE;
    }

    // Use mip level 0 and ask EGL to preserve the texture's current contents.
    GrEGLint attribs[] = { GR_EGL_GL_TEXTURE_LEVEL, 0,
                           GR_EGL_IMAGE_PRESERVED, GR_EGL_TRUE,
                           GR_EGL_NONE };
    // Widen to 64 bits first so Visual C++ doesn't warn about casting a
    // 32 bit value to a pointer.
    GrEGLClientBuffer texBuffer =
            reinterpret_cast<GrEGLClientBuffer>(static_cast<uint64_t>(texID));

    GrEGLImage image;
    GR_GL_CALL_RET(this->gl(), image,
                   EGLCreateImage(fDisplay, fContext, GR_EGL_GL_TEXTURE_2D,
                                  texBuffer, attribs));
    return image;
}
void* GrGLVertexBuffer::lock() { GrAssert(fBufferID); GrAssert(!isLocked()); if (this->getGpu()->getCaps().fBufferLockSupport) { this->bind(); // Let driver know it can discard the old data GL_CALL(BufferData(GR_GL_ARRAY_BUFFER, this->sizeInBytes(), NULL, this->dynamic() ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW)); GR_GL_CALL_RET(GPUGL->glInterface(), fLockPtr, MapBuffer(GR_GL_ARRAY_BUFFER, GR_GL_WRITE_ONLY)); return fLockPtr; } return NULL; }
// Locks the buffer for CPU write access. For CPU-backed buffers this is just
// the heap allocation; for GL buffers it orphans and maps the buffer object.
void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
    VALIDATE();
    SkASSERT(!this->isLocked());

    if (0 == fDesc.fID) {
        // CPU-backed buffer: hand out the heap copy directly.
        fLockPtr = fCPUData;
        return fLockPtr;
    }
    if (!gpu->caps()->bufferLockSupport()) {
        // No mapping support; fLockPtr stays as-is.
        return fLockPtr;
    }

    this->bind(gpu);
    // Orphan the old storage so the driver can discard the previous contents
    // rather than stall on in-flight draws.
    GL_CALL(gpu, BufferData(fBufferType,
                            fDesc.fSizeInBytes,
                            NULL,
                            fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
    GR_GL_CALL_RET(gpu->glInterface(),
                   fLockPtr,
                   MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
    return fLockPtr;
}
// Creates and compiles a shader of the given type from shaderSrc.
// Returns the shader id, or 0 if the shader object could not be created.
// On a compile error the info log is printed and the (invalid) shader id is
// still returned, matching the original best-effort behavior of this helper.
static GrGLuint load_shader(const GrGLInterface* gl, const char* shaderSrc, GrGLenum type) {
    GrGLuint shader;
    // Create the shader object
    GR_GL_CALL_RET(gl, shader, CreateShader(type));
    if (0 == shader) {
        // Creation failed (e.g. no current context); nothing to compile.
        return 0;
    }

    // Load the shader source
    GR_GL_CALL(gl, ShaderSource(shader, 1, &shaderSrc, NULL));

    // Compile the shader
    GR_GL_CALL(gl, CompileShader(shader));

    // Check for compile time errors
    GrGLint success;
    GrGLchar infoLog[512];
    GR_GL_CALL(gl, GetShaderiv(shader, GR_GL_COMPILE_STATUS, &success));
    if (!success) {
        GR_GL_CALL(gl, GetShaderInfoLog(shader, 512, NULL, infoLog));
        // BUGFIX: corrected "COMPLIATION" typo in the error message.
        SkDebugf("ERROR::SHADER::COMPILATION_FAILED: %s\n", infoLog);
    }

    return shader;
}
// Determines the GL binding (desktop vs ES) by parsing GL_VERSION.
GrGLBinding GrGLGetBindingInUse(const GrGLInterface* gl) {
    const GrGLubyte* versionString;
    GR_GL_CALL_RET(gl, versionString, GetString(GR_GL_VERSION));
    return GrGLGetBindingInUseFromString(reinterpret_cast<const char*>(versionString));
}
// Parses the GLSL version out of GL_SHADING_LANGUAGE_VERSION.
GrGLSLVersion GrGLGetGLSLVersion(const GrGLInterface* gl) {
    const GrGLubyte* glslString;
    GR_GL_CALL_RET(gl, glslString, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
    return GrGLGetGLSLVersionFromString(reinterpret_cast<const char*>(glslString));
}
// Builds a GrGLContext from a validated interface: parses the version and
// renderer strings, fills in ConstructorArgs (version, GLSL generation,
// vendor, renderer, ANGLE and driver info), and applies driver-specific
// GLSL-generation workarounds. Returns nullptr if the interface fails
// validation or its strings cannot be parsed.
std::unique_ptr<GrGLContext> GrGLContext::Make(sk_sp<const GrGLInterface> interface,
                                               const GrContextOptions& options) {
    if (!interface->validate()) {
        return nullptr;
    }

    const GrGLubyte* verUByte;
    GR_GL_CALL_RET(interface.get(), verUByte, GetString(GR_GL_VERSION));
    const char* ver = reinterpret_cast<const char*>(verUByte);

    const GrGLubyte* rendererUByte;
    GR_GL_CALL_RET(interface.get(), rendererUByte, GetString(GR_GL_RENDERER));
    const char* renderer = reinterpret_cast<const char*>(rendererUByte);

    ConstructorArgs args;
    args.fGLVersion = GrGLGetVersionFromString(ver);
    if (GR_GL_INVALID_VER == args.fGLVersion) {
        return nullptr;
    }

    if (!GrGLGetGLSLGeneration(interface.get(), &args.fGLSLGeneration)) {
        return nullptr;
    }

    args.fVendor = GrGLGetVendor(interface.get());

    args.fRenderer = GrGLGetRendererFromStrings(renderer, interface->fExtensions);

    GrGLGetANGLEInfoFromString(renderer, &args.fANGLEBackend, &args.fANGLEVendor,
                               &args.fANGLERenderer);

    /*
     * Qualcomm drivers for the 3xx series have a horrendous bug with some drivers. Though they
     * claim to support GLES 3.00, some perfectly valid GLSL300 shaders will only compile with
     * #version 100, and will fail to compile with #version 300 es. In the long term, we
     * need to lock this down to a specific driver version.
     * ?????/2015 - This bug is still present in Lollipop pre-mr1
     * 06/18/2015 - This bug does not affect the nexus 6 (which has an Adreno 4xx).
     */
    if (kAdreno3xx_GrGLRenderer == args.fRenderer) {
        args.fGLSLGeneration = k110_GrGLSLGeneration;
    }

    // Many ES3 drivers only advertise the ES2 image_external extension, but support the _essl3
    // extension, and require that it be enabled to work with ESSL3. Other devices require the ES2
    // extension to be enabled, even when using ESSL3. Some devices appear to only support the ES2
    // extension. As an extreme (optional) solution, we can fallback to using ES2 shading language
    // if we want to prioritize external texture support. skbug.com/7713
    if (GR_IS_GR_GL_ES(interface->fStandard) &&
        options.fPreferExternalImagesOverES3 &&
        !options.fDisableDriverCorrectnessWorkarounds &&
        interface->hasExtension("GL_OES_EGL_image_external") &&
        args.fGLSLGeneration >= k330_GrGLSLGeneration &&
        !interface->hasExtension("GL_OES_EGL_image_external_essl3") &&
        !interface->hasExtension("OES_EGL_image_external_essl3")) {
        args.fGLSLGeneration = k110_GrGLSLGeneration;
    }

    GrGLGetDriverInfo(interface->fStandard, args.fVendor, renderer, ver,
                      &args.fDriver, &args.fDriverVersion);

    args.fContextOptions = &options;
    // Transfer ownership of the interface into the context.
    args.fInterface = std::move(interface);

    return std::unique_ptr<GrGLContext>(new GrGLContext(std::move(args)));
}
bool GrGLHasExtension(const GrGLInterface* gl, const char* ext) { const GrGLubyte* glstr; GR_GL_CALL_RET(gl, glstr, GetString(GR_GL_EXTENSIONS)); return GrGLHasExtensionFromString(ext, (const char*) glstr); }
// Returns (creating and caching on first use) a GrContext for the requested
// GL context type, optionally forcing a specific GL standard. Returns NULL
// when the platform context can't be created or required features are absent.
GrContext* GrContextFactory::get(GLContextType type, GrGLStandard forcedGpuAPI) {
    // Reuse a previously created context of the same type (and standard, if forced).
    for (int i = 0; i < fContexts.count(); ++i) {
        if (forcedGpuAPI != kNone_GrGLStandard &&
            forcedGpuAPI != fContexts[i].fGLContext->gl()->fStandard)
            continue;

        if (fContexts[i].fType == type) {
            fContexts[i].fGLContext->makeCurrent();
            return fContexts[i].fGrContext;
        }
    }

    SkAutoTUnref<SkGLContext> glCtx;
    SkAutoTUnref<GrContext> grCtx;
    switch (type) {
        case kNVPR_GLContextType: // fallthru
        case kNative_GLContextType:
            glCtx.reset(SkCreatePlatformGLContext(forcedGpuAPI));
            break;
#ifdef SK_ANGLE
        case kANGLE_GLContextType:
            glCtx.reset(SkANGLEGLContext::Create(forcedGpuAPI));
            break;
#endif
#ifdef SK_MESA
        case kMESA_GLContextType:
            glCtx.reset(SkMesaGLContext::Create(forcedGpuAPI));
            break;
#endif
        case kNull_GLContextType:
            glCtx.reset(SkNullGLContext::Create(forcedGpuAPI));
            break;
        case kDebug_GLContextType:
            glCtx.reset(SkDebugGLContext::Create(forcedGpuAPI));
            break;
    }
    if (NULL == glCtx.get()) {
        return NULL;
    }

    SkASSERT(glCtx->isValid());

    // Block NVPR from non-NVPR types: strip the NV_path_rendering entry points
    // for non-NVPR contexts, and require the extension for NVPR ones.
    SkAutoTUnref<const GrGLInterface> glInterface(SkRef(glCtx->gl()));
    if (kNVPR_GLContextType != type) {
        glInterface.reset(GrGLInterfaceRemoveNVPR(glInterface));
        if (!glInterface) {
            return NULL;
        }
    } else {
        if (!glInterface->hasExtension("GL_NV_path_rendering")) {
            return NULL;
        }
    }

    glCtx->makeCurrent();
    GrBackendContext p3dctx = reinterpret_cast<GrBackendContext>(glInterface.get());
#ifdef SK_VULKAN
    // NOTE(review): this passes the GL interface as the backend context for a
    // Vulkan GrContext -- presumably a testing shim; confirm this is intended.
    grCtx.reset(GrContext::Create(kVulkan_GrBackend, p3dctx, fGlobalOptions));
#else
    grCtx.reset(GrContext::Create(kOpenGL_GrBackend, p3dctx, fGlobalOptions));
#endif
    if (!grCtx.get()) {
        return NULL;
    }

    // Warn if path rendering support is not available for the NVPR type.
    if (kNVPR_GLContextType == type) {
        if (!grCtx->caps()->shaderCaps()->pathRenderingSupport()) {
            GrGpu* gpu = grCtx->getGpu();
            const GrGLContext* ctx = gpu->glContextForTesting();
            if (ctx) {
                const GrGLubyte* verUByte;
                GR_GL_CALL_RET(ctx->interface(), verUByte, GetString(GR_GL_VERSION));
                const char* ver = reinterpret_cast<const char*>(verUByte);
                SkDebugf("\nWARNING: nvprmsaa config requested, but driver path rendering "
                         "support not available. Maybe update the driver? Your driver version "
                         "string: \"%s\"\n", ver);
            } else {
                SkDebugf("\nWARNING: nvprmsaa config requested, but driver path rendering "
                         "support not available.\n");
            }
        }
    }

    // Cache the new context pair; the cache holds its own refs.
    GPUContext& ctx = fContexts.push_back();
    ctx.fGLContext = glCtx.get();
    ctx.fGLContext->ref();
    ctx.fGrContext = grCtx.get();
    ctx.fGrContext->ref();
    ctx.fType = type;
    return ctx.fGrContext;
}
// Identifies the renderer by parsing GL_RENDERER.
GrGLRenderer GrGLGetRenderer(const GrGLInterface* gl) {
    const GrGLubyte* rendererString;
    GR_GL_CALL_RET(gl, rendererString, GetString(GR_GL_RENDERER));
    return GrGLGetRendererFromString(reinterpret_cast<const char*>(rendererString));
}
// Identifies the vendor by parsing GL_VENDOR.
GrGLVendor GrGLGetVendor(const GrGLInterface* gl) {
    const GrGLubyte* vendorString;
    GR_GL_CALL_RET(gl, vendorString, GetString(GR_GL_VENDOR));
    return GrGLGetVendorFromString(reinterpret_cast<const char*>(vendorString));
}
// Builds and links a trivial pass-through program (position + per-vertex
// color) for the given context and returns the program id. On a link error
// the log is printed but the id is still returned (best-effort helper).
static GrGLuint compile_shader(const GrGLContext* ctx) {
    const char* version = GrGLGetGLSLVersionDecl(*ctx);

    // setup vertex shader
    GrGLShaderVar aPosition("a_position", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
    GrGLShaderVar aColor("a_color", kVec3f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
    GrGLShaderVar oColor("o_color", kVec3f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier);

    SkString vshaderTxt(version);
    aPosition.appendDecl(*ctx, &vshaderTxt);
    vshaderTxt.append(";\n");
    aColor.appendDecl(*ctx, &vshaderTxt);
    vshaderTxt.append(";\n");
    oColor.appendDecl(*ctx, &vshaderTxt);
    vshaderTxt.append(";\n");

    vshaderTxt.append(
            "void main()\n"
            "{\n"
                "gl_Position = vec4(a_position, 0.f, 1.f);\n"
                "o_color = a_color;\n"
            "}\n");

    const GrGLInterface* gl = ctx->interface();
    GrGLuint vertexShader = load_shader(gl, vshaderTxt.c_str(), GR_GL_VERTEX_SHADER);

    // setup fragment shader
    GrGLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, GrShaderVar::kOut_TypeModifier);
    SkString fshaderTxt(version);
    GrGLAppendGLSLDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, gl->fStandard,
                                                   &fshaderTxt);
    oColor.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
    oColor.appendDecl(*ctx, &fshaderTxt);
    fshaderTxt.append(";\n");

    // Core-profile GLSL requires the fragment output be declared; legacy GLSL
    // uses the built-in gl_FragColor instead.
    const char* fsOutName;
    if (ctx->caps()->glslCaps()->mustDeclareFragmentShaderOutput()) {
        oFragColor.appendDecl(*ctx, &fshaderTxt);
        fshaderTxt.append(";\n");
        fsOutName = oFragColor.c_str();
    } else {
        fsOutName = "gl_FragColor";
    }

    fshaderTxt.appendf(
            "void main()\n"
            "{\n"
                "%s = vec4(o_color, 1.0f);\n"
            "}\n", fsOutName);

    GrGLuint fragmentShader = load_shader(gl, fshaderTxt.c_str(), GR_GL_FRAGMENT_SHADER);

    // BUGFIX: CreateProgram returns a GLuint and this function returns
    // GrGLuint, so hold the program id in an unsigned variable (was GrGLint).
    GrGLuint shaderProgram;
    GR_GL_CALL_RET(gl, shaderProgram, CreateProgram());
    GR_GL_CALL(gl, AttachShader(shaderProgram, vertexShader));
    GR_GL_CALL(gl, AttachShader(shaderProgram, fragmentShader));
    GR_GL_CALL(gl, LinkProgram(shaderProgram));

    // Check for linking errors
    GrGLint success;
    GrGLchar infoLog[512];
    GR_GL_CALL(gl, GetProgramiv(shaderProgram, GR_GL_LINK_STATUS, &success));
    if (!success) {
        GR_GL_CALL(gl, GetProgramInfoLog(shaderProgram, 512, NULL, infoLog));
        SkDebugf("Linker Error: %s\n", infoLog);
    }

    // The program holds its own references; release ours.
    GR_GL_CALL(gl, DeleteShader(vertexShader));
    GR_GL_CALL(gl, DeleteShader(fragmentShader));

    return shaderProgram;
}
// Verifies that flushAndSignalSemaphores on a surface with no pending work
// still submits and signals a usable backend semaphore, then validates the
// semaphore with backend-specific checks (GL: IsSync; Vulkan: wait on it from
// a submitted empty command buffer).
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(EmptySurfaceSemaphoreTest, reporter, ctxInfo) {
    GrContext* ctx = ctxInfo.grContext();
    if (!ctx->caps()->fenceSyncSupport()) {
        return;
    }

    const SkImageInfo ii = SkImageInfo::Make(MAIN_W, MAIN_H, kRGBA_8888_SkColorType,
                                             kPremul_SkAlphaType);

    sk_sp<SkSurface> mainSurface(SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo,
                                                             ii, 0, kTopLeft_GrSurfaceOrigin,
                                                             nullptr));

    // Flush surface once without semaphores to make sure there is no pending IO for it.
    mainSurface->flush();

    GrBackendSemaphore semaphore;
    GrSemaphoresSubmitted submitted = mainSurface->flushAndSignalSemaphores(1, &semaphore);
    REPORTER_ASSERT(reporter, GrSemaphoresSubmitted::kYes == submitted);

    if (kOpenGL_GrBackend == ctxInfo.backend()) {
        // GL backend: the semaphore should wrap a valid GLsync object.
        GrGLGpu* gpu = static_cast<GrGLGpu*>(ctx->contextPriv().getGpu());
        const GrGLInterface* interface = gpu->glInterface();
        GrGLsync sync = semaphore.glSync();
        REPORTER_ASSERT(reporter, sync);
        bool result;
        GR_GL_CALL_RET(interface, result, IsSync(sync));
        REPORTER_ASSERT(reporter, result);
    }

#ifdef SK_VULKAN
    if (kVulkan_GrBackend == ctxInfo.backend()) {
        // Vulkan backend: prove the semaphore was signaled by submitting an
        // empty command buffer that waits on it, guarded by a fence.
        GrVkGpu* gpu = static_cast<GrVkGpu*>(ctx->contextPriv().getGpu());
        const GrVkInterface* interface = gpu->vkInterface();
        VkDevice device = gpu->device();
        VkQueue queue = gpu->queue();
        VkCommandPool cmdPool = gpu->cmdPool();
        VkCommandBuffer cmdBuffer;

        // Create Command Buffer
        const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
            nullptr,                                          // pNext
            cmdPool,                                          // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
            1                                                 // bufferCount
        };

        VkResult err = GR_VK_CALL(interface, AllocateCommandBuffers(device, &cmdInfo,
                                                                    &cmdBuffer));
        if (err) {
            return;
        }

        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = nullptr;

        // Record an empty command buffer.
        GR_VK_CALL_ERRCHECK(interface, BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
        GR_VK_CALL_ERRCHECK(interface, EndCommandBuffer(cmdBuffer));

        VkFenceCreateInfo fenceInfo;
        VkFence fence;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(interface, CreateFence(device, &fenceInfo, nullptr, &fence));
        SkASSERT(!err);

        // Submit, waiting on the semaphore produced by the flush above.
        VkPipelineStageFlags waitStages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
        VkSubmitInfo submitInfo;
        memset(&submitInfo, 0, sizeof(VkSubmitInfo));
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;
        submitInfo.waitSemaphoreCount = 1;
        VkSemaphore vkSem = semaphore.vkSemaphore();
        submitInfo.pWaitSemaphores = &vkSem;
        submitInfo.pWaitDstStageMask = &waitStages;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &cmdBuffer;
        submitInfo.signalSemaphoreCount = 0;
        submitInfo.pSignalSemaphores = nullptr;
        GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));

        // A 3 second timeout: the fence only signals once the semaphore wait completes.
        err = GR_VK_CALL(interface, WaitForFences(device, 1, &fence, true, 3000000000));

        REPORTER_ASSERT(reporter, err != VK_TIMEOUT);

        GR_VK_CALL(interface, DestroyFence(device, fence, nullptr));
        GR_VK_CALL(interface, DestroySemaphore(device, vkSem, nullptr));
        // If the above test fails the wait semaphore will never be signaled which can cause the
        // device to hang when tearing down (even if just tearing down GL). So we Fail here to
        // kill things.
        if (err == VK_TIMEOUT) {
            SK_ABORT("Waiting on semaphore indefinitely");
        }
    }
#endif
}