void StreamBuffer::Init()
{
    m_iterator = 0;
    m_used_iterator = 0;
    m_free_iterator = 0;

    switch (m_uploadtype)
    {
    case MAP_AND_SYNC:
        fences = new GLsync[SYNC_POINTS];
        for (u32 i = 0; i < SYNC_POINTS; i++)
            fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        // fall through: the sync path also needs the buffer storage below

    case MAP_AND_ORPHAN:
    case BUFFERSUBDATA:
        glBindBuffer(m_buffertype, m_buffer);
        glBufferData(m_buffertype, m_size, NULL, GL_STREAM_DRAW);
        break;

    case PINNED_MEMORY:
        glGetError(); // errors before this allocation should be ignored
        fences = new GLsync[SYNC_POINTS];
        for (u32 i = 0; i < SYNC_POINTS; i++)
            fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

        pointer = (u8*)AllocateAlignedMemory(ROUND_UP(m_size, ALIGN_PINNED_MEMORY), ALIGN_PINNED_MEMORY);
        glBindBuffer(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, m_buffer);
        glBufferData(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, ROUND_UP(m_size, ALIGN_PINNED_MEMORY), pointer, GL_STREAM_COPY);
        glBindBuffer(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, 0);
        glBindBuffer(m_buffertype, m_buffer);

        // on error, switch to another backend. some old catalyst seems to have broken pinned memory support
        if (glGetError() != GL_NO_ERROR)
        {
            ERROR_LOG(VIDEO, "Pinned memory detected, but not working. Please report this: %s, %s, %s",
                      g_ogl_config.gl_vendor, g_ogl_config.gl_renderer, g_ogl_config.gl_version);
            Shutdown();
            m_uploadtype = MAP_AND_SYNC;
            Init();
        }
        break;

    case MAP_AND_RISK:
        glBindBuffer(m_buffertype, m_buffer);
        glBufferData(m_buffertype, m_size, NULL, GL_STREAM_DRAW);
        pointer = (u8*)glMapBufferRange(m_buffertype, 0, m_size, GL_MAP_WRITE_BIT);
        glUnmapBuffer(m_buffertype);
        if (!pointer)
            ERROR_LOG(VIDEO, "Buffer allocation failed");
        break;

    case BUFFERDATA:
        glBindBuffer(m_buffertype, m_buffer);
        break;

    case STREAM_DETECT:
    case DETECT_MASK: // Just to shutup warnings
        break;
    }
}
void piglit_init(int argc, char **argv)
{
    GLsync sync;
    GLenum ret1, ret2;
    bool pass = true;

    piglit_require_extension("GL_ARB_sync");

    glClear(GL_COLOR_BUFFER_BIT);
    sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    ret1 = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
    glFinish();
    ret2 = glClientWaitSync(sync, 0, 0);
    glDeleteSync(sync);

    if (ret1 != GL_TIMEOUT_EXPIRED && ret1 != GL_ALREADY_SIGNALED) {
        fprintf(stderr,
                "On first wait:\n"
                "  Expected GL_ALREADY_SIGNALED or GL_TIMEOUT_EXPIRED\n"
                "  Got %s\n",
                piglit_get_gl_enum_name(ret1));
        pass = false;
    }

    if (ret2 != GL_ALREADY_SIGNALED) {
        fprintf(stderr,
                "On repeated wait:\n"
                "  Expected GL_ALREADY_SIGNALED\n"
                "  Got %s\n",
                piglit_get_gl_enum_name(ret2));
        pass = false;
    }

    glClear(GL_COLOR_BUFFER_BIT);
    sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glFinish();
    ret1 = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
    if (ret1 != GL_ALREADY_SIGNALED) {
        fprintf(stderr,
                "On wait after a finish:\n"
                "  Expected GL_ALREADY_SIGNALED\n"
                "  Got %s\n",
                piglit_get_gl_enum_name(ret1));
        pass = false;
    }

    piglit_report_result(pass ? PIGLIT_PASS : PIGLIT_FAIL);
}
void StreamBuffer::CreateFences()
{
    for (int i = 0; i < SYNC_POINTS; i++)
    {
        m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    }
}
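CreateFences has no matching teardown in the snippet above; the following is only a minimal cleanup sketch in the same style (the DestroyFences name and the convention of nulling each slot are assumptions, not taken from the original project):

void StreamBuffer::DestroyFences()
{
    for (int i = 0; i < SYNC_POINTS; i++)
    {
        if (m_fences[i])
        {
            // glDeleteSync on a still-pending fence is legal; the object is
            // freed once nothing waits on it any more.
            glDeleteSync(m_fences[i]);
            m_fences[i] = nullptr;
        }
    }
}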
void Sync()
{
    uint32 segment_current = m_offset / m_seg_size;
    uint32 segment_next = (m_offset + m_size) / m_seg_size;

    if (segment_current != segment_next) {
        if (segment_next >= countof(m_fence)) {
            segment_next = 0;
        }
        // Align current transfer on the start of the segment
        m_offset = m_seg_size * segment_next;

        // protect the left segment
        m_fence[segment_current] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

        // Check next segment is free
        if (m_fence[segment_next]) {
            GLenum status = glClientWaitSync(m_fence[segment_next], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
            // Potentially it doesn't work on AMD driver which might always return GL_CONDITION_SATISFIED
            if (status != GL_ALREADY_SIGNALED) {
                GL_PERF("GL_PIXEL_UNPACK_BUFFER: Sync Sync (%x)! Buffer too small ?", status);
            }

            glDeleteSync(m_fence[segment_next]);
            m_fence[segment_next] = 0;
        }
    }
}
inline GLsync create_fence_sync()
{
    ARC_GL_CLEAR_ERRORS();
    auto r = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    ARC_GL_CHECK_FOR_ERRORS();
    return r;
}
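A blocking-wait helper is a natural counterpart to the factory above. This is only a sketch that assumes the same ARC_GL_* error-checking macros are in scope; the one-second default timeout is an arbitrary choice:

inline bool client_wait_fence_sync(GLsync sync, GLuint64 timeout_ns = 1000000000 /* 1 s, assumed default */)
{
    ARC_GL_CLEAR_ERRORS();
    // GL_SYNC_FLUSH_COMMANDS_BIT makes sure the fence has been submitted
    // before the client starts waiting on it.
    const GLenum r = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, timeout_ns);
    ARC_GL_CHECK_FOR_ERRORS();
    return r == GL_ALREADY_SIGNALED || r == GL_CONDITION_SATISFIED;
}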
void piglit_init(int argc, char **argv)
{
    GLsync sync;
    GLenum ret1, ret2;

    piglit_require_extension("GL_ARB_sync");

    glClear(GL_COLOR_BUFFER_BIT);
    sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    ret1 = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, ONE_SECOND);
    ret2 = glClientWaitSync(sync, 0, ONE_SECOND);

    if (ret1 == GL_TIMEOUT_EXPIRED) {
        printf("timeout expired on the first wait\n");
        piglit_report_result(PIGLIT_SKIP);
    }

    if (ret2 != GL_ALREADY_SIGNALED) {
        fprintf(stderr,
                "Expected GL_ALREADY_SIGNALED on second wait, got %s",
                piglit_get_gl_enum_name(ret2));
        piglit_report_result(PIGLIT_FAIL);
    }

    piglit_report_result(PIGLIT_PASS);
}
void piglit_init(int argc, char **argv)
{
    bool pass = true;
    GLsync valid_sync;
    GLsync invalid_sync = (GLsync)20;

    if (piglit_get_gl_version() < 32) {
        piglit_require_extension("GL_ARB_sync");
    }

    valid_sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

    /* test that valid parameters passed results in NO_ERROR */
    glWaitSync(valid_sync, 0, GL_TIMEOUT_IGNORED);
    pass = piglit_check_gl_error(GL_NO_ERROR) && pass;

    /* test that invalid sync results in INVALID_VALUE */
    glWaitSync(invalid_sync, 0, GL_TIMEOUT_IGNORED);
    pass = piglit_check_gl_error(GL_INVALID_VALUE) && pass;

    /* test that invalid flag value results in INVALID_VALUE */
    glWaitSync(valid_sync, 3, GL_TIMEOUT_IGNORED);
    pass = piglit_check_gl_error(GL_INVALID_VALUE) && pass;

    glDeleteSync(valid_sync);

    piglit_report_result(pass ? PIGLIT_PASS : PIGLIT_FAIL);
}
enum piglit_result piglit_display(void)
{
    GLsync fence;
    bool pass = true;

    glViewport(0, 0, piglit_width, piglit_height);
    glClearColor(0.2, 0.2, 0.2, 0.2);
    glClear(GL_COLOR_BUFFER_BIT);

    memcpy(map, red, sizeof(red));
    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);

    /* Wait for any previous rendering to finish before updating
     * the texture buffer
     */
    fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);

    memcpy(map, green, sizeof(green));
    glDrawArrays(GL_TRIANGLE_FAN, 4, 4);

    pass = piglit_probe_rect_rgba(0, 0, piglit_width / 2, piglit_height, red) && pass;
    pass = piglit_probe_rect_rgba(piglit_width / 2, 0, piglit_width / 2, piglit_height, green) && pass;

    piglit_present_results();

    return pass ? PIGLIT_PASS : PIGLIT_FAIL;
}
void GLSync::init()
{
    if (m_sync != 0)
        cleanup();
    m_sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glFlush();
    if (m_sync == 0)
        Log(EError, "Unable to create a memory sync object!");
}
GLsync renderer::fence() const
{
#if BUILD_OPENGL
    const auto s = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    return s;
#else
    return 0;
#endif
}
void render()
{
    draw(gps.dimx * gps.dimy * 6);
    if (init.display.flag.has_flag(INIT_DISPLAY_FLAG_ARB_SYNC) && GL_ARB_sync) {
        assert(enabler.sync == NULL);
        enabler.sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    }
    SDL_GL_SwapBuffers();
}
void StreamBuffer::AllocMemory(u32 size)
{
    // insert waiting slots for used memory
    for (int i = Slot(m_used_iterator); i < Slot(m_iterator); i++)
    {
        m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    }
    m_used_iterator = m_iterator;

    // wait for new slots to end of buffer
    for (int i = Slot(m_free_iterator) + 1; i <= Slot(m_iterator + size) && i < SYNC_POINTS; i++)
    {
        glClientWaitSync(m_fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
        glDeleteSync(m_fences[i]);
    }

    // If we allocate a large amount of memory (A), commit a smaller amount, then allocate memory
    // smaller than allocation A, we will have already waited for these fences in A, but not used
    // the space. In this case, don't set m_free_iterator to a position before that which we know
    // is safe to use, which would result in waiting on the same fence(s) next time.
    if ((m_iterator + size) > m_free_iterator)
        m_free_iterator = m_iterator + size;

    // if buffer is full
    if (m_iterator + size >= m_size)
    {
        // insert waiting slots in unused space at the end of the buffer
        for (int i = Slot(m_used_iterator); i < SYNC_POINTS; i++)
        {
            m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        }

        // move to the start
        m_used_iterator = m_iterator = 0; // offset 0 is always aligned

        // wait for space at the start
        for (int i = 0; i <= Slot(m_iterator + size); i++)
        {
            glClientWaitSync(m_fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
            glDeleteSync(m_fences[i]);
        }
        m_free_iterator = m_iterator + size;
    }
}
void GL::ComputeShader::dispatch(ivec3 group_count)
{
    glDispatchCompute((GLuint)group_count.x, (GLuint)group_count.y, (GLuint)group_count.z);

    // Force sync
    GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    //glWaitSync(sync, 0, GL_TIMEOUT_IGNORED);
    glDeleteSync(sync);
}
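Note that the glWaitSync call above is commented out, so the fence is created and deleted without anything waiting on it. If the intent is to genuinely block until the dispatch has finished, a variant along these lines could be used; the dispatch_and_wait name is hypothetical, and whether a glMemoryBarrier alone would suffice depends on how the results are consumed:

void GL::ComputeShader::dispatch_and_wait(ivec3 group_count)
{
    glDispatchCompute((GLuint)group_count.x, (GLuint)group_count.y, (GLuint)group_count.z);

    // Make shader writes visible to later GL commands
    // (adjust the barrier bits to the resources actually written).
    glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);

    // Block the CPU until all commands issued so far, including the dispatch, have completed.
    GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
    glDeleteSync(sync);
}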
void StreamBuffer::AllocMemory(u32 size)
{
    // insert waiting slots for used memory
    for (int i = Slot(m_used_iterator); i < Slot(m_iterator); i++)
    {
        if (!m_fences[i])
        {
            m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        }
    }
    m_used_iterator = m_iterator;

    u32 start_fence = Slot(m_free_iterator) + 1;

    // if buffer is full
    if (m_iterator + size >= m_size)
    {
        // insert waiting slots in unused space at the end of the buffer
        for (int i = Slot(m_used_iterator); i < SYNC_POINTS; i++)
        {
            if (m_fences[i]) // release any fence still stored here before overwriting the slot
            {
                glDeleteSync(m_fences[i]);
            }
            m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        }

        // move to the start
        m_used_iterator = m_iterator = 0; // offset 0 is always aligned

        // wait for space at the start
        start_fence = 0;
    }

    u32 end_fence = std::min(Slot(m_iterator + size), SYNC_POINTS - 1);
    for (u32 i = start_fence; i <= end_fence; i++)
    {
        if (m_fences[i])
        {
            glClientWaitSync(m_fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
            glDeleteSync(m_fences[i]);
            m_fences[i] = 0;
        }
    }

    m_free_iterator = m_iterator + size;
}
void GLSink::draw()
{
    unsigned int uiBufferIdx;

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Update stream
    glBindTexture(GL_TEXTURE_2D, m_uiTexture);

    FrameData* pFrame = NULL;

    // block until new frame is available
    uiBufferIdx = m_pSyncBuffer->getBufferForReading((void*&)pFrame);

    m_pInputBuffer->bindBuffer(pFrame->uiBufferId);

    if (m_bUseP2P)
    {
        // This is a non-blocking call, but the GPU is instructed to wait until
        // the marker value is pFrame->uiTransferId before processing the subsequent
        // instructions
        m_pInputBuffer->waitMarker(pFrame->uiTransferId);
    }

    // Copy bus addressable buffer into texture object
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_uiTextureWidth, m_uiTextureHeight, m_nExtFormat, m_nType, NULL);

    // Insert fence to determine when the buffer was copied into the texture
    // and we can release the buffer
    GLsync Fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

    // Draw quad with mapped texture
    glPushMatrix();

    // Scale quad to the AR of the incoming texture
    glScalef(m_fAspectRatio, 1.0f, 1.0f);

    // Draw quad with mapped texture
    glBindBuffer(GL_ARRAY_BUFFER, m_uiQuad);
    glDrawArrays(GL_QUADS, 0, 4);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    glPopMatrix();

    glBindTexture(GL_TEXTURE_2D, 0);

    // Wait until buffer is no longer needed and release it
    if (glIsSync(Fence))
    {
        glClientWaitSync(Fence, GL_SYNC_FLUSH_COMMANDS_BIT, OneSecond);
        glDeleteSync(Fence);
    }

    m_pSyncBuffer->releaseReadBuffer();
}
void RenderThread::renderFrame()
{
    auto windowSize = _window->geometry().size();
    uvec2 readFboSize;
    uint32_t readFbo{ 0 };

    if (_activeFrame) {
        const auto& frame = _activeFrame;
        _backend->recycle();
        _backend->syncCache();
        _gpuContext->enableStereo(frame->stereoState._enable);
        if (frame && !frame->batches.empty()) {
            _gpuContext->executeFrame(frame);
        }
        auto& glBackend = static_cast<gpu::gl::GLBackend&>(*_backend);
        readFbo = glBackend.getFramebufferID(frame->framebuffer);
        readFboSize = frame->framebuffer->getSize();
        CHECK_GL_ERROR();
    } else {
        hifi::qml::OffscreenSurface::TextureAndFence newTextureAndFence;
        if (_offscreen->fetchTexture(newTextureAndFence)) {
            if (_uiTexture != 0) {
                auto readFence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
                glFlush();
                _offscreen->getDiscardLambda()(_uiTexture, readFence);
                _uiTexture = 0;
            }

            glWaitSync((GLsync)newTextureAndFence.second, 0, GL_TIMEOUT_IGNORED);
            glDeleteSync((GLsync)newTextureAndFence.second);
            _uiTexture = newTextureAndFence.first;
            glBindFramebuffer(GL_READ_FRAMEBUFFER, _uiFbo);
            glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, _uiTexture, 0);
        }
        if (_uiTexture != 0) {
            readFbo = _uiFbo;
            readFboSize = { windowSize.width(), windowSize.height() };
        }
    }

    if (readFbo) {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
        glBlitFramebuffer(0, 0, readFboSize.x, readFboSize.y,
                          0, 0, windowSize.width(), windowSize.height(),
                          GL_COLOR_BUFFER_BIT, GL_NEAREST);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    } else {
        glClearColor(1, 0, 0, 1);
        glClear(GL_COLOR_BUFFER_BIT);
    }

    _glContext.swapBuffers();
}
static void opengl_fence(int command)
{
#ifdef USE_GLES
#else
    if (command == FENCE_SET) {
        if (g_has_nv_fence) {
            //printf("...\n");
            glSetFenceNV(g_fence, GL_ALL_COMPLETED_NV);
            CHECK_GL_ERROR_MSG("glSetFenceNV(g_fence, GL_ALL_COMPLETED_NV)");
        } else if (g_has_apple_fence) {
            glSetFenceAPPLE(g_fence);
            CHECK_GL_ERROR_MSG("glSetFenceAPPLE(g_fence)");
        } else if (g_has_arb_sync) {
            g_sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
            CHECK_GL_ERROR_MSG("glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0)");
        }
    } else if (command == FENCE_WAIT) {
        if (g_has_nv_fence) {
            //printf("-- f --\n");
            //glFinishFenceNV(g_fence);
            //int64_t t1 = fs_get_monotonic_time();
            //fs_ml_usleep(10000);
            while (!glTestFenceNV(g_fence)) {
                CHECK_GL_ERROR_MSG("glTestFenceNV(g_fence)");
                //printf("-> %lld\n", fs_get_monotonic_time() - t1);
                //printf("%d\n", glGetError());
                fs_ml_usleep(1000);
                //printf("-> %lld\n", fs_get_monotonic_time() - t1);
            }
            CHECK_GL_ERROR_MSG("glTestFenceNV(g_fence)");
        } else if (g_has_apple_fence) {
            while (!glTestFenceAPPLE(g_fence)) {
                CHECK_GL_ERROR_MSG("glTestFenceAPPLE(g_fence)");
                fs_ml_usleep(1000);
            }
            CHECK_GL_ERROR_MSG("glTestFenceAPPLE(g_fence)");
        } else if (g_has_arb_sync) {
            int flags = GL_SYNC_FLUSH_COMMANDS_BIT;
            while (glClientWaitSync(g_sync, flags, 0) == GL_TIMEOUT_EXPIRED) {
                CHECK_GL_ERROR_MSG("glClientWaitSync(g_sync, flags, 0)");
                flags = 0;
                fs_ml_usleep(1000);
            }
            CHECK_GL_ERROR_MSG("glClientWaitSync(g_sync, flags, 0)");
        }
    }
#endif
}
///
// Main rendering call for the scene
//
void renderScene(void)
{
    glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);

    globalSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

    computeTexture();
    computeVBO();

    //displayTexture(imWidth, imHeight);
    renderVBO(vbolen);

    glutSwapBuffers();
}
void wined3d_event_query_issue(struct wined3d_event_query *query, IWineD3DDeviceImpl *device)
{
    const struct wined3d_gl_info *gl_info;
    struct wined3d_context *context;

    if (query->context)
    {
        if (!query->context->gl_info->supported[ARB_SYNC] && query->context->tid != GetCurrentThreadId())
        {
#ifdef VBOX_WINE_WITH_SINGLE_CONTEXT
            ERR("unexpected\n");
#endif
            context_free_event_query(query);
            context = context_acquire(device, NULL, CTXUSAGE_RESOURCELOAD);
            context_alloc_event_query(context, query);
        }
        else
        {
            context = context_acquire(device, query->context->current_rt, CTXUSAGE_RESOURCELOAD);
        }
    }
    else
    {
        context = context_acquire(device, NULL, CTXUSAGE_RESOURCELOAD);
        context_alloc_event_query(context, query);
    }

    gl_info = context->gl_info;

    ENTER_GL();

    if (gl_info->supported[ARB_SYNC])
    {
        if (query->object.sync) GL_EXTCALL(glDeleteSync(query->object.sync));
        checkGLcall("glDeleteSync");
        query->object.sync = GL_EXTCALL(glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
        checkGLcall("glFenceSync");
    }
    else if (gl_info->supported[APPLE_FENCE])
    {
        GL_EXTCALL(glSetFenceAPPLE(query->object.id));
        checkGLcall("glSetFenceAPPLE");
    }
    else if (gl_info->supported[NV_FENCE])
    {
        GL_EXTCALL(glSetFenceNV(query->object.id, GL_ALL_COMPLETED_NV));
        checkGLcall("glSetFenceNV");
    }

    LEAVE_GL();

    context_release(context);
}
void StreamBuffer::AllocMemory(u32 size)
{
    // insert waiting slots for used memory
    for (int i = SLOT(m_used_iterator); i < SLOT(m_iterator); i++)
    {
        fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    }
    m_used_iterator = m_iterator;

    // wait for new slots to end of buffer
    for (int i = SLOT(m_free_iterator) + 1; i <= SLOT(m_iterator + size) && i < SYNC_POINTS; i++)
    {
        glClientWaitSync(fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
        glDeleteSync(fences[i]);
    }
    m_free_iterator = m_iterator + size;

    // if buffer is full
    if (m_iterator + size >= m_size)
    {
        // insert waiting slots in unused space at the end of the buffer
        for (int i = SLOT(m_used_iterator); i < SYNC_POINTS; i++)
        {
            fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        }

        // move to the start
        m_used_iterator = m_iterator = 0; // offset 0 is always aligned

        // wait for space at the start
        for (int i = 0; i <= SLOT(m_iterator + size); i++)
        {
            glClientWaitSync(fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
            glDeleteSync(fences[i]);
        }
        m_free_iterator = m_iterator + size;
    }
}
void NvSharedVBOGL::EndUpdate()
{
#if !USE_PERSISTENT_MAPPING
    if (0 == m_vbo)
    {
        return;
    }
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, m_dataSize);
    glUnmapBuffer(GL_ARRAY_BUFFER);
#endif
    m_fences[m_index] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
}
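The wait side of this ring is not shown here: before a slot is written again, the fence recorded for it has to be consumed. The following is only a sketch of that step; the WaitForSegment name and the member layout are assumptions, not taken from the NVIDIA sample:

void NvSharedVBOGL::WaitForSegment(uint32_t index)
{
    if (m_fences[index])
    {
        // Block (with an implicit flush) until the GPU has finished reading this
        // segment, then release the fence so the slot can be recorded again.
        glClientWaitSync(m_fences[index], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
        glDeleteSync(m_fences[index]);
        m_fences[index] = 0;
    }
}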
/*
============
R_RotateRingbuffer
============
*/
static GLsizei R_RotateRingbuffer( glRingbuffer_t *rb )
{
    rb->syncs[ rb->activeSegment ] = glFenceSync( GL_SYNC_GPU_COMMANDS_COMPLETE, 0 );

    rb->activeSegment++;
    if( rb->activeSegment >= DYN_BUFFER_SEGMENTS )
        rb->activeSegment = 0;

    // wait until the next segment is ready, polling in 10 ms intervals
    while( glClientWaitSync( rb->syncs[ rb->activeSegment ], GL_SYNC_FLUSH_COMMANDS_BIT,
                             10000000 ) == GL_TIMEOUT_EXPIRED ) {
        ri.Printf( PRINT_WARNING, "long wait for GL buffer" );
    }

    glDeleteSync( rb->syncs[ rb->activeSegment ] );

    return rb->activeSegment * rb->segmentElements;
}
void wined3d_event_query_issue(struct wined3d_event_query *query, const struct wined3d_device *device)
{
    const struct wined3d_gl_info *gl_info;
    struct wined3d_context *context;

    if (query->context)
    {
        if (!query->context->gl_info->supported[ARB_SYNC] && query->context->tid != GetCurrentThreadId())
        {
            context_free_event_query(query);
            context = context_acquire(device, NULL);
            context_alloc_event_query(context, query);
        }
        else
        {
            context = context_acquire(device, query->context->current_rt);
        }
    }
    else
    {
        context = context_acquire(device, NULL);
        context_alloc_event_query(context, query);
    }

    gl_info = context->gl_info;

    if (gl_info->supported[ARB_SYNC])
    {
        if (query->object.sync) GL_EXTCALL(glDeleteSync(query->object.sync));
        checkGLcall("glDeleteSync");
        query->object.sync = GL_EXTCALL(glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
        checkGLcall("glFenceSync");
    }
    else if (gl_info->supported[APPLE_FENCE])
    {
        GL_EXTCALL(glSetFenceAPPLE(query->object.id));
        checkGLcall("glSetFenceAPPLE");
    }
    else if (gl_info->supported[NV_FENCE])
    {
        GL_EXTCALL(glSetFenceNV(query->object.id, GL_ALL_COMPLETED_NV));
        checkGLcall("glSetFenceNV");
    }

    context_release(context);
}
/*
============
R_InitRingbuffer
============
*/
static void R_InitRingbuffer( GLenum target, GLsizei elementSize, GLsizei segmentElements,
                              glRingbuffer_t *rb )
{
    GLsizei totalSize = elementSize * segmentElements * DYN_BUFFER_SEGMENTS;
    int     i;

    glBufferStorage( target, totalSize, nullptr,
                     GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT );
    rb->baseAddr = glMapBufferRange( target, 0, totalSize,
                                     GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
                                     GL_MAP_FLUSH_EXPLICIT_BIT );
    rb->elementSize = elementSize;
    rb->segmentElements = segmentElements;
    rb->activeSegment = 0;
    for( i = 1; i < DYN_BUFFER_SEGMENTS; i++ ) {
        rb->syncs[ i ] = glFenceSync( GL_SYNC_GPU_COMMANDS_COMPLETE, 0 );
    }
}
void GLSink::draw()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Update stream
    glBindTexture(GL_TEXTURE_2D, m_uiTexture);

    FrameData* pFrame = NULL;

    // block until new frame is available
    m_uiBufferIdx = m_pInputBuffer->getBufferForReading((void*&)pFrame);

    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, m_pUnPackBuffer[m_uiBufferIdx]);

    glWaitMarkerAMD(m_pUnPackBuffer[m_uiBufferIdx], pFrame->uiTransferId);

    // Copy pinned mem to texture
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_uiTextureWidth, m_uiTextureHeight, m_nExtFormat, m_nType, NULL);

    // Insert fence to determine when we can release the buffer
    GLsync Fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

    glPushMatrix();

    // Scale quad to the AR of the incoming texture
    glScalef(m_fAspectRatio, 1.0f, 1.0f);

    // Draw quad with mapped texture
    glBindBuffer(GL_ARRAY_BUFFER, m_uiQuad);
    glDrawArrays(GL_QUADS, 0, 4);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    glPopMatrix();

    glBindTexture(GL_TEXTURE_2D, 0);

    if (glIsSync(Fence))
    {
        glClientWaitSync(Fence, GL_SYNC_FLUSH_COMMANDS_BIT, OneSecond);
        glDeleteSync(Fence);
    }

    m_pInputBuffer->releaseReadBuffer();
}
void EmitParticles ( ESContext *esContext, float deltaTime )
{
    UserData *userData = esContext->userData;
    GLuint srcVBO = userData->particleVBOs[ userData->curSrcIndex ];
    GLuint dstVBO = userData->particleVBOs[ ( userData->curSrcIndex + 1 ) % 2 ];

    glUseProgram ( userData->emitProgramObject );

    SetupVertexAttributes ( esContext, srcVBO );

    // Set transform feedback buffer
    glBindBuffer ( GL_TRANSFORM_FEEDBACK_BUFFER, dstVBO );
    glBindBufferBase ( GL_TRANSFORM_FEEDBACK_BUFFER, 0, dstVBO );

    // Turn off rasterization - we are not drawing
    glEnable ( GL_RASTERIZER_DISCARD );

    // Set uniforms
    glUniform1f ( userData->emitTimeLoc, userData->time );
    glUniform1f ( userData->emitEmissionRateLoc, EMISSION_RATE );

    // Bind the 3D noise texture
    glActiveTexture ( GL_TEXTURE0 );
    glBindTexture ( GL_TEXTURE_3D, userData->noiseTextureId );
    glUniform1i ( userData->emitNoiseSamplerLoc, 0 );

    // Emit particles using transform feedback
    glBeginTransformFeedback ( GL_POINTS );
    glDrawArrays ( GL_POINTS, 0, NUM_PARTICLES );
    glEndTransformFeedback();

    // Create a sync object to ensure transform feedback results are completed
    // before the draw that uses them.
    userData->emitSync = glFenceSync ( GL_SYNC_GPU_COMMANDS_COMPLETE, 0 );

    // Restore state
    glDisable ( GL_RASTERIZER_DISCARD );
    glUseProgram ( 0 );
    glBindBufferBase ( GL_TRANSFORM_FEEDBACK_BUFFER, 0, 0 );
    glBindBuffer ( GL_ARRAY_BUFFER, 0 );
    glBindTexture ( GL_TEXTURE_3D, 0 );

    // Ping pong the buffers
    userData->curSrcIndex = ( userData->curSrcIndex + 1 ) % 2;
}
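The consumer of userData->emitSync is not part of this snippet; the draw pass that follows should only source the transform-feedback results after the GPU has passed this point. A minimal sketch of that wait, reusing the names above (the wrapper function name is illustrative only):

void WaitForEmittedParticles ( ESContext *esContext )
{
    UserData *userData = esContext->userData;

    // Make the GPU wait for the emit pass to complete before the draw
    // that sources the freshly written particle VBO, then release the sync.
    glWaitSync ( userData->emitSync, 0, GL_TIMEOUT_IGNORED );
    glDeleteSync ( userData->emitSync );
}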
void NextPboWithSync()
{
    m_fence[m_current_pbo] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

    NextPbo();

    if (m_fence[m_current_pbo]) {
#ifdef ENABLE_OGL_DEBUG_FENCE
        GLenum status = glClientWaitSync(m_fence[m_current_pbo], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
#else
        glClientWaitSync(m_fence[m_current_pbo], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
#endif
        glDeleteSync(m_fence[m_current_pbo]);
        m_fence[m_current_pbo] = 0;

#ifdef ENABLE_OGL_DEBUG_FENCE
        if (status != GL_ALREADY_SIGNALED) {
            fprintf(stderr, "GL_PIXEL_UNPACK_BUFFER: Sync Sync! Buffer too small\n");
        }
#endif
    }
}
enum piglit_result piglit_display(void)
{
    GLsync fence;
    bool pass = true;
    int x0 = piglit_width / 4;
    int x1 = piglit_width * 3 / 4;
    int y0 = piglit_height / 4;
    int y1 = piglit_height * 3 / 4;
    int i;

    glViewport(0, 0, piglit_width, piglit_height);
    glClear(GL_COLOR_BUFFER_BIT);

    for (i = 0; i < NUM_SQUARES; i++) {
        /* Wait for any previous rendering to finish before
         * updating the UBOs
         */
        fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);

        /* Load UBO data */
        memcpy(ubos[0], pos_size[i], sizeof(pos_size[0]));
        memcpy(ubos[1], color[i], sizeof(color[0]));
        memcpy(ubos[2], &rotation[i], sizeof(rotation[0]));

        piglit_draw_rect(-1, -1, 2, 2);
    }

    pass = probe(x0, y0, 0) && pass;
    pass = probe(x1, y0, 1) && pass;
    pass = probe(x0, y1, 2) && pass;
    pass = probe(x1, y1, 3) && pass;

    piglit_present_results();

    return pass ? PIGLIT_PASS : PIGLIT_FAIL;
}
bool render()
{
    glm::ivec2 WindowSize(this->getWindowSize());

    {
        glBindBuffer(GL_UNIFORM_BUFFER, BufferName[buffer::TRANSFORM]);
        glm::mat4* Pointer = (glm::mat4*)glMapBufferRange(
            GL_UNIFORM_BUFFER, 0, sizeof(glm::mat4),
            GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);

        glm::mat4 Projection = glm::perspective(glm::pi<float>() * 0.25f, 4.0f / 3.0f, 0.1f, 100.0f);
        glm::mat4 Model = glm::mat4(1.0f);
        *Pointer = Projection * this->view() * Model;

        // Make sure the uniform buffer is uploaded
        glUnmapBuffer(GL_UNIFORM_BUFFER);
    }

    glViewport(0, 0, WindowSize.x, WindowSize.y);
    glClearBufferfv(GL_COLOR, 0, &glm::vec4(1.0f, 0.5f, 0.0f, 1.0f)[0]);

    SyncName = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

    glUseProgram(ProgramName);
    glUniform1i(UniformDiffuse, 0);
    glUniformBlockBinding(ProgramName, UniformTransform, semantic::uniform::TRANSFORM0);

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, TextureName);
    glBindBufferBase(GL_UNIFORM_BUFFER, semantic::uniform::TRANSFORM0, BufferName[buffer::TRANSFORM]);
    glBindVertexArray(VertexArrayName);

    glDrawArraysInstanced(GL_TRIANGLES, 0, VertexCount, 1);

    GLint64 MaxTimeout(1000);
    GLenum Result = glClientWaitSync(SyncName, GL_SYNC_FLUSH_COMMANDS_BIT, (GLuint64)MaxTimeout);

    return true;
}
static bool test_object_ptr_label()
{
    GLsync sync;
    GLsizei length;
    GLchar label[TestLabelLen + 1];
    bool pass = true;

    puts("Test ObjectPtrLabel");

    /* basic check to see if glObjectPtrLabel/glGetObjectPtrLabel
     * set/get the label
     */
    sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    ObjectPtrLabel(sync, -1, TestLabel);
    GetObjectPtrLabel(sync, TestLabelLen + 1, &length, label);

    if (length != TestLabelLen || (strcmp(TestLabel, label) != 0)) {
        fprintf(stderr, "Label or length does not match\n");
        printf("  actual label: %s actual length: %i\n", label, length);
        printf("  expected label: %s expected length: %i\n", TestLabel, TestLabelLen);
        pass = false;
    }

    glDeleteSync(sync);

    /* An INVALID_VALUE is generated if the <ptr> parameter of ObjectPtrLabel
     * is not the name of a sync object.
     */
    ObjectPtrLabel(NULL, length, label);
    if (!piglit_check_gl_error(GL_INVALID_VALUE)) {
        fprintf(stderr,
                "GL_INVALID_VALUE should be generated when ObjectPtrLabel()"
                " ptr is not the name of a sync object\n");
        pass = false;
    }

    return pass;
}
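For reference, the basic pattern that most of the snippets above elaborate on, reduced to a minimal fragment (not taken from any of the projects shown; the 1 ms polling timeout is an arbitrary choice):

/* Issue some GL work, then block the CPU until the GPU has consumed it. */
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
GLenum status;
do {
    /* GL_SYNC_FLUSH_COMMANDS_BIT guarantees the fence is actually submitted;
     * otherwise the loop could spin forever on an unflushed command stream. */
    status = glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, 1000000 /* 1 ms */);
} while (status == GL_TIMEOUT_EXPIRED);
glDeleteSync(fence);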