void StreamBuffer::Shutdown()
{
	switch (m_uploadtype)
	{
	case MAP_AND_SYNC:
		for (u32 i = 0; i < SYNC_POINTS; i++)
			glDeleteSync(fences[i]);
		delete[] fences;
		break;

	case MAP_AND_RISK:
	case MAP_AND_ORPHAN:
	case BUFFERSUBDATA:
	case BUFFERDATA:
		break;

	case PINNED_MEMORY:
		for (u32 i = 0; i < SYNC_POINTS; i++)
			glDeleteSync(fences[i]);
		delete[] fences;
		glBindBuffer(m_buffertype, 0);
		glFinish(); // the GL pipeline must be flushed, else this buffer can still be in use
		FreeAlignedMemory(pointer);
		break;

	case STREAM_DETECT:
	case DETECT_MASK: // just to shut up warnings
		break;
	}
}
void StreamBuffer::DeleteFences()
{
	for (int i = Slot(m_free_iterator) + 1; i < SYNC_POINTS; i++)
	{
		glDeleteSync(m_fences[i]);
	}
	for (int i = 0; i < Slot(m_iterator); i++)
	{
		glDeleteSync(m_fences[i]);
	}
}
transfer* reserveTransfer()
{
	transfer* Transfer = nullptr;

	if (ReadPixelBufferFree.empty())
	{
		// No spare transfer available: create a fresh PBO-backed one
		Transfer = new transfer;
		glGenBuffers(1, &Transfer->Buffer);
		glBindBuffer(GL_PIXEL_PACK_BUFFER, Transfer->Buffer);
		glBufferData(GL_PIXEL_PACK_BUFFER, 640 * 480 * 4, NULL, GL_DYNAMIC_DRAW);
		Transfer->Fence = nullptr;
	}
	else
	{
		// Recycle a finished transfer; drop its stale fence first
		Transfer = ReadPixelBufferFree.back();
		if (Transfer->Fence)
		{
			glDeleteSync(Transfer->Fence);
			Transfer->Fence = nullptr;
		}
		ReadPixelBufferFree.pop();
	}

	ReadPixelBufferLive.push(Transfer);
	return Transfer;
}
static void Screenshot( const ScreenshotCommand &cmd ) {
	//NOTE: glGetSynciv never returns GL_SIGNALED on Mac
	//	see also: chromium src/ui/gl/gl_fence.cc revision 213908 -> 213907
	//	https://src.chromium.org/viewvc/chrome/trunk/src/ui/gl/gl_fence.cc?r1=213908&r2=213907
	//	device details: Yosemite 10.10.3, Intel HD Graphics 5000
#if defined(XS_OS_MAC)
	GLenum res = glClientWaitSync( cmd.sync, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED );
	SDL_assert( res != GL_TIMEOUT_EXPIRED );
#else
	GLint signalled = GL_UNSIGNALED;
	do {
		glGetSynciv( cmd.sync, GL_SYNC_STATUS, 1, nullptr, &signalled );
	} while ( signalled != GL_SIGNALED );
#endif
	glDeleteSync( cmd.sync );

	glBindBuffer( GL_PIXEL_PACK_BUFFER, cmd.pbo );
	void *data = glMapBuffer( GL_PIXEL_PACK_BUFFER, GL_READ_ONLY );

	console.Print( PrintLevel::Normal, "Writing screenshot %s (%ix%i)...\n",
		cmd.name, cmd.width, cmd.height );

	//TODO: strip alpha?
	WritePNG( cmd.name, reinterpret_cast<uint8_t *>( data ), cmd.width, cmd.height, 4 );

	glUnmapBuffer( GL_PIXEL_PACK_BUFFER );
}
void queryTransfer()
{
	while (!ReadPixelBufferLive.empty())
	{
		transfer* Transfer = ReadPixelBufferLive.front();

		GLint Status = 0;
		GLsizei Length = 0;
		// Poll the fence without blocking; bufSize is 1 because we read a single GLint
		glGetSynciv(Transfer->Fence, GL_SYNC_STATUS, 1, &Length, &Status);

		if (Status == GL_SIGNALED)
		{
			glDeleteSync(Transfer->Fence);

			glBindBuffer(GL_PIXEL_PACK_BUFFER, Transfer->Buffer);
			void* Data = glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, 640 * 480 * 4, GL_MAP_READ_BIT);
			memcpy(&ReadPixelData[0], Data, 640 * 480 * 4);
			glUnmapBuffer(GL_PIXEL_PACK_BUFFER);

			ReadPixelBufferFree.push(Transfer);
			ReadPixelBufferLive.pop();
		}
		else
		{
			// Oldest transfer not finished yet; later ones can't be ready either
			break;
		}
	}
}
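// Hedged sketch (this helper is not in the snippets above; the name
// issueTransfer is an assumption): the step the reserve/poll pair leaves
// implicit is queuing the readback itself. With a GL_PIXEL_PACK_BUFFER bound,
// glReadPixels returns immediately (the last argument is a byte offset into
// the PBO), and the fence created right after it is what queryTransfer()
// later polls. Dimensions match the 640x480 RGBA buffers allocated above.
void issueTransfer()
{
	transfer* Transfer = reserveTransfer();

	glBindBuffer(GL_PIXEL_PACK_BUFFER, Transfer->Buffer);
	glReadPixels(0, 0, 640, 480, GL_RGBA, GL_UNSIGNED_BYTE, 0);

	// The fence signals once the GPU has finished writing into the PBO
	Transfer->Fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}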
void Sync()
{
	uint32 segment_current = m_offset / m_seg_size;
	uint32 segment_next = (m_offset + m_size) / m_seg_size;

	if (segment_current != segment_next)
	{
		if (segment_next >= countof(m_fence))
		{
			segment_next = 0;
		}
		// Align current transfer on the start of the segment
		m_offset = m_seg_size * segment_next;

		// protect the left segment
		m_fence[segment_current] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

		// Check next segment is free
		if (m_fence[segment_next])
		{
			GLenum status = glClientWaitSync(m_fence[segment_next], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
			// Potentially it doesn't work on AMD driver which might always return GL_CONDITION_SATISFIED
			if (status != GL_ALREADY_SIGNALED)
			{
				GL_PERF("GL_PIXEL_UNPACK_BUFFER: Sync Sync (%x)! Buffer too small ?", status);
			}
			glDeleteSync(m_fence[segment_next]);
			m_fence[segment_next] = 0;
		}
	}
}
void NvSharedVBOGL::Finish()
{
	// Wait on all fences and release them as they complete
	if (nullptr != m_fences)
	{
		for (uint32_t i = 0; i < m_numBuffers; ++i)
		{
			// Busy-wait in 1 ns slices; note that a GL_WAIT_FAILED return
			// (e.g. on an invalid sync) would spin here forever
			GLenum waitStatus = GL_UNSIGNALED;
			while (waitStatus != GL_ALREADY_SIGNALED && waitStatus != GL_CONDITION_SATISFIED)
			{
				waitStatus = glClientWaitSync(m_fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, 1);
			}
			glDeleteSync(m_fences[i]);
		}
		delete[] m_fences;
		m_fences = nullptr;
	}
	if (nullptr != m_vboData)
	{
		glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
		glUnmapBuffer(GL_ARRAY_BUFFER);
	}
	if (0 != m_vbo)
	{
		glDeleteBuffers(1, &m_vbo);
	}
}
void piglit_init(int argc, char **argv)
{
	bool pass = true;
	GLsync valid_sync;
	GLsync invalid_sync = (GLsync)20;

	if (piglit_get_gl_version() < 32) {
		piglit_require_extension("GL_ARB_sync");
	}

	valid_sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

	/* test that valid parameters passed results in NO_ERROR */
	glWaitSync(valid_sync, 0, GL_TIMEOUT_IGNORED);
	pass = piglit_check_gl_error(GL_NO_ERROR) && pass;

	/* test that invalid sync results in INVALID_VALUE */
	glWaitSync(invalid_sync, 0, GL_TIMEOUT_IGNORED);
	pass = piglit_check_gl_error(GL_INVALID_VALUE) && pass;

	/* test that invalid flag value results in INVALID_VALUE */
	glWaitSync(valid_sync, 3, GL_TIMEOUT_IGNORED);
	pass = piglit_check_gl_error(GL_INVALID_VALUE) && pass;

	glDeleteSync(valid_sync);

	piglit_report_result(pass ? PIGLIT_PASS : PIGLIT_FAIL);
}
void StreamBuffer::AllocMemory(u32 size)
{
	// insert waiting slots for used memory
	for (int i = Slot(m_used_iterator); i < Slot(m_iterator); i++)
	{
		m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
	}
	m_used_iterator = m_iterator;

	// wait for new slots to end of buffer
	for (int i = Slot(m_free_iterator) + 1; i <= Slot(m_iterator + size) && i < SYNC_POINTS; i++)
	{
		glClientWaitSync(m_fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
		glDeleteSync(m_fences[i]);
	}

	// If we allocate a large amount of memory (A), commit a smaller amount, then allocate memory
	// smaller than allocation A, we will have already waited for these fences in A, but not used
	// the space. In this case, don't set m_free_iterator to a position before that which we know
	// is safe to use, which would result in waiting on the same fence(s) next time.
	if ((m_iterator + size) > m_free_iterator)
		m_free_iterator = m_iterator + size;

	// if buffer is full
	if (m_iterator + size >= m_size)
	{
		// insert waiting slots in unused space at the end of the buffer
		for (int i = Slot(m_used_iterator); i < SYNC_POINTS; i++)
		{
			m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
		}

		// move to the start
		m_used_iterator = m_iterator = 0; // offset 0 is always aligned

		// wait for space at the start
		for (int i = 0; i <= Slot(m_iterator + size); i++)
		{
			glClientWaitSync(m_fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
			glDeleteSync(m_fences[i]);
		}
		m_free_iterator = m_iterator + size;
	}
}
void GL::ComputeShader::dispatch(ivec3 group_count)
{
	glDispatchCompute((GLuint)group_count.x, (GLuint)group_count.y, (GLuint)group_count.z);

	// NOTE: this does not actually force a sync: the fence is created and
	// deleted without ever being waited on (the glWaitSync is commented out)
	GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
	//glWaitSync(sync, 0, GL_TIMEOUT_IGNORED);
	glDeleteSync(sync);
}
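// Hedged sketch (dispatchAndWait is illustrative, not part of the class above):
// to actually block the CPU until the dispatch finishes, the fence has to be
// waited on before deletion. GL_SYNC_FLUSH_COMMANDS_BIT on the first wait
// guarantees the fence reaches the GPU; without some flush the loop could spin
// forever. If only GPU-side ordering is needed, glMemoryBarrier() or
// glWaitSync() is usually the cheaper choice.
void dispatchAndWait(GLuint x, GLuint y, GLuint z)
{
	glDispatchCompute(x, y, z);

	GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
	GLenum status = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 1000000);
	while (status == GL_TIMEOUT_EXPIRED)
		status = glClientWaitSync(sync, 0, 1000000); // poll in 1 ms slices
	glDeleteSync(sync);
}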
bool end()
{
	glDeleteSync(SyncName);
	glDeleteBuffers(buffer::MAX, &BufferName[0]);
	glDeleteProgram(ProgramName);
	glDeleteTextures(1, &TextureName);
	glDeleteVertexArrays(1, &VertexArrayName);

	return this->checkError("end");
}
bool end()
{
	glDeleteSync(SyncName);
	glDeleteBuffers(buffer::MAX, &BufferName[0]);
	glDeleteProgram(ProgramName);
	glDeleteTextures(1, &TextureName);
	glDeleteVertexArrays(1, &VertexArrayName);

	return true;
}
void StreamBuffer::AllocMemory(u32 size)
{
	// insert waiting slots for used memory
	for (int i = Slot(m_used_iterator); i < Slot(m_iterator); i++)
	{
		if (!m_fences[i])
		{
			m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
		}
	}
	m_used_iterator = m_iterator;

	u32 start_fence = Slot(m_free_iterator) + 1;

	// if buffer is full
	if (m_iterator + size >= m_size)
	{
		// insert waiting slots in unused space at the end of the buffer
		for (int i = Slot(m_used_iterator); i < SYNC_POINTS; i++)
		{
			// delete any fence still occupying this slot before overwriting it
			if (m_fences[i])
			{
				glDeleteSync(m_fences[i]);
			}
			m_fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
		}

		// move to the start
		m_used_iterator = m_iterator = 0; // offset 0 is always aligned

		// wait for space at the start
		start_fence = 0;
	}

	u32 end_fence = std::min(Slot(m_iterator + size), SYNC_POINTS - 1);
	for (u32 i = start_fence; i <= end_fence; i++)
	{
		if (m_fences[i])
		{
			glClientWaitSync(m_fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
			glDeleteSync(m_fences[i]);
			m_fences[i] = 0;
		}
	}
	m_free_iterator = m_iterator + size;
}
void Destroy()
{
	m_map = NULL;
	m_offset = 0;

	for (size_t i = 0; i < countof(m_fence); i++)
	{
		glDeleteSync(m_fence[i]);
	}

	glDeleteBuffers(1, &m_buffer);
}
void RenderThread::renderFrame()
{
	auto windowSize = _window->geometry().size();
	uvec2 readFboSize;
	uint32_t readFbo{ 0 };

	if (_activeFrame)
	{
		const auto& frame = _activeFrame;
		_backend->recycle();
		_backend->syncCache();
		_gpuContext->enableStereo(frame->stereoState._enable);
		if (frame && !frame->batches.empty())
		{
			_gpuContext->executeFrame(frame);
		}
		auto& glBackend = static_cast<gpu::gl::GLBackend&>(*_backend);
		readFbo = glBackend.getFramebufferID(frame->framebuffer);
		readFboSize = frame->framebuffer->getSize();
		CHECK_GL_ERROR();
	}
	else
	{
		hifi::qml::OffscreenSurface::TextureAndFence newTextureAndFence;
		if (_offscreen->fetchTexture(newTextureAndFence))
		{
			if (_uiTexture != 0)
			{
				auto readFence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
				glFlush();
				_offscreen->getDiscardLambda()(_uiTexture, readFence);
				_uiTexture = 0;
			}
			glWaitSync((GLsync)newTextureAndFence.second, 0, GL_TIMEOUT_IGNORED);
			glDeleteSync((GLsync)newTextureAndFence.second);
			_uiTexture = newTextureAndFence.first;
			glBindFramebuffer(GL_READ_FRAMEBUFFER, _uiFbo);
			glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, _uiTexture, 0);
		}
		if (_uiTexture != 0)
		{
			readFbo = _uiFbo;
			readFboSize = { windowSize.width(), windowSize.height() };
		}
	}

	if (readFbo)
	{
		glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
		glBindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
		glBlitFramebuffer(
			0, 0, readFboSize.x, readFboSize.y,
			0, 0, windowSize.width(), windowSize.height(),
			GL_COLOR_BUFFER_BIT, GL_NEAREST);
		glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
	}
	else
	{
		glClearColor(1, 0, 0, 1);
		glClear(GL_COLOR_BUFFER_BIT);
	}

	_glContext.swapBuffers();
}
void GLSink::draw()
{
	unsigned int uiBufferIdx;

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Update stream
	glBindTexture(GL_TEXTURE_2D, m_uiTexture);

	FrameData* pFrame = NULL;

	// block until new frame is available
	uiBufferIdx = m_pSyncBuffer->getBufferForReading((void*&)pFrame);

	m_pInputBuffer->bindBuffer(pFrame->uiBufferId);

	if (m_bUseP2P)
	{
		// This is a non-blocking call, but the GPU is instructed to wait until
		// the marker value is pFrame->uiTransferId before processing the subsequent
		// instructions
		m_pInputBuffer->waitMarker(pFrame->uiTransferId);
	}

	// Copy bus addressable buffer into texture object
	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_uiTextureWidth, m_uiTextureHeight, m_nExtFormat, m_nType, NULL);

	// Insert fence to determine when the buffer was copied into the texture
	// and we can release the buffer
	GLsync Fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

	glPushMatrix();

	// Scale quad to the AR of the incoming texture
	glScalef(m_fAspectRatio, 1.0f, 1.0f);

	// Draw quad with mapped texture
	glBindBuffer(GL_ARRAY_BUFFER, m_uiQuad);
	glDrawArrays(GL_QUADS, 0, 4);
	glBindBuffer(GL_ARRAY_BUFFER, 0);

	glPopMatrix();
	glBindTexture(GL_TEXTURE_2D, 0);

	// Wait until buffer is no longer needed and release it
	if (glIsSync(Fence))
	{
		glClientWaitSync(Fence, GL_SYNC_FLUSH_COMMANDS_BIT, OneSecond);
		glDeleteSync(Fence);
	}

	m_pSyncBuffer->releaseReadBuffer();
}
void StreamBuffer::DeleteFences()
{
	for (int i = 0; i < SYNC_POINTS; i++)
	{
		if (m_fences[i])
		{
			glDeleteSync(m_fences[i]);
		}
		m_fences[i] = 0;
	}
}
void piglit_init(int argc, char **argv)
{
	GLsync sync;
	GLenum ret1, ret2;
	bool pass = true;

	piglit_require_extension("GL_ARB_sync");

	glClear(GL_COLOR_BUFFER_BIT);
	sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
	ret1 = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
	glFinish();
	ret2 = glClientWaitSync(sync, 0, 0);
	glDeleteSync(sync);

	if (ret1 != GL_TIMEOUT_EXPIRED && ret1 != GL_ALREADY_SIGNALED) {
		fprintf(stderr,
			"On first wait:\n"
			"  Expected GL_ALREADY_SIGNALED or GL_TIMEOUT_EXPIRED\n"
			"  Got %s\n",
			piglit_get_gl_enum_name(ret1));
		pass = false;
	}

	if (ret2 != GL_ALREADY_SIGNALED) {
		fprintf(stderr,
			"On repeated wait:\n"
			"  Expected GL_ALREADY_SIGNALED\n"
			"  Got %s\n",
			piglit_get_gl_enum_name(ret2));
		pass = false;
	}

	glClear(GL_COLOR_BUFFER_BIT);
	sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
	glFinish();
	ret1 = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
	glDeleteSync(sync);

	if (ret1 != GL_ALREADY_SIGNALED) {
		fprintf(stderr,
			"On wait after a finish:\n"
			"  Expected GL_ALREADY_SIGNALED\n"
			"  Got %s\n",
			piglit_get_gl_enum_name(ret1));
		pass = false;
	}

	piglit_report_result(pass ? PIGLIT_PASS : PIGLIT_FAIL);
}
void wined3d_event_query_issue(struct wined3d_event_query *query, IWineD3DDeviceImpl *device)
{
    const struct wined3d_gl_info *gl_info;
    struct wined3d_context *context;

    if (query->context)
    {
        if (!query->context->gl_info->supported[ARB_SYNC] && query->context->tid != GetCurrentThreadId())
        {
#ifdef VBOX_WINE_WITH_SINGLE_CONTEXT
            ERR("unexpected\n");
#endif
            context_free_event_query(query);
            context = context_acquire(device, NULL, CTXUSAGE_RESOURCELOAD);
            context_alloc_event_query(context, query);
        }
        else
        {
            context = context_acquire(device, query->context->current_rt, CTXUSAGE_RESOURCELOAD);
        }
    }
    else
    {
        context = context_acquire(device, NULL, CTXUSAGE_RESOURCELOAD);
        context_alloc_event_query(context, query);
    }

    gl_info = context->gl_info;

    ENTER_GL();
    if (gl_info->supported[ARB_SYNC])
    {
        if (query->object.sync) GL_EXTCALL(glDeleteSync(query->object.sync));
        checkGLcall("glDeleteSync");
        query->object.sync = GL_EXTCALL(glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
        checkGLcall("glFenceSync");
    }
    else if (gl_info->supported[APPLE_FENCE])
    {
        GL_EXTCALL(glSetFenceAPPLE(query->object.id));
        checkGLcall("glSetFenceAPPLE");
    }
    else if (gl_info->supported[NV_FENCE])
    {
        GL_EXTCALL(glSetFenceNV(query->object.id, GL_ALL_COMPLETED_NV));
        checkGLcall("glSetFenceNV");
    }
    LEAVE_GL();

    context_release(context);
}
void Destroy()
{
	for (size_t i = 0; i < countof(m_pool); i++)
	{
		m_map[i] = NULL;
		m_offset[i] = 0;

		glDeleteSync(m_fence[i]);

		// Don't know if we must do it
		glBindBuffer(GL_PIXEL_UNPACK_BUFFER, m_pool[i]);
		glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
	}
	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

	glDeleteBuffers(countof(m_pool), m_pool);
}
WindowsBuffer::~WindowsBuffer()
{
	glDeleteBuffers(getNumBuffers(), _bufferIDs);
	delete[] _bufferIDs;

	for (int i = 0; i < getNumBuffers(); i++)
	{
		if (_fences[i] != 0)
		{
			glDeleteSync(_fences[i]);
		}
	}
	delete[] _fences;
}
/*
============
R_ShutdownRingbuffer
============
*/
static void R_ShutdownRingbuffer( GLenum target, glRingbuffer_t *rb )
{
	int i;

	glUnmapBuffer( target );
	rb->baseAddr = nullptr;

	for( i = 0; i < DYN_BUFFER_SEGMENTS; i++ ) {
		if( i == rb->activeSegment )
			continue;

		glDeleteSync( rb->syncs[ i ] );
	}
}
void StreamBuffer::AllocMemory(u32 size)
{
	// insert waiting slots for used memory
	for (int i = SLOT(m_used_iterator); i < SLOT(m_iterator); i++)
	{
		fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
	}
	m_used_iterator = m_iterator;

	// wait for new slots to end of buffer
	for (int i = SLOT(m_free_iterator) + 1; i <= SLOT(m_iterator + size) && i < SYNC_POINTS; i++)
	{
		glClientWaitSync(fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
		glDeleteSync(fences[i]);
	}
	m_free_iterator = m_iterator + size;

	// if buffer is full
	if (m_iterator + size >= m_size)
	{
		// insert waiting slots in unused space at the end of the buffer
		for (int i = SLOT(m_used_iterator); i < SYNC_POINTS; i++)
		{
			fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
		}

		// move to the start
		m_used_iterator = m_iterator = 0; // offset 0 is always aligned

		// wait for space at the start
		for (int i = 0; i <= SLOT(m_iterator + size); i++)
		{
			glClientWaitSync(fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
			glDeleteSync(fences[i]);
		}
		m_free_iterator = m_iterator + size;
	}
}
bool NvSharedVBOGL::BeginUpdate()
{
	// Next buffer in the cycle
	uint32_t nextIndex = (m_index + 1) % m_numBuffers;

	// Wait for the copy we're about to write...
	if (nullptr != m_fences[nextIndex])
	{
		GLenum waitStatus = glClientWaitSync(m_fences[nextIndex], GL_SYNC_FLUSH_COMMANDS_BIT, 1);
		if (waitStatus == GL_TIMEOUT_EXPIRED)
		{
#ifdef _DEBUG
			LOGI("Timed out waiting for NvSharedVBOGL sync!");
#endif
			return false;
		}
		else if (waitStatus == GL_WAIT_FAILED)
		{
#ifdef _DEBUG
			LOGI("Failed waiting for NvSharedVBOGL sync!");
#endif
			return false;
		}

		// Successfully waited for the fence. Clear it and continue;
		glDeleteSync(m_fences[nextIndex]);
		m_fences[nextIndex] = nullptr;
	}

#if !USE_PERSISTENT_MAPPING
	if (0 == m_vbo)
	{
		return false;
	}

	glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
	GLbitfield flags = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_UNSYNCHRONIZED_BIT;
	GLvoid* pBuff = glMapBufferRange(GL_ARRAY_BUFFER, m_dataSize * nextIndex, m_dataSize, flags);
	m_vboData = static_cast<uint8_t*>(pBuff);
	if (nullptr == m_vboData)
	{
		CHECK_GL_ERROR();
		return false;
	}
#endif

	m_index = nextIndex;
	return true;
}
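// Hedged sketches of the counterparts BeginUpdate() implies; the method names
// and bodies here are assumptions for illustration, not taken from the real
// NvSharedVBOGL class. With GL_MAP_FLUSH_EXPLICIT_BIT the written range must
// be flushed before unmapping (the offset is relative to the mapped range, so
// it is 0 here), and the fence for a slot is re-armed after the draw calls
// that read it have been submitted, which is what BeginUpdate() waits on.
bool NvSharedVBOGL::EndUpdate()
{
#if !USE_PERSISTENT_MAPPING
	glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
	glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, m_dataSize);
	glUnmapBuffer(GL_ARRAY_BUFFER);
	m_vboData = nullptr;
#endif
	return true;
}

void NvSharedVBOGL::DoneRendering()
{
	// Guard this slot: the next BeginUpdate() that lands on it will wait here
	m_fences[m_index] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
}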
void Destroy()
{
	m_map = NULL;
	m_offset = 0;

	// Don't know if we must do it
	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, m_buffer);
	glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

	for (size_t i = 0; i < countof(m_fence); i++)
	{
		glDeleteSync(m_fence[i]);
	}

	glDeleteBuffers(1, &m_buffer);
}
static void gl2_renderchain_fence_free(void *data, void *chain_data)
{
#ifdef HAVE_GL_SYNC
   unsigned i;
   gl2_renderchain_t *chain = (gl2_renderchain_t*)chain_data;

   for (i = 0; i < chain->fence_count; i++)
   {
      glClientWaitSync(chain->fences[i], GL_SYNC_FLUSH_COMMANDS_BIT, 1000000000);
      glDeleteSync(chain->fences[i]);
   }
   chain->fence_count = 0;
#endif
}
/*
============
R_RotateRingbuffer
============
*/
static GLsizei R_RotateRingbuffer( glRingbuffer_t *rb )
{
	rb->syncs[ rb->activeSegment ] = glFenceSync( GL_SYNC_GPU_COMMANDS_COMPLETE, 0 );

	rb->activeSegment++;
	if( rb->activeSegment >= DYN_BUFFER_SEGMENTS )
		rb->activeSegment = 0;

	// wait until the next segment is ready, polling in 10 ms (10000000 ns)
	// slices and warning on every timeout so long stalls are visible
	while( glClientWaitSync( rb->syncs[ rb->activeSegment ], GL_SYNC_FLUSH_COMMANDS_BIT, 10000000 ) == GL_TIMEOUT_EXPIRED ) {
		ri.Printf( PRINT_WARNING, "long wait for GL buffer\n" );
	}

	glDeleteSync( rb->syncs[ rb->activeSegment ] );

	return rb->activeSegment * rb->segmentElements;
}
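/*
============
R_InitRingbuffer

Hedged sketch of the setup the two ringbuffer functions above imply; the
storage flags and the assumption that the caller has created and bound the
buffer object are mine, only the struct fields come from the snippets. The
buffer is persistently mapped once and split into DYN_BUFFER_SEGMENTS
segments, each guarded by its own fence; the pre-created fences signal
immediately, so the first rotation through the ring never stalls.
============
*/
static void R_InitRingbuffer( GLenum target, GLsizei elementSize,
			      GLsizei segmentElements, glRingbuffer_t *rb )
{
	GLsizei totalSize = elementSize * segmentElements * DYN_BUFFER_SEGMENTS;
	int i;

	glBufferStorage( target, totalSize, nullptr,
			 GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT );
	rb->baseAddr = glMapBufferRange( target, 0, totalSize,
					 GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
					 GL_MAP_FLUSH_EXPLICIT_BIT );
	rb->segmentElements = segmentElements;
	rb->activeSegment = 0;
	for( i = 1; i < DYN_BUFFER_SEGMENTS; i++ )
		rb->syncs[ i ] = glFenceSync( GL_SYNC_GPU_COMMANDS_COMPLETE, 0 );
}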
void wined3d_event_query_issue(struct wined3d_event_query *query, const struct wined3d_device *device)
{
    const struct wined3d_gl_info *gl_info;
    struct wined3d_context *context;

    if (query->context)
    {
        if (!query->context->gl_info->supported[ARB_SYNC] && query->context->tid != GetCurrentThreadId())
        {
            context_free_event_query(query);
            context = context_acquire(device, NULL);
            context_alloc_event_query(context, query);
        }
        else
        {
            context = context_acquire(device, query->context->current_rt);
        }
    }
    else
    {
        context = context_acquire(device, NULL);
        context_alloc_event_query(context, query);
    }

    gl_info = context->gl_info;

    if (gl_info->supported[ARB_SYNC])
    {
        if (query->object.sync) GL_EXTCALL(glDeleteSync(query->object.sync));
        checkGLcall("glDeleteSync");
        query->object.sync = GL_EXTCALL(glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
        checkGLcall("glFenceSync");
    }
    else if (gl_info->supported[APPLE_FENCE])
    {
        GL_EXTCALL(glSetFenceAPPLE(query->object.id));
        checkGLcall("glSetFenceAPPLE");
    }
    else if (gl_info->supported[NV_FENCE])
    {
        GL_EXTCALL(glSetFenceNV(query->object.id, GL_ALL_COMPLETED_NV));
        checkGLcall("glSetFenceNV");
    }

    context_release(context);
}
/** Prepare draw calls before scene rendering */
void DrawCalls::prepareDrawCalls(scene::ICameraSceneNode *camnode)
{
	CPUParticleManager::getInstance()->reset();
	TextBillboardDrawer::reset();
	PROFILER_PUSH_CPU_MARKER("- culling", 0xFF, 0xFF, 0x0);
	SP::prepareDrawCalls();
	parseSceneManager(
		irr_driver->getSceneManager()->getRootSceneNode()->getChildren(),
		camnode);
	SP::handleDynamicDrawCall();
	SP::updateModelMatrix();
	PROFILER_POP_CPU_MARKER();

	PROFILER_PUSH_CPU_MARKER("- cpu particle generation", 0x2F, 0x1F, 0x11);
	CPUParticleManager::getInstance()->generateAll();
	PROFILER_POP_CPU_MARKER();

	// Wait for the fence from the previous frame, polling in 1 ms slices
	if (m_sync != 0)
	{
		PROFILER_PUSH_CPU_MARKER("- Sync Stall", 0xFF, 0x0, 0x0);
		GLenum reason = glClientWaitSync(m_sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
		if (reason != GL_ALREADY_SIGNALED)
		{
			do
			{
				reason = glClientWaitSync(m_sync, GL_SYNC_FLUSH_COMMANDS_BIT, 1000000);
			}
			while (reason == GL_TIMEOUT_EXPIRED);
		}
		glDeleteSync(m_sync);
		m_sync = 0;
		PROFILER_POP_CPU_MARKER();
	}

	PROFILER_PUSH_CPU_MARKER("- particle and text billboard upload", 0x3F, 0x03, 0x61);
	CPUParticleManager::getInstance()->uploadAll();
	TextBillboardDrawer::updateAll();
	PROFILER_POP_CPU_MARKER();

	PROFILER_PUSH_CPU_MARKER("- SP::upload instance and skinning matrices", 0xFF, 0x0, 0xFF);
	SP::uploadAll();
	PROFILER_POP_CPU_MARKER();
}
void GLSink::draw()
{
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Update stream
	glBindTexture(GL_TEXTURE_2D, m_uiTexture);

	FrameData* pFrame = NULL;

	// block until new frame is available
	m_uiBufferIdx = m_pInputBuffer->getBufferForReading((void*&)pFrame);

	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, m_pUnPackBuffer[m_uiBufferIdx]);
	glWaitMarkerAMD(m_pUnPackBuffer[m_uiBufferIdx], pFrame->uiTransferId);

	// Copy pinned mem to texture
	glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_uiTextureWidth, m_uiTextureHeight, m_nExtFormat, m_nType, NULL);

	// Insert fence to determine when we can release the buffer
	GLsync Fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

	glPushMatrix();

	// Scale quad to the AR of the incoming texture
	glScalef(m_fAspectRatio, 1.0f, 1.0f);

	// Draw quad with mapped texture
	glBindBuffer(GL_ARRAY_BUFFER, m_uiQuad);
	glDrawArrays(GL_QUADS, 0, 4);
	glBindBuffer(GL_ARRAY_BUFFER, 0);

	glPopMatrix();
	glBindTexture(GL_TEXTURE_2D, 0);

	if (glIsSync(Fence))
	{
		glClientWaitSync(Fence, GL_SYNC_FLUSH_COMMANDS_BIT, OneSecond);
		glDeleteSync(Fence);
	}

	m_pInputBuffer->releaseReadBuffer();
}
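// Hedged summary sketch distilling the lifecycle every snippet above follows
// (the function itself is illustrative): create the fence after the commands
// it guards, wait on it (client-side or server-side) just before the protected
// resource is reused, then delete it. Sync objects cannot be re-armed, so each
// round needs a fresh glFenceSync, and glDeleteSync(0) is defined to be
// silently ignored, which is why several snippets above can delete
// unconditionally after zeroing their handles.
void fenceLifecycle()
{
	GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);

	// Option A: stall the CPU (flush so the fence can ever signal)
	glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);

	// Option B: stall the GPU queue instead, leaving the CPU free
	// glWaitSync(fence, 0, GL_TIMEOUT_IGNORED);

	glDeleteSync(fence);
}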