// CAUTION: bind texture should in context thread only status_t SurfaceTexture::bindToAuxSlotLocked() { if (EGL_NO_IMAGE_KHR != mBackAuxSlot->eglSlot.mEglImage) { AuxSlot *tmp = mBackAuxSlot; mBackAuxSlot = mFrontAuxSlot; mFrontAuxSlot = tmp; glBindTexture(mTexTarget, mTexName); glEGLImageTargetTexture2DOES(mTexTarget, (GLeglImageOES)mFrontAuxSlot->eglSlot.mEglImage); // insert fence sync object just after new front texture applied EGLSyncKHR eglFence = mFrontAuxSlot->eglSlot.mEglFence; if (eglFence != EGL_NO_SYNC_KHR) { XLOGI("[%s] fence sync already exists in mFrontAuxSlot:%p, destoryed it", __func__, mFrontAuxSlot); eglDestroySyncKHR(mEglDisplay, eglFence); } eglFence = eglCreateSyncKHR(mEglDisplay, EGL_SYNC_FENCE_KHR, NULL); if (eglFence == EGL_NO_SYNC_KHR) { XLOGE("[%s] error creating fence: %#x", __func__, eglGetError()); } glFlush(); mFrontAuxSlot->eglSlot.mEglFence = eglFence; } mAuxSlotDirty = false; return NO_ERROR; }
// Tears down the EGL fence attached to |fbo|, if one exists: performs a
// zero-timeout client wait on it, destroys the sync object, and clears the
// FBO's stored sync handle. A no-op when the FBO has no fence.
void GraphicsContext3DInternal::destroyEGLSync(FBO* fbo)
{
    EGLSyncKHR sync = fbo->sync();
    if (sync != EGL_NO_SYNC_KHR) {
        // Zero-timeout poll (no flush flags) before destruction.
        eglClientWaitSyncKHR(m_dpy, sync, 0, 0);
        eglDestroySyncKHR(m_dpy, sync);
        fbo->setSync(EGL_NO_SYNC_KHR);
    }
}
// Makes the current buffer's acquire fence a prerequisite for subsequent GL
// work: either a GPU-side wait (useWaitSync) or a blocking CPU wait.
// The caller's current EGL display/context must match the ones this
// SurfaceTexture is attached to; "Locked" => caller holds the object's mutex.
// Returns INVALID_OPERATION on display/context mismatch, UNKNOWN_ERROR on
// EGL failures, or the fence-wait error code from the CPU path.
status_t SurfaceTexture::doGLFenceWaitLocked() const {

    EGLDisplay dpy = eglGetCurrentDisplay();
    EGLContext ctx = eglGetCurrentContext();

    // Validate the caller is on the display/context this texture belongs to.
    if (mEglDisplay != dpy || mEglDisplay == EGL_NO_DISPLAY) {
        ST_LOGE("doGLFenceWait: invalid current EGLDisplay");
        return INVALID_OPERATION;
    }

    if (mEglContext != ctx || mEglContext == EGL_NO_CONTEXT) {
        ST_LOGE("doGLFenceWait: invalid current EGLContext");
        return INVALID_OPERATION;
    }

    if (mCurrentFence != NULL) {
        if (useWaitSync) {
            // GPU-side path: wrap the native fence fd in an EGL sync and let
            // the GPU wait; this thread is not blocked.
            // Create an EGLSyncKHR from the current fence.
            int fenceFd = mCurrentFence->dup();
            if (fenceFd == -1) {
                ST_LOGE("doGLFenceWait: error dup'ing fence fd: %d", errno);
                return -errno;
            }
            EGLint attribs[] = {
                EGL_SYNC_NATIVE_FENCE_FD_ANDROID, fenceFd,
                EGL_NONE
            };
            // On success EGL takes ownership of fenceFd; on failure we must
            // close it ourselves (hence the close() below).
            EGLSyncKHR sync = eglCreateSyncKHR(dpy,
                    EGL_SYNC_NATIVE_FENCE_ANDROID, attribs);
            if (sync == EGL_NO_SYNC_KHR) {
                close(fenceFd);
                ST_LOGE("doGLFenceWait: error creating EGL fence: %#x",
                        eglGetError());
                return UNKNOWN_ERROR;
            }

            // XXX: The spec draft is inconsistent as to whether this should
            // return an EGLint or void. Ignore the return value for now, as
            // it's not strictly needed.
            eglWaitSyncANDROID(dpy, sync, 0);
            EGLint eglErr = eglGetError();
            eglDestroySyncKHR(dpy, sync);
            if (eglErr != EGL_SUCCESS) {
                ST_LOGE("doGLFenceWait: error waiting for EGL fence: %#x",
                        eglErr);
                return UNKNOWN_ERROR;
            }
        } else {
            // CPU-side path: block this thread until the fence signals.
            status_t err = mCurrentFence->waitForever(1000,
                    "SurfaceTexture::doGLFenceWaitLocked");
            if (err != NO_ERROR) {
                ST_LOGE("doGLFenceWait: error waiting for fence: %d", err);
                return err;
            }
        }
    }

    return NO_ERROR;
}
// Copies the queued source texture (srcTexId) into the destination tile
// texture. Two compile-time paths:
//  - GPU_UPLOAD_WITHOUT_DRAW: attach source to the FBO and read it back
//    into destTex with glCopyTexSubImage2D (no draw call).
//  - otherwise: attach destTex to the FBO and draw a quad sampling the
//    source, then insert an EGL fence so the Tex Gen thread can wait for
//    the GPU copy before reusing the memory.
void TransferQueue::blitTileFromQueue(GLuint fboID, BaseTileTexture* destTex,
                                      GLuint srcTexId, GLenum srcTexTarget,
                                      int index)
{
#if GPU_UPLOAD_WITHOUT_DRAW
    // Fast path: source becomes the FBO color attachment, then its pixels
    // are copied straight into the destination texture.
    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
    glFramebufferTexture2D(GL_FRAMEBUFFER,
                           GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D,
                           srcTexId,
                           0);
    glBindTexture(GL_TEXTURE_2D, destTex->m_ownTextureId);
    glCopyTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 0, 0,
                        destTex->getSize().width(),
                        destTex->getSize().height());
#else
    // Then set up the FBO and copy the SurfTex content in.
    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
    glFramebufferTexture2D(GL_FRAMEBUFFER,
                           GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D,
                           destTex->m_ownTextureId,
                           0);
    setGLStateForCopy(destTex->getSize().width(),
                      destTex->getSize().height());
    GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE) {
        // Incomplete FBO: bail out after unbinding so later GL calls are safe.
        XLOG("Error: glCheckFramebufferStatus failed");
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
        return;
    }

    // Use empty rect to set up the special matrix to draw.
    SkRect rect = SkRect::MakeEmpty();
    TilesManager::instance()->shader()->drawQuad(rect, srcTexId, 1.0,
                                                 srcTexTarget, GL_NEAREST);

    // To workaround a sync issue on some platforms, we should insert the sync
    // here while in the current FBO.
    // This will essentially kick off the GPU command buffer, and the Tex Gen
    // thread will then have to wait for this buffer to finish before writing
    // into the same memory.
    EGLDisplay dpy = eglGetCurrentDisplay();
    if (m_currentDisplay != dpy)
        m_currentDisplay = dpy;
    if (m_currentDisplay != EGL_NO_DISPLAY) {
        // Destroy any previous fence for this slot first so its handle does
        // not leak when we overwrite it below.
        if (m_transferQueue[index].m_syncKHR != EGL_NO_SYNC_KHR)
            eglDestroySyncKHR(m_currentDisplay,
                              m_transferQueue[index].m_syncKHR);
        m_transferQueue[index].m_syncKHR = eglCreateSyncKHR(m_currentDisplay,
                                                            EGL_SYNC_FENCE_KHR,
                                                            0);
    }
    GLUtils::checkEglError("CreateSyncKHR");
#endif
}
void BufferQueue::freeBufferLocked(int i) { mSlots[i].mGraphicBuffer = 0; if (mSlots[i].mBufferState == BufferSlot::ACQUIRED) { mSlots[i].mNeedsCleanupOnRelease = true; } mSlots[i].mBufferState = BufferSlot::FREE; mSlots[i].mFrameNumber = 0; mSlots[i].mAcquireCalled = false; // destroy fence as BufferQueue now takes ownership if (mSlots[i].mFence != EGL_NO_SYNC_KHR) { eglDestroySyncKHR(mSlots[i].mEglDisplay, mSlots[i].mFence); mSlots[i].mFence = EGL_NO_SYNC_KHR; } }
void GraphicsContext3DInternal::releaseFrontBuffer() { LOGWEBGL("GraphicsContext3DInternal::releaseFrontBuffer()"); MutexLocker lock(m_fboMutex); FBO* fbo = m_frontFBO; if (fbo) { fbo->setLocked(false); if (fbo->sync() != EGL_NO_SYNC_KHR) { eglDestroySyncKHR(m_dpy, fbo->sync()); } fbo->setSync(); } updateFrontBuffer(); }
void BufferQueueCoreBF::freeBufferLocked(int slot) { BQ_LOGV("freeBufferLocked: slot %d", slot); mSlots[slot].mGraphicBuffer.clear(); if (mSlots[slot].mBufferState == BufferSlot::ACQUIRED) { mSlots[slot].mNeedsCleanupOnRelease = true; } mSlots[slot].mBufferState = BufferSlot::FREE; mSlots[slot].mFrameNumber = UINT32_MAX; mSlots[slot].mAcquireCalled = false; mSlots[slot].mFrameNumber = 0; // Destroy fence as BufferQueue now takes ownership if (mSlots[slot].mEglFence != EGL_NO_SYNC_KHR) { eglDestroySyncKHR(mSlots[slot].mEglDisplay, mSlots[slot].mEglFence); mSlots[slot].mEglFence = EGL_NO_SYNC_KHR; } mSlots[slot].mFence = Fence::NO_FENCE; }
// Releases everything held by aux slot |bs|: its registered MVA mapping,
// its EGL fence, its GraphicBuffer reference, and its EGLImage. Safe to
// call on an already-empty slot (the outer check makes it a no-op).
// Always returns NO_ERROR.
status_t SurfaceTexture::freeAuxSlotLocked(AuxSlot &bs) {
    if (EGL_NO_IMAGE_KHR != bs.eglSlot.mEglImage ||
        bs.slot.mGraphicBuffer != NULL) {
        XLOGI("[%s]", __func__);
        // Unregister the MVA mapping first; the cached pointer is cleared
        // only when unregistration succeeds.
        if (NO_ERROR == unregisterMva((mHalRegisterLoopMemoryObj_t *)bs.mMva)) {
            bs.mMva = NULL;
        }
        // Destroy any pending fence so the sync object does not leak.
        if (EGL_NO_SYNC_KHR != bs.eglSlot.mEglFence) {
            eglDestroySyncKHR(mEglDisplay, bs.eglSlot.mEglFence);
            bs.eglSlot.mEglFence = EGL_NO_SYNC_KHR;
        }
        bs.slot.mGraphicBuffer = NULL;
        eglDestroyImageKHR(mEglDisplay, bs.eglSlot.mEglImage);
        bs.eglSlot.mEglImage = EGL_NO_IMAGE_KHR;
    }
    return NO_ERROR;
}
// Waits for |acquireFenceFd| to signal by wrapping it in an EGL native
// fence sync and issuing eglWaitSyncKHR on it. On successful sync creation
// EGL takes ownership of the fd; on creation failure the fd is closed here.
// Returns 0 on success, 1 on any failure.
static int EGLFenceWait(EGLDisplay egl_display, int acquireFenceFd) {
  EGLint sync_attribs[] = {
      EGL_SYNC_NATIVE_FENCE_FD_ANDROID, acquireFenceFd, EGL_NONE};

  EGLSyncKHR sync = eglCreateSyncKHR(egl_display,
                                     EGL_SYNC_NATIVE_FENCE_ANDROID,
                                     sync_attribs);
  if (sync == EGL_NO_SYNC_KHR) {
    ALOGE("Failed to make EGLSyncKHR from acquireFenceFd: %s", GetEGLError());
    close(acquireFenceFd);  // EGL did not take ownership of the fd.
    return 1;
  }

  int result = 0;
  if (eglWaitSyncKHR(egl_display, sync, 0) == EGL_FALSE) {
    ALOGE("Failed to wait for acquire: %s", GetEGLError());
    result = 1;
  }
  // The sync is destroyed on both success and failure paths.
  eglDestroySyncKHR(egl_display, sync);
  return result;
}
void BufferQueueCore::clearBufferSlotLocked(int slot) { BQ_LOGV("clearBufferSlotLocked: slot %d", slot); mSlots[slot].mGraphicBuffer.clear(); mSlots[slot].mBufferState.reset(); mSlots[slot].mRequestBufferCalled = false; mSlots[slot].mFrameNumber = 0; mSlots[slot].mAcquireCalled = false; mSlots[slot].mNeedsReallocation = true; // Destroy fence as BufferQueue now takes ownership if (mSlots[slot].mEglFence != EGL_NO_SYNC_KHR) { eglDestroySyncKHR(mSlots[slot].mEglDisplay, mSlots[slot].mEglFence); mSlots[slot].mEglFence = EGL_NO_SYNC_KHR; } mSlots[slot].mFence = Fence::NO_FENCE; mSlots[slot].mEglDisplay = EGL_NO_DISPLAY; if (mLastQueuedSlot == slot) { mLastQueuedSlot = INVALID_BUFFER_SLOT; } }
// Ensures GPU reads of the current slot's buffer have completed before the
// producer reuses it: waits on (then destroys) any previous per-slot fence,
// and installs a fresh fence covering the work issued in the current GL
// context. No-op when fence syncs are disabled or no buffer is current.
// |dpy| is the display of the calling context; mutex is held ("Locked").
// Returns UNKNOWN_ERROR / TIMED_OUT on fence failures, OK otherwise.
status_t SurfaceTexture::syncForReleaseLocked(EGLDisplay dpy) {
    ST_LOGV("syncForReleaseLocked");

    if (mUseFenceSync && mCurrentTexture != BufferQueue::INVALID_BUFFER_SLOT) {
        EGLSyncKHR fence = mEGLSlots[mCurrentTexture].mFence;
        if (fence != EGL_NO_SYNC_KHR) {
            // There is already a fence for the current slot. We need to wait
            // on that before replacing it with another fence to ensure that all
            // outstanding buffer accesses have completed before the producer
            // accesses it.
            // Timeout is 1 second (value is in nanoseconds).
            EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
            if (result == EGL_FALSE) {
                ST_LOGE("syncForReleaseLocked: error waiting for previous "
                        "fence: %#x", eglGetError());
                return UNKNOWN_ERROR;
            } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
                ST_LOGE("syncForReleaseLocked: timeout waiting for previous "
                        "fence");
                return TIMED_OUT;
            }
            eglDestroySyncKHR(dpy, fence);
        }

        // Create a fence for the outstanding accesses in the current OpenGL ES
        // context.
        fence = eglCreateSyncKHR(dpy, EGL_SYNC_FENCE_KHR, NULL);
        if (fence == EGL_NO_SYNC_KHR) {
            ST_LOGE("syncForReleaseLocked: error creating fence: %#x",
                    eglGetError());
            return UNKNOWN_ERROR;
        }
        // Flush so the fence command is actually submitted to the GPU.
        glFlush();
        mEGLSlots[mCurrentTexture].mFence = fence;
    }

    return OK;
}
// Hands a free buffer slot to the producer, allocating (or reallocating) the
// GraphicBuffer when the requested geometry/format/usage no longer matches.
// Blocks while the buffer count needs to change or no slot is free. After
// releasing the lock, waits on (and destroys) the slot's EGL fence so the
// producer does not write while the consumer still reads.
//
// BUGFIX: the free-slot scan used to compute
//     bool isOlder = mSlots[i].mFrameNumber < mSlots[found].mFrameNumber;
// BEFORE checking found < 0, reading mSlots[-1] out of bounds on the first
// FREE slot encountered. The comparison is now short-circuited behind the
// found < 0 test.
status_t BufferQueue::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
        uint32_t format, uint32_t usage) {
    ATRACE_CALL();
    ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);

    if ((w && !h) || (!w && h)) {
        // Width and height must be specified together (or both defaulted).
        ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
        return BAD_VALUE;
    }

    status_t returnFlags(OK);
    EGLDisplay dpy = EGL_NO_DISPLAY;
    EGLSyncKHR fence = EGL_NO_SYNC_KHR;

    { // Scope for the lock
        Mutex::Autolock lock(mMutex);

        if (format == 0) {
            format = mDefaultBufferFormat;
        }
        // turn on usage bits the consumer requested
        usage |= mConsumerUsageBits;

        int found = -1;
        int foundSync = -1;
        int dequeuedCount = 0;
        bool tryAgain = true;
        while (tryAgain) {
            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: SurfaceTexture has been abandoned!");
                return NO_INIT;
            }

            // We need to wait for the FIFO to drain if the number of buffer
            // needs to change.
            //
            // The condition "number of buffers needs to change" is true if
            // - the client doesn't care about how many buffers there are
            // - AND the actual number of buffer is different from what was
            //   set in the last setBufferCountServer()
            //                         - OR -
            //   setBufferCountServer() was set to a value incompatible with
            //   the synchronization mode (for instance because the sync mode
            //   changed since)
            //
            // As long as this condition is true AND the FIFO is not empty, we
            // wait on mDequeueCondition.
            const int minBufferCountNeeded = mSynchronousMode ?
                    mMinSyncBufferSlots : mMinAsyncBufferSlots;

            const bool numberOfBuffersNeedsToChange = !mClientBufferCount &&
                    ((mServerBufferCount != mBufferCount) ||
                            (mServerBufferCount < minBufferCountNeeded));

            if (!mQueue.isEmpty() && numberOfBuffersNeedsToChange) {
                // wait for the FIFO to drain
                mDequeueCondition.wait(mMutex);
                // NOTE: we continue here because we need to reevaluate our
                // whole state (eg: we could be abandoned or disconnected)
                continue;
            }

            if (numberOfBuffersNeedsToChange) {
                // here we're guaranteed that mQueue is empty
                freeAllBuffersLocked();
                mBufferCount = mServerBufferCount;
                if (mBufferCount < minBufferCountNeeded)
                    mBufferCount = minBufferCountNeeded;
                mBufferHasBeenQueued = false;
                returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS;
            }

            // look for a free buffer to give to the client
            found = INVALID_BUFFER_SLOT;
            foundSync = INVALID_BUFFER_SLOT;
            dequeuedCount = 0;
            for (int i = 0; i < mBufferCount; i++) {
                const int state = mSlots[i].mBufferState;
                if (state == BufferSlot::DEQUEUED) {
                    dequeuedCount++;
                }

                // this logic used to be if (FLAG_ALLOW_DEQUEUE_CURRENT_BUFFER)
                // but dequeuing the current buffer is disabled.
                if (false) {
                    // This functionality has been temporarily removed so
                    // BufferQueue and SurfaceTexture can be refactored into
                    // separate objects
                } else {
                    if (state == BufferSlot::FREE) {
                        /* We return the oldest of the free buffers to avoid
                         * stalling the producer if possible. This is because
                         * the consumer may still have pending reads of the
                         * buffers in flight.
                         */
                        // Short-circuit: only index mSlots[found] once a
                        // candidate exists (found >= 0).
                        if (found < 0 ||
                                mSlots[i].mFrameNumber <
                                        mSlots[found].mFrameNumber) {
                            foundSync = i;
                            found = i;
                        }
                    }
                }
            }

            // clients are not allowed to dequeue more than one buffer
            // if they didn't set a buffer count.
            if (!mClientBufferCount && dequeuedCount) {
                ST_LOGE("dequeueBuffer: can't dequeue multiple buffers without "
                        "setting the buffer count");
                return -EINVAL;
            }

            // See whether a buffer has been queued since the last
            // setBufferCount so we know whether to perform the
            // mMinUndequeuedBuffers check below.
            if (mBufferHasBeenQueued) {
                // make sure the client is not trying to dequeue more buffers
                // than allowed.
                const int avail = mBufferCount - (dequeuedCount+1);
                if (avail < (mMinUndequeuedBuffers-int(mSynchronousMode))) {
                    ST_LOGE("dequeueBuffer: mMinUndequeuedBuffers=%d exceeded "
                            "(dequeued=%d)",
                            mMinUndequeuedBuffers-int(mSynchronousMode),
                            dequeuedCount);
                    return -EBUSY;
                }
            }

            // if no buffer is found, wait for a buffer to be released
            tryAgain = found == INVALID_BUFFER_SLOT;
            if (tryAgain) {
                mDequeueCondition.wait(mMutex);
            }
        }

        if (found == INVALID_BUFFER_SLOT) {
            // This should not happen.
            ST_LOGE("dequeueBuffer: no available buffer slots");
            return -EBUSY;
        }

        const int buf = found;
        *outBuf = found;

        ATRACE_BUFFER_INDEX(buf);

        const bool useDefaultSize = !w && !h;
        if (useDefaultSize) {
            // use the default size
            w = mDefaultWidth;
            h = mDefaultHeight;
        }

        const bool updateFormat = (format != 0);
        if (!updateFormat) {
            // keep the current (or default) format
            format = mPixelFormat;
        }

        // buffer is now in DEQUEUED (but can also be current at the same time,
        // if we're in synchronous mode)
        mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

        const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
        if ((buffer == NULL) ||
            (uint32_t(buffer->width)  != w) ||
            (uint32_t(buffer->height) != h) ||
            (uint32_t(buffer->format) != format) ||
            ((uint32_t(buffer->usage) & usage) != usage))
        {
            // Reallocate when the existing buffer does not match the request.
            status_t error;
            sp<GraphicBuffer> graphicBuffer(
                    mGraphicBufferAlloc->createGraphicBuffer(
                            w, h, format, usage, &error));
            if (graphicBuffer == 0) {
                ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                        "failed");
                return error;
            }
            if (updateFormat) {
                mPixelFormat = format;
            }
            mSlots[buf].mAcquireCalled = false;
            mSlots[buf].mGraphicBuffer = graphicBuffer;
            mSlots[buf].mRequestBufferCalled = false;
            mSlots[buf].mFence = EGL_NO_SYNC_KHR;
            mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;

            returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
        }

        // Take ownership of the slot's fence; it is waited on (and destroyed)
        // outside the lock below.
        dpy = mSlots[buf].mEglDisplay;
        fence = mSlots[buf].mFence;
        mSlots[buf].mFence = EGL_NO_SYNC_KHR;
    } // end lock scope

    if (fence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
        // If something goes wrong, log the error, but return the buffer without
        // synchronizing access to it. It's too late at this point to abort the
        // dequeue operation.
        if (result == EGL_FALSE) {
            ST_LOGE("dequeueBuffer: error waiting for fence: %#x",
                    eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            ST_LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(dpy, fence);
    }

    ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
            mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);

    return returnFlags;
}
// Hands a free buffer slot to the producer, reallocating the GraphicBuffer
// when geometry/format/usage no longer match (with QCOM- and
// MISSING_GRALLOC_BUFFERS-specific variants). Blocks until a slot is
// available; after releasing the lock, waits on and destroys the slot's EGL
// fence before returning it to the producer.
//
// BUGFIX: in the non-FLAG_ALLOW_DEQUEUE_CURRENT_BUFFER scan, the code
// computed
//     bool isOlder = mSlots[i].mFrameNumber < mSlots[found].mFrameNumber;
// BEFORE checking found < 0, reading mSlots[-1] out of bounds on the first
// FREE slot encountered. The comparison is now short-circuited behind the
// found < 0 test.
status_t SurfaceTexture::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
        uint32_t format, uint32_t usage) {
    ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);

    if ((w && !h) || (!w && h)) {
        // Width and height must be specified together (or both defaulted).
        ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
        return BAD_VALUE;
    }

    status_t returnFlags(OK);
    EGLDisplay dpy = EGL_NO_DISPLAY;
    EGLSyncKHR fence = EGL_NO_SYNC_KHR;

    { // Scope for the lock
        Mutex::Autolock lock(mMutex);

        int found = -1;
        int foundSync = -1;
        int dequeuedCount = 0;
        bool tryAgain = true;
#ifdef MISSING_GRALLOC_BUFFERS
        int dequeueRetries = 5;
#endif
        while (tryAgain) {
            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: SurfaceTexture has been abandoned!");
                return NO_INIT;
            }

            // We need to wait for the FIFO to drain if the number of buffer
            // needs to change.
            //
            // The condition "number of buffers needs to change" is true if
            // - the client doesn't care about how many buffers there are
            // - AND the actual number of buffer is different from what was
            //   set in the last setBufferCountServer()
            //                         - OR -
            //   setBufferCountServer() was set to a value incompatible with
            //   the synchronization mode (for instance because the sync mode
            //   changed since)
            //
            // As long as this condition is true AND the FIFO is not empty, we
            // wait on mDequeueCondition.
            const int minBufferCountNeeded = mSynchronousMode ?
                    MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS;

            const bool numberOfBuffersNeedsToChange = !mClientBufferCount &&
                    ((mServerBufferCount != mBufferCount) ||
                            (mServerBufferCount < minBufferCountNeeded));

            if (!mQueue.isEmpty() && numberOfBuffersNeedsToChange) {
                // wait for the FIFO to drain
                mDequeueCondition.wait(mMutex);
                // NOTE: we continue here because we need to reevaluate our
                // whole state (eg: we could be abandoned or disconnected)
                continue;
            }

            if (numberOfBuffersNeedsToChange) {
                // here we're guaranteed that mQueue is empty
                freeAllBuffersLocked();
                mBufferCount = mServerBufferCount;
                if (mBufferCount < minBufferCountNeeded)
                    mBufferCount = minBufferCountNeeded;
                mCurrentTexture = INVALID_BUFFER_SLOT;
                returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS;
            }

            // look for a free buffer to give to the client
            found = INVALID_BUFFER_SLOT;
            foundSync = INVALID_BUFFER_SLOT;
            dequeuedCount = 0;
            for (int i = 0; i < mBufferCount; i++) {
                const int state = mSlots[i].mBufferState;
                if (state == BufferSlot::DEQUEUED) {
                    dequeuedCount++;
                }

                // if buffer is FREE it CANNOT be current
                LOGW_IF((state == BufferSlot::FREE) && (mCurrentTexture==i),
                        "dequeueBuffer: buffer %d is both FREE and current!",
                        i);

                if (FLAG_ALLOW_DEQUEUE_CURRENT_BUFFER) {
                    if (state == BufferSlot::FREE || i == mCurrentTexture) {
                        foundSync = i;
                        if (i != mCurrentTexture) {
                            found = i;
                            break;
                        }
                    }
                } else {
                    if (state == BufferSlot::FREE) {
                        /* We return the oldest of the free buffers to avoid
                         * stalling the producer if possible. This is because
                         * the consumer may still have pending reads of the
                         * buffers in flight.
                         */
                        // Short-circuit: only index mSlots[found] once a
                        // candidate exists (found >= 0).
                        if (found < 0 ||
                                mSlots[i].mFrameNumber <
                                        mSlots[found].mFrameNumber) {
                            foundSync = i;
                            found = i;
                        }
                    }
                }
            }

            // clients are not allowed to dequeue more than one buffer
            // if they didn't set a buffer count.
            if (!mClientBufferCount && dequeuedCount) {
#ifdef MISSING_GRALLOC_BUFFERS
                // Vendor workaround: retry a few times before adopting the
                // server's buffer count.
                if (--dequeueRetries) {
                    LOGD("SurfaceTexture::dequeue: Not allowed to dequeue more "
                            "than a buffer SLEEPING\n");
                    usleep(10000);
                } else {
                    mClientBufferCount = mServerBufferCount;
                    LOGD("SurfaceTexture::dequeue: Not allowed to dequeue more "
                            "than a buffer RETRY mBufferCount:%d mServerBufferCount:%d\n",
                            mBufferCount, mServerBufferCount);
                }
                continue;
#else
                ST_LOGE("dequeueBuffer: can't dequeue multiple buffers without "
                        "setting the buffer count");
#endif
                return -EINVAL;
            }

            // See whether a buffer has been queued since the last
            // setBufferCount so we know whether to perform the
            // MIN_UNDEQUEUED_BUFFERS check below.
            bool bufferHasBeenQueued = mCurrentTexture != INVALID_BUFFER_SLOT;
            if (bufferHasBeenQueued) {
                // make sure the client is not trying to dequeue more buffers
                // than allowed.
                const int avail = mBufferCount - (dequeuedCount+1);
                if (avail < (MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode))) {
#ifdef MISSING_GRALLOC_BUFFERS
                    // Vendor workaround: grow the buffer count instead of
                    // failing when the client set an explicit count.
                    if (mClientBufferCount != 0) {
                        mBufferCount++;
                        mClientBufferCount = mServerBufferCount = mBufferCount;
                        LOGD("SurfaceTexture::dequeuebuffer: MIN EXCEEDED "
                                "mBuffer:%d bumped\n", mBufferCount);
                        continue;
                    }
#endif
                    ST_LOGE("dequeueBuffer: MIN_UNDEQUEUED_BUFFERS=%d exceeded "
                            "(dequeued=%d)",
                            MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode),
                            dequeuedCount);
                    return -EBUSY;
                }
            }

            // we're in synchronous mode and didn't find a buffer, we need to
            // wait for some buffers to be consumed
            tryAgain = mSynchronousMode && (foundSync == INVALID_BUFFER_SLOT);
            if (tryAgain) {
                mDequeueCondition.wait(mMutex);
            }
        }

        if (mSynchronousMode && found == INVALID_BUFFER_SLOT) {
            // foundSync guaranteed to be != INVALID_BUFFER_SLOT
            found = foundSync;
        }

        if (found == INVALID_BUFFER_SLOT) {
            // This should not happen.
            ST_LOGE("dequeueBuffer: no available buffer slots");
            return -EBUSY;
        }

        const int buf = found;
        *outBuf = found;

        const bool useDefaultSize = !w && !h;
        if (useDefaultSize) {
            // use the default size
            w = mDefaultWidth;
            h = mDefaultHeight;
        }

        const bool updateFormat = (format != 0);
        if (!updateFormat) {
            // keep the current (or default) format
            format = mPixelFormat;
        }

        // buffer is now in DEQUEUED (but can also be current at the same time,
        // if we're in synchronous mode)
        mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

        const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
#ifdef QCOM_HARDWARE
        qBufGeometry currentGeometry;
        if (buffer != NULL)
            currentGeometry.set(buffer->width, buffer->height, buffer->format);
        else
            currentGeometry.set(0, 0, 0);

        qBufGeometry requiredGeometry;
        requiredGeometry.set(w, h, format);

        qBufGeometry updatedGeometry;
        updatedGeometry.set(mNextBufferInfo.width, mNextBufferInfo.height,
                mNextBufferInfo.format);
#endif
        if ((buffer == NULL) ||
#ifdef QCOM_HARDWARE
            needNewBuffer(currentGeometry, requiredGeometry,
                    updatedGeometry) ||
#else
            (uint32_t(buffer->width)  != w) ||
            (uint32_t(buffer->height) != h) ||
            (uint32_t(buffer->format) != format) ||
#endif
            ((uint32_t(buffer->usage) & usage) != usage))
        {
#ifdef QCOM_HARDWARE
            if (buffer != NULL) {
                mGraphicBufferAlloc->freeGraphicBufferAtIndex(buf);
            }
#endif
            usage |= GraphicBuffer::USAGE_HW_TEXTURE;
            status_t error;
            sp<GraphicBuffer> graphicBuffer(
                    mGraphicBufferAlloc->createGraphicBuffer(
                            w, h, format, usage, &error));
            if (graphicBuffer == 0) {
                ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                        "failed");
                return error;
            }
            if (updateFormat) {
                mPixelFormat = format;
            }
#ifdef QCOM_HARDWARE
            checkBuffer((native_handle_t *)graphicBuffer->handle, mReqSize,
                    usage);
#endif
            mSlots[buf].mGraphicBuffer = graphicBuffer;
            mSlots[buf].mRequestBufferCalled = false;
            mSlots[buf].mFence = EGL_NO_SYNC_KHR;
            if (mSlots[buf].mEglImage != EGL_NO_IMAGE_KHR) {
                eglDestroyImageKHR(mSlots[buf].mEglDisplay,
                        mSlots[buf].mEglImage);
                mSlots[buf].mEglImage = EGL_NO_IMAGE_KHR;
                mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;
            }
            if (mCurrentTexture == buf) {
                // The current texture no longer references the buffer in this
                // slot since we just allocated a new buffer.
                mCurrentTexture = INVALID_BUFFER_SLOT;
            }
            returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
        }

        // Take ownership of the slot's fence; it is waited on (and destroyed)
        // outside the lock below.
        dpy = mSlots[buf].mEglDisplay;
        fence = mSlots[buf].mFence;
        mSlots[buf].mFence = EGL_NO_SYNC_KHR;
    }

    if (fence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
        // If something goes wrong, log the error, but return the buffer without
        // synchronizing access to it. It's too late at this point to abort the
        // dequeue operation.
        if (result == EGL_FALSE) {
            LOGE("dequeueBuffer: error waiting for fence: %#x", eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(dpy, fence);
    }

    ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
            mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);

    return returnFlags;
}
// Draws one frame's workload across NUM_WORKER_CONTEXTS + 1 contexts,
// chaining EGL fence syncs between context switches so each context's GPU
// work is ordered after the previous one's. Workloads above 8 are rejected.
void ContextSwitchRenderer::drawWorkload() {
    SCOPED_TRACE();
    if (mWorkload > 8) {
        return; // This test does not support higher workloads.
    }

    // Set the background clear color to black.
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
    // No culling of back faces
    glDisable(GL_CULL_FACE);
    // No depth testing
    glDisable(GL_DEPTH_TEST);

    // Fence covering the most recent GPU work; each iteration waits on it,
    // destroys it, draws, then creates a fresh one before switching context.
    EGLSyncKHR fence = eglCreateSyncKHR(mEglDisplay, EGL_SYNC_FENCE_KHR, NULL);

    const int TOTAL_NUM_CONTEXTS = NUM_WORKER_CONTEXTS + 1;
    const float TRANSLATION = 0.9f - (TOTAL_NUM_CONTEXTS * 0.2f);
    for (int i = 0; i < TOTAL_NUM_CONTEXTS; i++) {
        // GPU-side wait for the previous work, then release that sync.
        eglWaitSyncKHR(mEglDisplay, fence, 0);
        eglDestroySyncKHR(mEglDisplay, fence);
        glUseProgram(mProgramId);

        // Set the texture.
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, mTextureId);
        glUniform1i(mTextureUniformHandle, 0);

        // Set the x translate.
        glUniform1f(mTranslateUniformHandle, (i * 0.2f) + TRANSLATION);

        glEnableVertexAttribArray(mPositionHandle);
        glEnableVertexAttribArray(mTexCoordHandle);
        glVertexAttribPointer(mPositionHandle, 3, GL_FLOAT, false, 0,
                CS_VERTICES);
        glVertexAttribPointer(mTexCoordHandle, 2, GL_FLOAT, false, 0,
                CS_TEX_COORDS);

        glDrawArrays(GL_TRIANGLES, 0, CS_NUM_VERTICES);

        fence = eglCreateSyncKHR(mEglDisplay, EGL_SYNC_FENCE_KHR, NULL);

        // Switch to next context.
        // NOTE(review): the switch is gated on mWorkload - 1 while the loop
        // runs TOTAL_NUM_CONTEXTS iterations -- confirm this asymmetry is
        // intended when mWorkload < TOTAL_NUM_CONTEXTS.
        if (i < (mWorkload - 1)) {
            eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface,
                    mContexts[i]);
            // Switch to FBO and re-attach.
            if (mOffscreen) {
                glBindFramebuffer(GL_FRAMEBUFFER, mFboIds[i]);
                glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
                        GL_RENDERBUFFER, mFboDepthId);
                glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                        GL_TEXTURE_2D, mFboTexId, 0);
                glViewport(0, 0, mFboWidth, mFboHeight);
            }
        }
        GLuint err = glGetError();
        if (err != GL_NO_ERROR) {
            // Abort on the first GL error; the final fence (created above)
            // is still cleaned up after the loop.
            ALOGE("GLError %d in drawWorkload", err);
            break;
        }
    }
    // Wait for the last iteration's work and release the final fence.
    eglWaitSyncKHR(mEglDisplay, fence, 0);
    eglDestroySyncKHR(mEglDisplay, fence);

    // Switch back to the main context.
    eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface, mEglContext);
    if (mOffscreen) {
        glBindFramebuffer(GL_FRAMEBUFFER, mFboId);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
                GL_RENDERBUFFER, mFboDepthId);
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                GL_TEXTURE_2D, mFboTexId, 0);
        glViewport(0, 0, mFboWidth, mFboHeight);
    }
}
// conversion function should format by format, chip by chip // currently input is MTK_I420, and output is IMG_YV12/ABGR status_t SurfaceTexture::convertToAuxSlotLocked(bool isForce) { // check invalid buffer if (BufferQueue::INVALID_BUFFER_SLOT == mCurrentTexture) { mAuxSlotConvert = false; return INVALID_OPERATION; } ATRACE_CALL(); // 1) normal BufferQueue needs conversion now // 2) SurfaceTextureLayer neesd conversion after HWC bool isNeedConversionNow = (BufferQueue::TYPE_BufferQueue == mBufferQueue->getType()) || ((true == isForce) && (BufferQueue::TYPE_SurfaceTextureLayer == mBufferQueue->getType())); //if ((true == isNeedConversionNow) && (BufferQueue::NO_CONNECTED_API != getConnectedApi())) { if (true == isNeedConversionNow) { XLOGI("do convertToAuxSlot..."); Slot &src = mSlots[mCurrentTexture]; AuxSlot &dst = *mBackAuxSlot; // fence sync here for buffer not used by G3D EGLSyncKHR fence = mFrontAuxSlot->eglSlot.mEglFence; if (fence != EGL_NO_SYNC_KHR) { EGLint result = eglClientWaitSyncKHR(mEglDisplay, fence, 0, 1000000000); if (result == EGL_FALSE) { XLOGW("[%s] FAILED waiting for front fence: %#x, tearing risk", __func__, eglGetError()); } else if (result == EGL_TIMEOUT_EXPIRED_KHR) { XLOGW("[%s] TIMEOUT waiting for front fence, tearing risk", __func__); } eglDestroySyncKHR(mEglDisplay, fence); mFrontAuxSlot->eglSlot.mEglFence = EGL_NO_SYNC_KHR; } #ifdef USE_MDP uint32_t hal_out_fmt; uint32_t mdp_in_fmt; uint32_t mdp_out_fmt; //if (NATIVE_WINDOW_API_CAMERA == getConnectedApi()) { hal_out_fmt = HAL_PIXEL_FORMAT_RGBA_8888; // camera path needs RGBA for MDP resource mdp_out_fmt = MHAL_FORMAT_ABGR_8888; //} else { // hal_out_fmt = HAL_PIXEL_FORMAT_YV12; // mdp_out_fmt = MHAL_FORMAT_IMG_YV12; //} // !!! only convert for I420 now !!! 
mdp_in_fmt = MHAL_FORMAT_YUV_420; // source graphic buffer sp<GraphicBuffer> sg = src.mGraphicBuffer; // destination graphic buffer sp<GraphicBuffer> dg = dst.slot.mGraphicBuffer; // free if current aux slot exist and not fit if ((EGL_NO_IMAGE_KHR != dst.eglSlot.mEglImage && dg != NULL) && ((sg->width != dg->width) || (sg->height != dg->height) || (hal_out_fmt != (uint32_t)dg->format))) { XLOGI("[%s] free old aux slot ", __func__); XLOGI(" src[w:%d, h:%d, f:0x%x] dst[w:%d, h:%d, f:0x%x] required format:0x%x", sg->width, sg->height, sg->format, dg->width, dg->height, dg->format, hal_out_fmt); freeAuxSlotLocked(dst); } // create aux buffer if current is NULL if ((EGL_NO_IMAGE_KHR == dst.eglSlot.mEglImage) && (dst.slot.mGraphicBuffer == NULL)) { XLOGI("[%s] create dst buffer and image", __func__); XLOGI(" before create new aux buffer: %p", __func__, dg.get()); dg = dst.slot.mGraphicBuffer = new GraphicBuffer(sg->width, sg->height, hal_out_fmt, sg->usage); if (dg == NULL) { XLOGE(" create aux GraphicBuffer FAILED", __func__); freeAuxSlotLocked(dst); return BAD_VALUE; } else { XLOGI(" create aux GraphicBuffer: %p", __func__, dg.get()); } dst.eglSlot.mEglImage = createImage(mEglDisplay, dg); if (EGL_NO_IMAGE_KHR == dst.eglSlot.mEglImage) { XLOGE("[%s] create aux eglImage FAILED", __func__); freeAuxSlotLocked(dst); return BAD_VALUE; } XLOGI("[%s] create aux slot success", __func__); XLOGI(" src[w:%d, h:%d, f:0x%x], dst[w:%d, h:%d, f:0x%x]", sg->width, sg->height, sg->format, dg->width, dg->height, dg->format); dst.mMva = registerMva(dg); } status_t lockret; uint8_t *src_yp, *dst_yp; lockret = sg->lock(LOCK_FOR_MDP, (void**)&src_yp); if (NO_ERROR != lockret) { XLOGE("[%s] buffer lock fail: %s", __func__, strerror(lockret)); return INVALID_OPERATION; } lockret = dg->lock(LOCK_FOR_MDP, (void**)&dst_yp); if (NO_ERROR != lockret) { XLOGE("[%s] buffer lock fail: %s", __func__, strerror(lockret)); return INVALID_OPERATION; } { mHalBltParam_t bltParam; memset(&bltParam, 0, 
sizeof(bltParam)); bltParam.srcAddr = (MUINT32)src_yp; bltParam.srcX = 0; bltParam.srcY = 0; bltParam.srcW = sg->width; // !!! I420 content is forced 16 align !!! bltParam.srcWStride = ALIGN(sg->width, 16); bltParam.srcH = sg->height; bltParam.srcHStride = sg->height; bltParam.srcFormat = mdp_in_fmt; bltParam.dstAddr = (MUINT32)dst_yp; bltParam.dstW = dg->width; // already 32 align bltParam.pitch = dg->stride; bltParam.dstH = dg->height; bltParam.dstFormat = mdp_out_fmt; #ifdef MTK_75DISPLAY_ENHANCEMENT_SUPPORT bltParam.doImageProcess = (NATIVE_WINDOW_API_MEDIA == getConnectedApi()) ? 1 : 0; #endif // mdp bitblt and check if (MHAL_NO_ERROR != ipcBitBlt(&bltParam)) { if (1 == bltParam.doImageProcess) { XLOGW("[%s] bitblt FAILED with PQ, disable and try again", __func__); bltParam.doImageProcess = 0; if (MHAL_NO_ERROR != ipcBitBlt(&bltParam)) { XLOGE("[%s] bitblt FAILED, unlock buffer and return", __func__); dst.slot.mGraphicBuffer->unlock(); src.mGraphicBuffer->unlock(); return INVALID_OPERATION; } } else { XLOGE("[%s] bitblt FAILED, unlock buffer and return", __func__); dst.slot.mGraphicBuffer->unlock(); src.mGraphicBuffer->unlock(); return INVALID_OPERATION; } } else { // for drawing debug line if (true == mLine) { int _stride = bltParam.pitch; uint8_t *_ptr = (uint8_t*)bltParam.dstAddr; static uint32_t offset = bltParam.dstH / 4; //ST_XLOGI("!!!!! draw line, ptr: %p, offset: %d, stride: %d, height: %d", _ptr, offset, _stride, bltParam.dstH); if (NULL != _ptr) { memset((void*)(_ptr + offset * _stride * 3 / 2), 0xFF, _stride * 20 * 3 / 2); } offset += 20; if (offset >= bltParam.dstH * 3 / 4) offset = bltParam.dstH / 4; } } } dg->unlock(); sg->unlock(); #else // ! USE_MDP status_t err = swConversionLocked(src, dst); if (NO_ERROR != err) return err; #endif // USE_MDP mAuxSlotConvert = false; mAuxSlotDirty = true; } return NO_ERROR; }
status_t SurfaceTexture::syncForReleaseLocked(EGLDisplay dpy) { ST_LOGV("syncForReleaseLocked"); if (mCurrentTexture != BufferQueue::INVALID_BUFFER_SLOT) { if (useNativeFenceSync) { EGLSyncKHR sync = eglCreateSyncKHR(dpy, EGL_SYNC_NATIVE_FENCE_ANDROID, NULL); if (sync == EGL_NO_SYNC_KHR) { ST_LOGE("syncForReleaseLocked: error creating EGL fence: %#x", eglGetError()); return UNKNOWN_ERROR; } glFlush(); int fenceFd = eglDupNativeFenceFDANDROID(dpy, sync); eglDestroySyncKHR(dpy, sync); if (fenceFd == EGL_NO_NATIVE_FENCE_FD_ANDROID) { ST_LOGE("syncForReleaseLocked: error dup'ing native fence " "fd: %#x", eglGetError()); return UNKNOWN_ERROR; } sp<Fence> fence(new Fence(fenceFd)); status_t err = addReleaseFenceLocked(mCurrentTexture, fence); if (err != OK) { ST_LOGE("syncForReleaseLocked: error adding release fence: " "%s (%d)", strerror(-err), err); return err; } } else if (mUseFenceSync) { EGLSyncKHR fence = mEglSlots[mCurrentTexture].mEglFence; if (fence != EGL_NO_SYNC_KHR) { // There is already a fence for the current slot. We need to // wait on that before replacing it with another fence to // ensure that all outstanding buffer accesses have completed // before the producer accesses it. EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000); if (result == EGL_FALSE) { ST_LOGE("syncForReleaseLocked: error waiting for previous " "fence: %#x", eglGetError()); return UNKNOWN_ERROR; } else if (result == EGL_TIMEOUT_EXPIRED_KHR) { ST_LOGE("syncForReleaseLocked: timeout waiting for previous " "fence"); return TIMED_OUT; } eglDestroySyncKHR(dpy, fence); } // Create a fence for the outstanding accesses in the current // OpenGL ES context. fence = eglCreateSyncKHR(dpy, EGL_SYNC_FENCE_KHR, NULL); if (fence == EGL_NO_SYNC_KHR) { ST_LOGE("syncForReleaseLocked: error creating fence: %#x", eglGetError()); return UNKNOWN_ERROR; } glFlush(); mEglSlots[mCurrentTexture].mEglFence = fence; } } return OK; }
// Hands the producer a free slot to render into. On success *outBuf is the
// slot index and *outFence the fence the producer must wait on before
// writing. Blocks on mDequeueCondition until a slot frees up. The returned
// status may carry RELEASE_ALL_BUFFERS / BUFFER_NEEDS_REALLOCATION flags in
// addition to OK, or a negative error.
status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence, uint32_t w,
        uint32_t h, uint32_t format, uint32_t usage) {
    ATRACE_CALL();
    ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);

    // Width and height must be supplied together; both zero means "use the
    // default size" (handled below).
    if ((w && !h) || (!w && h)) {
        ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
        return BAD_VALUE;
    }

    status_t returnFlags(OK);
    EGLDisplay dpy = EGL_NO_DISPLAY;
    EGLSyncKHR eglFence = EGL_NO_SYNC_KHR;

    { // Scope for the lock
        Mutex::Autolock lock(mMutex);

        if (format == 0) {
            format = mDefaultBufferFormat;
        }
        // turn on usage bits the consumer requested
        usage |= mConsumerUsageBits;

        int found = -1;
        int dequeuedCount = 0;
        bool tryAgain = true;
        while (tryAgain) {
            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: BufferQueue has been abandoned!");
                return NO_INIT;
            }

            // Re-read each pass: the count can change while we were waiting.
            const int maxBufferCount = getMaxBufferCountLocked();

            // Free up any buffers that are in slots beyond the max buffer
            // count.
            for (int i = maxBufferCount; i < NUM_BUFFER_SLOTS; i++) {
                assert(mSlots[i].mBufferState == BufferSlot::FREE);
                if (mSlots[i].mGraphicBuffer != NULL) {
                    freeBufferLocked(i);
                    returnFlags |= IGraphicBufferProducer::RELEASE_ALL_BUFFERS;
                }
            }

            // look for a free buffer to give to the client
            found = INVALID_BUFFER_SLOT;
            dequeuedCount = 0;
            for (int i = 0; i < maxBufferCount; i++) {
                const int state = mSlots[i].mBufferState;
                if (state == BufferSlot::DEQUEUED) {
                    dequeuedCount++;
                }
                if (state == BufferSlot::FREE) {
                    /* We return the oldest of the free buffers to avoid
                     * stalling the producer if possible. This is because
                     * the consumer may still have pending reads of the
                     * buffers in flight.
                     */
                    if ((found < 0) ||
                            mSlots[i].mFrameNumber < mSlots[found].mFrameNumber) {
                        found = i;
                    }
                }
            }

            // clients are not allowed to dequeue more than one buffer
            // if they didn't set a buffer count.
            if (!mOverrideMaxBufferCount && dequeuedCount) {
                ST_LOGE("dequeueBuffer: can't dequeue multiple buffers without "
                        "setting the buffer count");
                return -EINVAL;
            }

            // See whether a buffer has been queued since the last
            // setBufferCount so we know whether to perform the min undequeued
            // buffers check below.
            if (mBufferHasBeenQueued) {
                // make sure the client is not trying to dequeue more buffers
                // than allowed.
                const int newUndequeuedCount = maxBufferCount - (dequeuedCount+1);
                const int minUndequeuedCount = getMinUndequeuedBufferCountLocked();
                if (newUndequeuedCount < minUndequeuedCount) {
                    ST_LOGE("dequeueBuffer: min undequeued buffer count (%d) "
                            "exceeded (dequeued=%d undequeudCount=%d)",
                            minUndequeuedCount, dequeuedCount,
                            newUndequeuedCount);
                    return -EBUSY;
                }
            }

            // If no buffer is found, wait for a buffer to be released or for
            // the max buffer count to change.
            tryAgain = found == INVALID_BUFFER_SLOT;
            if (tryAgain) {
                mDequeueCondition.wait(mMutex);
            }
        }

        if (found == INVALID_BUFFER_SLOT) {
            // This should not happen.
            ST_LOGE("dequeueBuffer: no available buffer slots");
            return -EBUSY;
        }

        const int buf = found;
        *outBuf = found;

        ATRACE_BUFFER_INDEX(buf);

        const bool useDefaultSize = !w && !h;
        if (useDefaultSize) {
            // use the default size
            w = mDefaultWidth;
            h = mDefaultHeight;
        }

        mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

        // Any mismatch between the request and the slot's existing buffer
        // (missing, wrong size/format, or missing usage bits) forces a
        // reallocation, performed below outside the lock.
        const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
        if ((buffer == NULL) ||
            (uint32_t(buffer->width)  != w) ||
            (uint32_t(buffer->height) != h) ||
            (uint32_t(buffer->format) != format) ||
            ((uint32_t(buffer->usage) & usage) != usage))
        {
            mSlots[buf].mAcquireCalled = false;
            mSlots[buf].mGraphicBuffer = NULL;
            mSlots[buf].mRequestBufferCalled = false;
            mSlots[buf].mEglFence = EGL_NO_SYNC_KHR;
            mSlots[buf].mFence = Fence::NO_FENCE;
            mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;

            returnFlags |= IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
        }

        // Take ownership of the slot's fences now; the potentially-slow
        // wait/destroy happens after the lock is dropped so other threads
        // aren't blocked behind it.
        dpy = mSlots[buf].mEglDisplay;
        eglFence = mSlots[buf].mEglFence;
        *outFence = mSlots[buf].mFence;
        mSlots[buf].mEglFence = EGL_NO_SYNC_KHR;
        mSlots[buf].mFence = Fence::NO_FENCE;
    }  // end lock scope

    if (returnFlags & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
        // Allocate outside the lock (allocation can be slow); re-take it only
        // to store the result, re-checking abandonment first.
        status_t error;
        sp<GraphicBuffer> graphicBuffer(
                mGraphicBufferAlloc->createGraphicBuffer(
                        w, h, format, usage, &error));
        if (graphicBuffer == 0) {
            ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                    "failed");
            return error;
        }

        { // Scope for the lock
            Mutex::Autolock lock(mMutex);

            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: BufferQueue has been abandoned!");
                return NO_INIT;
            }

            mSlots[*outBuf].mGraphicBuffer = graphicBuffer;
        }
    }

    if (eglFence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(dpy, eglFence, 0, 1000000000);
        // If something goes wrong, log the error, but return the buffer without
        // synchronizing access to it. It's too late at this point to abort the
        // dequeue operation.
        if (result == EGL_FALSE) {
            ST_LOGE("dequeueBuffer: error waiting for fence: %#x", eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            ST_LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(dpy, eglFence);
    }

    ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
            mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);

    return returnFlags;
}
void EglManager::fence() { EGLSyncKHR fence = eglCreateSyncKHR(mEglDisplay, EGL_SYNC_FENCE_KHR, NULL); eglClientWaitSyncKHR(mEglDisplay, fence, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, EGL_FOREVER_KHR); eglDestroySyncKHR(mEglDisplay, fence); }