// This function must be called inside the m_transferQueueItemLocks, for the // wait, m_interruptedByRemovingOp and getHasGLContext(). // Only called by updateQueueWithBitmap() for now. bool TransferQueue::readyForUpdate() { if (!getHasGLContext()) return false; // Don't use a while loop since when the WebView tear down, the emptyCount // will still be 0, and we bailed out b/c of GL context lost. if (!m_emptyItemCount) { if (m_interruptedByRemovingOp) return false; m_transferQueueItemCond.wait(m_transferQueueItemLocks); if (m_interruptedByRemovingOp) return false; } if (!getHasGLContext()) return false; // Disable this wait until we figure out why this didn't work on some // drivers b/5332112. #if 0 if (m_currentUploadType == GpuUpload && m_currentDisplay != EGL_NO_DISPLAY) { // Check the GPU fence EGLSyncKHR syncKHR = m_transferQueue[getNextTransferQueueIndex()].m_syncKHR; if (syncKHR != EGL_NO_SYNC_KHR) eglClientWaitSyncKHR(m_currentDisplay, syncKHR, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, EGL_FOREVER_KHR); } GLUtils::checkEglError("WaitSyncKHR"); #endif return true; }
// Destroys the EGL fence attached to |fbo|, if one exists.
// The fence is polled once (zero timeout, no flush flags) before being
// destroyed, and the FBO's sync handle is reset to EGL_NO_SYNC_KHR so a
// later call is a no-op.
void GraphicsContext3DInternal::destroyEGLSync(FBO* fbo)
{
    if (fbo->sync() != EGL_NO_SYNC_KHR) {
        // Zero-timeout wait: check in on the fence without blocking.
        eglClientWaitSyncKHR(m_dpy, fbo->sync(), 0, 0);
        eglDestroySyncKHR(m_dpy, fbo->sync());
        fbo->setSync(EGL_NO_SYNC_KHR);
    }
}
// Creates (and, if necessary, first drains) an EGL fence for the current
// texture slot so the producer does not reuse the buffer before the
// consumer's outstanding GL reads complete. Must be called with the object's
// lock held (Locked suffix). No-op unless fence sync is enabled and a
// current texture exists.
//
// Returns OK on success, TIMED_OUT if the previous fence did not signal
// within 1 second, or UNKNOWN_ERROR on an EGL failure.
status_t SurfaceTexture::syncForReleaseLocked(EGLDisplay dpy) {
    ST_LOGV("syncForReleaseLocked");

    if (mUseFenceSync && mCurrentTexture != BufferQueue::INVALID_BUFFER_SLOT) {
        EGLSyncKHR fence = mEGLSlots[mCurrentTexture].mFence;
        if (fence != EGL_NO_SYNC_KHR) {
            // There is already a fence for the current slot. We need to wait
            // on that before replacing it with another fence to ensure that all
            // outstanding buffer accesses have completed before the producer
            // accesses it.
            // 1000000000 ns == 1 s timeout; no flush flags are passed.
            EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
            if (result == EGL_FALSE) {
                ST_LOGE("syncForReleaseLocked: error waiting for previous "
                        "fence: %#x", eglGetError());
                return UNKNOWN_ERROR;
            } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
                ST_LOGE("syncForReleaseLocked: timeout waiting for previous "
                        "fence");
                return TIMED_OUT;
            }
            eglDestroySyncKHR(dpy, fence);
        }

        // Create a fence for the outstanding accesses in the current OpenGL ES
        // context.
        fence = eglCreateSyncKHR(dpy, EGL_SYNC_FENCE_KHR, NULL);
        if (fence == EGL_NO_SYNC_KHR) {
            ST_LOGE("syncForReleaseLocked: error creating fence: %#x",
                    eglGetError());
            return UNKNOWN_ERROR;
        }
        // Ensure the fence command reaches the GPU.
        glFlush();
        mEGLSlots[mCurrentTexture].mFence = fence;
    }

    return OK;
}
// Attaches a release fence to the current texture slot so the producer does
// not reuse the buffer before the consumer's outstanding GL reads complete.
// Must be called with the object's lock held (Locked suffix).
//
// Two paths:
//  - useNativeFenceSync: create an EGL_SYNC_NATIVE_FENCE_ANDROID sync, dup
//    its fd into a Fence object, and hand it to addReleaseFenceLocked().
//  - mUseFenceSync (legacy): drain the slot's previous KHR fence, then store
//    a fresh EGL_SYNC_FENCE_KHR in the slot.
//
// Returns OK on success, TIMED_OUT if the previous fence did not signal
// within 1 second, or UNKNOWN_ERROR / the addReleaseFenceLocked() error.
status_t SurfaceTexture::syncForReleaseLocked(EGLDisplay dpy) {
    ST_LOGV("syncForReleaseLocked");

    if (mCurrentTexture != BufferQueue::INVALID_BUFFER_SLOT) {
        if (useNativeFenceSync) {
            EGLSyncKHR sync = eglCreateSyncKHR(dpy,
                    EGL_SYNC_NATIVE_FENCE_ANDROID, NULL);
            if (sync == EGL_NO_SYNC_KHR) {
                ST_LOGE("syncForReleaseLocked: error creating EGL fence: %#x",
                        eglGetError());
                return UNKNOWN_ERROR;
            }
            // Flush so the fence is actually submitted before we dup its fd.
            glFlush();
            int fenceFd = eglDupNativeFenceFDANDROID(dpy, sync);
            // The sync object can be destroyed immediately; the dup'd fd
            // keeps the underlying native fence alive.
            eglDestroySyncKHR(dpy, sync);
            if (fenceFd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
                ST_LOGE("syncForReleaseLocked: error dup'ing native fence "
                        "fd: %#x", eglGetError());
                return UNKNOWN_ERROR;
            }
            sp<Fence> fence(new Fence(fenceFd));
            status_t err = addReleaseFenceLocked(mCurrentTexture, fence);
            if (err != OK) {
                ST_LOGE("syncForReleaseLocked: error adding release fence: "
                        "%s (%d)", strerror(-err), err);
                return err;
            }
        } else if (mUseFenceSync) {
            EGLSyncKHR fence = mEglSlots[mCurrentTexture].mEglFence;
            if (fence != EGL_NO_SYNC_KHR) {
                // There is already a fence for the current slot. We need to
                // wait on that before replacing it with another fence to
                // ensure that all outstanding buffer accesses have completed
                // before the producer accesses it.
                // 1000000000 ns == 1 s timeout; no flush flags are passed.
                EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
                if (result == EGL_FALSE) {
                    ST_LOGE("syncForReleaseLocked: error waiting for previous "
                            "fence: %#x", eglGetError());
                    return UNKNOWN_ERROR;
                } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
                    ST_LOGE("syncForReleaseLocked: timeout waiting for previous "
                            "fence");
                    return TIMED_OUT;
                }
                eglDestroySyncKHR(dpy, fence);
            }

            // Create a fence for the outstanding accesses in the current
            // OpenGL ES context.
            fence = eglCreateSyncKHR(dpy, EGL_SYNC_FENCE_KHR, NULL);
            if (fence == EGL_NO_SYNC_KHR) {
                ST_LOGE("syncForReleaseLocked: error creating fence: %#x",
                        eglGetError());
                return UNKNOWN_ERROR;
            }
            // Ensure the fence command reaches the GPU.
            glFlush();
            mEglSlots[mCurrentTexture].mEglFence = fence;
        }
    }

    return OK;
}
// JNI entry point: uploads the pixels of |bitmapHandle| (an SkBitmap*) into
// |graphicBuffer| using the GPU, so the driver swizzles the texture content
// into its preferred layout. Builds a throwaway EGL context + 1x1 pbuffer,
// wraps the GraphicBuffer in an EGLImage, glTexSubImage2D's the bitmap into
// it, then waits on an EGL fence to guarantee the upload completed.
//
// Returns JNI_TRUE on success, JNI_FALSE on any failure. All partially
// created GL/EGL objects are released through CLEANUP_GL_AND_RETURN, which
// is assumed to tear down texture/image/surface/fence/context and return
// its argument (defined elsewhere in this file).
static jboolean com_android_server_AssetAtlasService_upload(JNIEnv* env, jobject,
        jobject graphicBuffer, jlong bitmapHandle) {

    SkBitmap* bitmap = reinterpret_cast<SkBitmap*>(bitmapHandle);

    // The goal of this method is to copy the bitmap into the GraphicBuffer
    // using the GPU to swizzle the texture content
    sp<GraphicBuffer> buffer(graphicBufferForJavaObject(env, graphicBuffer));

    if (buffer != NULL) {
        EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
        if (display == EGL_NO_DISPLAY) return JNI_FALSE;

        EGLint major;
        EGLint minor;
        if (!eglInitialize(display, &major, &minor)) {
            ALOGW("Could not initialize EGL");
            return JNI_FALSE;
        }

        // We're going to use a 1x1 pbuffer surface later on
        // The configuration doesn't really matter for what we're trying to do
        EGLint configAttrs[] = {
                EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
                EGL_RED_SIZE, 8,
                EGL_GREEN_SIZE, 8,
                EGL_BLUE_SIZE, 8,
                EGL_ALPHA_SIZE, 0,
                EGL_DEPTH_SIZE, 0,
                EGL_STENCIL_SIZE, 0,
                EGL_NONE
        };
        EGLConfig configs[1];
        EGLint configCount;
        if (!eglChooseConfig(display, configAttrs, configs, 1, &configCount)) {
            ALOGW("Could not select EGL configuration");
            eglReleaseThread();
            eglTerminate(display);
            return JNI_FALSE;
        }
        if (configCount <= 0) {
            ALOGW("Could not find EGL configuration");
            eglReleaseThread();
            eglTerminate(display);
            return JNI_FALSE;
        }

        // These objects are initialized below but the default "null"
        // values are used to cleanup properly at any point in the
        // initialization sequence
        GLuint texture = 0;
        EGLImageKHR image = EGL_NO_IMAGE_KHR;
        EGLSurface surface = EGL_NO_SURFACE;
        EGLSyncKHR fence = EGL_NO_SYNC_KHR;

        EGLint attrs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
        EGLContext context = eglCreateContext(display, configs[0],
                EGL_NO_CONTEXT, attrs);
        if (context == EGL_NO_CONTEXT) {
            ALOGW("Could not create EGL context");
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        // Create the 1x1 pbuffer
        EGLint surfaceAttrs[] = { EGL_WIDTH, 1, EGL_HEIGHT, 1, EGL_NONE };
        surface = eglCreatePbufferSurface(display, configs[0], surfaceAttrs);
        if (surface == EGL_NO_SURFACE) {
            ALOGW("Could not create EGL surface");
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        if (!eglMakeCurrent(display, surface, surface, context)) {
            ALOGW("Could not change current EGL context");
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        // We use an EGLImage to access the content of the GraphicBuffer
        // The EGL image is later bound to a 2D texture
        EGLClientBuffer clientBuffer = (EGLClientBuffer) buffer->getNativeBuffer();
        // EGL_IMAGE_PRESERVED_KHR keeps any existing buffer content intact.
        EGLint imageAttrs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE };
        image = eglCreateImageKHR(display, EGL_NO_CONTEXT,
                EGL_NATIVE_BUFFER_ANDROID, clientBuffer, imageAttrs);
        if (image == EGL_NO_IMAGE_KHR) {
            ALOGW("Could not create EGL image");
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        glGenTextures(1, &texture);
        glBindTexture(GL_TEXTURE_2D, texture);
        glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
        if (glGetError() != GL_NO_ERROR) {
            ALOGW("Could not create/bind texture");
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        // Upload the content of the bitmap in the GraphicBuffer
        glPixelStorei(GL_UNPACK_ALIGNMENT, bitmap->bytesPerPixel());
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, bitmap->width(), bitmap->height(),
                GL_RGBA, GL_UNSIGNED_BYTE, bitmap->getPixels());
        if (glGetError() != GL_NO_ERROR) {
            ALOGW("Could not upload to texture");
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        // The fence is used to wait for the texture upload to finish
        // properly. We cannot rely on glFlush() and glFinish() as
        // some drivers completely ignore these API calls
        fence = eglCreateSyncKHR(display, EGL_SYNC_FENCE_KHR, NULL);
        if (fence == EGL_NO_SYNC_KHR) {
            ALOGW("Could not create sync fence %#x", eglGetError());
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        // The flag EGL_SYNC_FLUSH_COMMANDS_BIT_KHR will trigger a
        // pipeline flush (similar to what a glFlush() would do.)
        EGLint waitStatus = eglClientWaitSyncKHR(display, fence,
                EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, FENCE_TIMEOUT);
        if (waitStatus != EGL_CONDITION_SATISFIED_KHR) {
            ALOGW("Failed to wait for the fence %#x", eglGetError());
            CLEANUP_GL_AND_RETURN(JNI_FALSE);
        }

        // Success: same macro path releases every GL/EGL object.
        CLEANUP_GL_AND_RETURN(JNI_TRUE);
    }

    return JNI_FALSE;
}
// Dequeues a buffer slot for the producer. Blocks (on mDequeueCondition)
// until a FREE slot is available and buffer-count constraints are satisfied.
// On success *outBuf holds the slot index; returnFlags may carry
// RELEASE_ALL_BUFFERS and/or BUFFER_NEEDS_REALLOCATION.
//
// Errors: BAD_VALUE (w/h given without the other), NO_INIT (abandoned),
// -EINVAL (multiple dequeues without a buffer count), -EBUSY (min
// undequeued buffer constraint / no slot), or the allocator's error.
//
// BUGFIX: the FREE-slot selection used to compute
//     bool isOlder = mSlots[i].mFrameNumber < mSlots[found].mFrameNumber;
// before testing found < 0, reading mSlots[-1] (found starts at
// INVALID_BUFFER_SLOT == -1) — an out-of-bounds access. The comparison is
// now short-circuited behind the found < 0 check.
status_t BufferQueue::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
        uint32_t format, uint32_t usage) {
    ATRACE_CALL();
    ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);

    // Width and height must be specified together (or both defaulted).
    if ((w && !h) || (!w && h)) {
        ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
        return BAD_VALUE;
    }

    status_t returnFlags(OK);
    EGLDisplay dpy = EGL_NO_DISPLAY;
    EGLSyncKHR fence = EGL_NO_SYNC_KHR;

    { // Scope for the lock
        Mutex::Autolock lock(mMutex);

        if (format == 0) {
            format = mDefaultBufferFormat;
        }
        // turn on usage bits the consumer requested
        usage |= mConsumerUsageBits;

        int found = -1;
        int foundSync = -1;
        int dequeuedCount = 0;
        bool tryAgain = true;
        while (tryAgain) {
            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: SurfaceTexture has been abandoned!");
                return NO_INIT;
            }

            // We need to wait for the FIFO to drain if the number of buffer
            // needs to change.
            //
            // The condition "number of buffers needs to change" is true if
            // - the client doesn't care about how many buffers there are
            // - AND the actual number of buffer is different from what was
            //   set in the last setBufferCountServer()
            //                                         - OR -
            //   setBufferCountServer() was set to a value incompatible with
            //   the synchronization mode (for instance because the sync mode
            //   changed since)
            //
            // As long as this condition is true AND the FIFO is not empty, we
            // wait on mDequeueCondition.
            const int minBufferCountNeeded = mSynchronousMode ?
                    mMinSyncBufferSlots : mMinAsyncBufferSlots;

            const bool numberOfBuffersNeedsToChange = !mClientBufferCount &&
                    ((mServerBufferCount != mBufferCount) ||
                            (mServerBufferCount < minBufferCountNeeded));

            if (!mQueue.isEmpty() && numberOfBuffersNeedsToChange) {
                // wait for the FIFO to drain
                mDequeueCondition.wait(mMutex);
                // NOTE: we continue here because we need to reevaluate our
                // whole state (eg: we could be abandoned or disconnected)
                continue;
            }

            if (numberOfBuffersNeedsToChange) {
                // here we're guaranteed that mQueue is empty
                freeAllBuffersLocked();
                mBufferCount = mServerBufferCount;
                if (mBufferCount < minBufferCountNeeded)
                    mBufferCount = minBufferCountNeeded;
                mBufferHasBeenQueued = false;
                returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS;
            }

            // look for a free buffer to give to the client
            found = INVALID_BUFFER_SLOT;
            foundSync = INVALID_BUFFER_SLOT;
            dequeuedCount = 0;
            for (int i = 0; i < mBufferCount; i++) {
                const int state = mSlots[i].mBufferState;
                if (state == BufferSlot::DEQUEUED) {
                    dequeuedCount++;
                }

                // this logic used to be if (FLAG_ALLOW_DEQUEUE_CURRENT_BUFFER)
                // but dequeuing the current buffer is disabled.
                if (false) {
                    // This functionality has been temporarily removed so
                    // BufferQueue and SurfaceTexture can be refactored into
                    // separate objects
                } else {
                    if (state == BufferSlot::FREE) {
                        /* We return the oldest of the free buffers to avoid
                         * stalling the producer if possible. This is because
                         * the consumer may still have pending reads of the
                         * buffers in flight.
                         */
                        // The frame-number comparison must only run once a
                        // candidate exists; evaluating it with found == -1
                        // read mSlots[-1] out of bounds.
                        if ((found < 0) ||
                                mSlots[i].mFrameNumber < mSlots[found].mFrameNumber) {
                            foundSync = i;
                            found = i;
                        }
                    }
                }
            }

            // clients are not allowed to dequeue more than one buffer
            // if they didn't set a buffer count.
            if (!mClientBufferCount && dequeuedCount) {
                ST_LOGE("dequeueBuffer: can't dequeue multiple buffers without "
                        "setting the buffer count");
                return -EINVAL;
            }

            // See whether a buffer has been queued since the last
            // setBufferCount so we know whether to perform the
            // mMinUndequeuedBuffers check below.
            if (mBufferHasBeenQueued) {
                // make sure the client is not trying to dequeue more buffers
                // than allowed.
                const int avail = mBufferCount - (dequeuedCount+1);
                if (avail < (mMinUndequeuedBuffers-int(mSynchronousMode))) {
                    ST_LOGE("dequeueBuffer: mMinUndequeuedBuffers=%d exceeded "
                            "(dequeued=%d)",
                            mMinUndequeuedBuffers-int(mSynchronousMode),
                            dequeuedCount);
                    return -EBUSY;
                }
            }

            // if no buffer is found, wait for a buffer to be released
            tryAgain = found == INVALID_BUFFER_SLOT;
            if (tryAgain) {
                mDequeueCondition.wait(mMutex);
            }
        }

        if (found == INVALID_BUFFER_SLOT) {
            // This should not happen.
            ST_LOGE("dequeueBuffer: no available buffer slots");
            return -EBUSY;
        }

        const int buf = found;
        *outBuf = found;

        ATRACE_BUFFER_INDEX(buf);

        const bool useDefaultSize = !w && !h;
        if (useDefaultSize) {
            // use the default size
            w = mDefaultWidth;
            h = mDefaultHeight;
        }

        const bool updateFormat = (format != 0);
        if (!updateFormat) {
            // keep the current (or default) format
            format = mPixelFormat;
        }

        // buffer is now in DEQUEUED (but can also be current at the same time,
        // if we're in synchronous mode)
        mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

        // Reallocate the GraphicBuffer if the requested geometry/format/usage
        // no longer matches the one stored in the slot.
        const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
        if ((buffer == NULL) ||
            (uint32_t(buffer->width)  != w) ||
            (uint32_t(buffer->height) != h) ||
            (uint32_t(buffer->format) != format) ||
            ((uint32_t(buffer->usage) & usage) != usage))
        {
            status_t error;
            sp<GraphicBuffer> graphicBuffer(
                    mGraphicBufferAlloc->createGraphicBuffer(
                            w, h, format, usage, &error));
            if (graphicBuffer == 0) {
                ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                        "failed");
                return error;
            }
            if (updateFormat) {
                mPixelFormat = format;
            }

            mSlots[buf].mAcquireCalled = false;
            mSlots[buf].mGraphicBuffer = graphicBuffer;
            mSlots[buf].mRequestBufferCalled = false;
            mSlots[buf].mFence = EGL_NO_SYNC_KHR;
            mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;

            returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
        }

        // Take ownership of the slot's fence; the wait happens outside the
        // lock so we don't block other producer/consumer calls.
        dpy = mSlots[buf].mEglDisplay;
        fence = mSlots[buf].mFence;
        mSlots[buf].mFence = EGL_NO_SYNC_KHR;
    }  // end lock scope

    if (fence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
        // If something goes wrong, log the error, but return the buffer without
        // synchronizing access to it. It's too late at this point to abort the
        // dequeue operation.
        if (result == EGL_FALSE) {
            ST_LOGE("dequeueBuffer: error waiting for fence: %#x", eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            ST_LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(dpy, fence);
    }

    ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
            mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);

    return returnFlags;
}
// Dequeues a buffer slot for the producer. Blocks (on mDequeueCondition)
// until a FREE slot is available and buffer-count constraints are satisfied.
// On success *outBuf holds the slot index; returnFlags may carry
// RELEASE_ALL_BUFFERS and/or BUFFER_NEEDS_REALLOCATION. Vendor paths:
// MISSING_GRALLOC_BUFFERS adds retry/bump heuristics, QCOM_HARDWARE swaps
// the geometry comparison for needNewBuffer().
//
// BUGFIX: the FREE-slot selection used to compute
//     bool isOlder = mSlots[i].mFrameNumber < mSlots[found].mFrameNumber;
// before testing found < 0, reading mSlots[-1] (found starts at
// INVALID_BUFFER_SLOT == -1) — an out-of-bounds access. The comparison is
// now short-circuited behind the found < 0 check.
status_t SurfaceTexture::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
        uint32_t format, uint32_t usage) {
    ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);

    // Width and height must be specified together (or both defaulted).
    if ((w && !h) || (!w && h)) {
        ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
        return BAD_VALUE;
    }

    status_t returnFlags(OK);
    EGLDisplay dpy = EGL_NO_DISPLAY;
    EGLSyncKHR fence = EGL_NO_SYNC_KHR;

    { // Scope for the lock
        Mutex::Autolock lock(mMutex);

        int found = -1;
        int foundSync = -1;
        int dequeuedCount = 0;
        bool tryAgain = true;
#ifdef MISSING_GRALLOC_BUFFERS
        int dequeueRetries = 5;
#endif
        while (tryAgain) {
            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: SurfaceTexture has been abandoned!");
                return NO_INIT;
            }

            // We need to wait for the FIFO to drain if the number of buffer
            // needs to change.
            //
            // The condition "number of buffers needs to change" is true if
            // - the client doesn't care about how many buffers there are
            // - AND the actual number of buffer is different from what was
            //   set in the last setBufferCountServer()
            //                                         - OR -
            //   setBufferCountServer() was set to a value incompatible with
            //   the synchronization mode (for instance because the sync mode
            //   changed since)
            //
            // As long as this condition is true AND the FIFO is not empty, we
            // wait on mDequeueCondition.
            const int minBufferCountNeeded = mSynchronousMode ?
                    MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS;

            const bool numberOfBuffersNeedsToChange = !mClientBufferCount &&
                    ((mServerBufferCount != mBufferCount) ||
                            (mServerBufferCount < minBufferCountNeeded));

            if (!mQueue.isEmpty() && numberOfBuffersNeedsToChange) {
                // wait for the FIFO to drain
                mDequeueCondition.wait(mMutex);
                // NOTE: we continue here because we need to reevaluate our
                // whole state (eg: we could be abandoned or disconnected)
                continue;
            }

            if (numberOfBuffersNeedsToChange) {
                // here we're guaranteed that mQueue is empty
                freeAllBuffersLocked();
                mBufferCount = mServerBufferCount;
                if (mBufferCount < minBufferCountNeeded)
                    mBufferCount = minBufferCountNeeded;
                mCurrentTexture = INVALID_BUFFER_SLOT;
                returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS;
            }

            // look for a free buffer to give to the client
            found = INVALID_BUFFER_SLOT;
            foundSync = INVALID_BUFFER_SLOT;
            dequeuedCount = 0;
            for (int i = 0; i < mBufferCount; i++) {
                const int state = mSlots[i].mBufferState;
                if (state == BufferSlot::DEQUEUED) {
                    dequeuedCount++;
                }

                // if buffer is FREE it CANNOT be current
                LOGW_IF((state == BufferSlot::FREE) && (mCurrentTexture==i),
                        "dequeueBuffer: buffer %d is both FREE and current!", i);

                if (FLAG_ALLOW_DEQUEUE_CURRENT_BUFFER) {
                    if (state == BufferSlot::FREE || i == mCurrentTexture) {
                        foundSync = i;
                        if (i != mCurrentTexture) {
                            found = i;
                            break;
                        }
                    }
                } else {
                    if (state == BufferSlot::FREE) {
                        /* We return the oldest of the free buffers to avoid
                         * stalling the producer if possible. This is because
                         * the consumer may still have pending reads of the
                         * buffers in flight.
                         */
                        // The frame-number comparison must only run once a
                        // candidate exists; evaluating it with found == -1
                        // read mSlots[-1] out of bounds.
                        if ((found < 0) ||
                                mSlots[i].mFrameNumber < mSlots[found].mFrameNumber) {
                            foundSync = i;
                            found = i;
                        }
                    }
                }
            }

            // clients are not allowed to dequeue more than one buffer
            // if they didn't set a buffer count.
            if (!mClientBufferCount && dequeuedCount) {
#ifdef MISSING_GRALLOC_BUFFERS
                if (--dequeueRetries) {
                    LOGD("SurfaceTexture::dequeue: Not allowed to dequeue more "
                            "than a buffer SLEEPING\n");
                    usleep(10000);
                } else {
                    mClientBufferCount = mServerBufferCount;
                    LOGD("SurfaceTexture::dequeue: Not allowed to dequeue more "
                            "than a buffer RETRY mBufferCount:%d mServerBufferCount:%d\n",
                            mBufferCount, mServerBufferCount);
                }
                continue;
#else
                ST_LOGE("dequeueBuffer: can't dequeue multiple buffers without "
                        "setting the buffer count");
#endif
                return -EINVAL;
            }

            // See whether a buffer has been queued since the last
            // setBufferCount so we know whether to perform the
            // MIN_UNDEQUEUED_BUFFERS check below.
            bool bufferHasBeenQueued = mCurrentTexture != INVALID_BUFFER_SLOT;
            if (bufferHasBeenQueued) {
                // make sure the client is not trying to dequeue more buffers
                // than allowed.
                const int avail = mBufferCount - (dequeuedCount+1);
                if (avail < (MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode))) {
#ifdef MISSING_GRALLOC_BUFFERS
                    if (mClientBufferCount != 0) {
                        mBufferCount++;
                        mClientBufferCount = mServerBufferCount = mBufferCount;
                        LOGD("SurfaceTexture::dequeuebuffer: MIN EXCEEDED "
                                "mBuffer:%d bumped\n", mBufferCount);
                        continue;
                    }
#endif
                    ST_LOGE("dequeueBuffer: MIN_UNDEQUEUED_BUFFERS=%d exceeded "
                            "(dequeued=%d)",
                            MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode),
                            dequeuedCount);
                    return -EBUSY;
                }
            }

            // we're in synchronous mode and didn't find a buffer, we need to
            // wait for some buffers to be consumed
            tryAgain = mSynchronousMode && (foundSync == INVALID_BUFFER_SLOT);
            if (tryAgain) {
                mDequeueCondition.wait(mMutex);
            }
        }

        if (mSynchronousMode && found == INVALID_BUFFER_SLOT) {
            // foundSync guaranteed to be != INVALID_BUFFER_SLOT
            found = foundSync;
        }

        if (found == INVALID_BUFFER_SLOT) {
            // This should not happen.
            ST_LOGE("dequeueBuffer: no available buffer slots");
            return -EBUSY;
        }

        const int buf = found;
        *outBuf = found;

        const bool useDefaultSize = !w && !h;
        if (useDefaultSize) {
            // use the default size
            w = mDefaultWidth;
            h = mDefaultHeight;
        }

        const bool updateFormat = (format != 0);
        if (!updateFormat) {
            // keep the current (or default) format
            format = mPixelFormat;
        }

        // buffer is now in DEQUEUED (but can also be current at the same time,
        // if we're in synchronous mode)
        mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

        const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
#ifdef QCOM_HARDWARE
        qBufGeometry currentGeometry;
        if (buffer != NULL)
            currentGeometry.set(buffer->width, buffer->height, buffer->format);
        else
            currentGeometry.set(0, 0, 0);

        qBufGeometry requiredGeometry;
        requiredGeometry.set(w, h, format);

        qBufGeometry updatedGeometry;
        updatedGeometry.set(mNextBufferInfo.width, mNextBufferInfo.height,
                mNextBufferInfo.format);
#endif
        // Reallocate the GraphicBuffer if the requested geometry/format/usage
        // no longer matches the one stored in the slot.
        if ((buffer == NULL) ||
#ifdef QCOM_HARDWARE
            needNewBuffer(currentGeometry, requiredGeometry, updatedGeometry) ||
#else
            (uint32_t(buffer->width)  != w) ||
            (uint32_t(buffer->height) != h) ||
            (uint32_t(buffer->format) != format) ||
#endif
            ((uint32_t(buffer->usage) & usage) != usage))
        {
#ifdef QCOM_HARDWARE
            if (buffer != NULL) {
                mGraphicBufferAlloc->freeGraphicBufferAtIndex(buf);
            }
#endif
            usage |= GraphicBuffer::USAGE_HW_TEXTURE;
            status_t error;
            sp<GraphicBuffer> graphicBuffer(
                    mGraphicBufferAlloc->createGraphicBuffer(
                            w, h, format, usage, &error));
            if (graphicBuffer == 0) {
                ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                        "failed");
                return error;
            }
            if (updateFormat) {
                mPixelFormat = format;
            }
#ifdef QCOM_HARDWARE
            checkBuffer((native_handle_t *)graphicBuffer->handle, mReqSize, usage);
#endif
            mSlots[buf].mGraphicBuffer = graphicBuffer;
            mSlots[buf].mRequestBufferCalled = false;
            mSlots[buf].mFence = EGL_NO_SYNC_KHR;
            if (mSlots[buf].mEglImage != EGL_NO_IMAGE_KHR) {
                eglDestroyImageKHR(mSlots[buf].mEglDisplay,
                        mSlots[buf].mEglImage);
                mSlots[buf].mEglImage = EGL_NO_IMAGE_KHR;
                mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;
            }
            if (mCurrentTexture == buf) {
                // The current texture no longer references the buffer in this slot
                // since we just allocated a new buffer.
                mCurrentTexture = INVALID_BUFFER_SLOT;
            }
            returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
        }

        // Take ownership of the slot's fence; the wait happens outside the
        // lock so we don't block other producer/consumer calls.
        dpy = mSlots[buf].mEglDisplay;
        fence = mSlots[buf].mFence;
        mSlots[buf].mFence = EGL_NO_SYNC_KHR;
    }

    if (fence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(dpy, fence, 0, 1000000000);
        // If something goes wrong, log the error, but return the buffer without
        // synchronizing access to it. It's too late at this point to abort the
        // dequeue operation.
        if (result == EGL_FALSE) {
            LOGE("dequeueBuffer: error waiting for fence: %#x", eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(dpy, fence);
    }

    ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
            mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);

    return returnFlags;
}
// Dequeues a buffer slot for the producer (newer variant with an explicit
// out-fence). Blocks (on mDequeueCondition) until a FREE slot is available
// and buffer-count constraints are satisfied. On success *outBuf holds the
// slot index and *outFence the fence the producer must wait on before
// writing; returnFlags may carry RELEASE_ALL_BUFFERS and/or
// BUFFER_NEEDS_REALLOCATION. Buffer allocation happens OUTSIDE the lock so
// a slow gralloc call does not stall other BufferQueue operations.
//
// Errors: BAD_VALUE (w/h given without the other), NO_INIT (abandoned),
// -EINVAL (multiple dequeues without an overridden buffer count), -EBUSY
// (min undequeued buffer constraint / no slot), or the allocator's error.
status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
        uint32_t w, uint32_t h, uint32_t format, uint32_t usage) {
    ATRACE_CALL();
    ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);

    // Width and height must be specified together (or both defaulted).
    if ((w && !h) || (!w && h)) {
        ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
        return BAD_VALUE;
    }

    status_t returnFlags(OK);
    EGLDisplay dpy = EGL_NO_DISPLAY;
    EGLSyncKHR eglFence = EGL_NO_SYNC_KHR;

    { // Scope for the lock
        Mutex::Autolock lock(mMutex);

        if (format == 0) {
            format = mDefaultBufferFormat;
        }
        // turn on usage bits the consumer requested
        usage |= mConsumerUsageBits;

        int found = -1;
        int dequeuedCount = 0;
        bool tryAgain = true;
        while (tryAgain) {
            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: BufferQueue has been abandoned!");
                return NO_INIT;
            }

            const int maxBufferCount = getMaxBufferCountLocked();

            // Free up any buffers that are in slots beyond the max buffer
            // count.
            for (int i = maxBufferCount; i < NUM_BUFFER_SLOTS; i++) {
                assert(mSlots[i].mBufferState == BufferSlot::FREE);
                if (mSlots[i].mGraphicBuffer != NULL) {
                    freeBufferLocked(i);
                    returnFlags |= IGraphicBufferProducer::RELEASE_ALL_BUFFERS;
                }
            }

            // look for a free buffer to give to the client
            found = INVALID_BUFFER_SLOT;
            dequeuedCount = 0;
            for (int i = 0; i < maxBufferCount; i++) {
                const int state = mSlots[i].mBufferState;
                if (state == BufferSlot::DEQUEUED) {
                    dequeuedCount++;
                }

                if (state == BufferSlot::FREE) {
                    /* We return the oldest of the free buffers to avoid
                     * stalling the producer if possible. This is because
                     * the consumer may still have pending reads of the
                     * buffers in flight.
                     */
                    // found < 0 is checked first so mSlots[found] is never
                    // read before a candidate exists.
                    if ((found < 0) ||
                            mSlots[i].mFrameNumber < mSlots[found].mFrameNumber) {
                        found = i;
                    }
                }
            }

            // clients are not allowed to dequeue more than one buffer
            // if they didn't set a buffer count.
            if (!mOverrideMaxBufferCount && dequeuedCount) {
                ST_LOGE("dequeueBuffer: can't dequeue multiple buffers without "
                        "setting the buffer count");
                return -EINVAL;
            }

            // See whether a buffer has been queued since the last
            // setBufferCount so we know whether to perform the min undequeued
            // buffers check below.
            if (mBufferHasBeenQueued) {
                // make sure the client is not trying to dequeue more buffers
                // than allowed.
                const int newUndequeuedCount = maxBufferCount - (dequeuedCount+1);
                const int minUndequeuedCount = getMinUndequeuedBufferCountLocked();
                if (newUndequeuedCount < minUndequeuedCount) {
                    ST_LOGE("dequeueBuffer: min undequeued buffer count (%d) "
                            "exceeded (dequeued=%d undequeudCount=%d)",
                            minUndequeuedCount, dequeuedCount,
                            newUndequeuedCount);
                    return -EBUSY;
                }
            }

            // If no buffer is found, wait for a buffer to be released or for
            // the max buffer count to change.
            tryAgain = found == INVALID_BUFFER_SLOT;
            if (tryAgain) {
                mDequeueCondition.wait(mMutex);
            }
        }

        if (found == INVALID_BUFFER_SLOT) {
            // This should not happen.
            ST_LOGE("dequeueBuffer: no available buffer slots");
            return -EBUSY;
        }

        const int buf = found;
        *outBuf = found;

        ATRACE_BUFFER_INDEX(buf);

        const bool useDefaultSize = !w && !h;
        if (useDefaultSize) {
            // use the default size
            w = mDefaultWidth;
            h = mDefaultHeight;
        }

        mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

        // If the slot's buffer doesn't match the requested geometry/format/
        // usage, clear the slot and flag for (out-of-lock) reallocation.
        const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
        if ((buffer == NULL) ||
            (uint32_t(buffer->width)  != w) ||
            (uint32_t(buffer->height) != h) ||
            (uint32_t(buffer->format) != format) ||
            ((uint32_t(buffer->usage) & usage) != usage))
        {
            mSlots[buf].mAcquireCalled = false;
            mSlots[buf].mGraphicBuffer = NULL;
            mSlots[buf].mRequestBufferCalled = false;
            mSlots[buf].mEglFence = EGL_NO_SYNC_KHR;
            mSlots[buf].mFence = Fence::NO_FENCE;
            mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;

            returnFlags |= IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
        }

        // Take ownership of the slot's fences; the EGL fence wait happens
        // outside the lock below.
        dpy = mSlots[buf].mEglDisplay;
        eglFence = mSlots[buf].mEglFence;
        *outFence = mSlots[buf].mFence;
        mSlots[buf].mEglFence = EGL_NO_SYNC_KHR;
        mSlots[buf].mFence = Fence::NO_FENCE;
    }  // end lock scope

    if (returnFlags & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
        // Allocate without holding the lock; re-take it only to store the
        // result (re-checking for abandonment after the gap).
        status_t error;
        sp<GraphicBuffer> graphicBuffer(
                mGraphicBufferAlloc->createGraphicBuffer(
                        w, h, format, usage, &error));
        if (graphicBuffer == 0) {
            ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                    "failed");
            return error;
        }

        { // Scope for the lock
            Mutex::Autolock lock(mMutex);

            if (mAbandoned) {
                ST_LOGE("dequeueBuffer: BufferQueue has been abandoned!");
                return NO_INIT;
            }

            mSlots[*outBuf].mGraphicBuffer = graphicBuffer;
        }
    }

    if (eglFence != EGL_NO_SYNC_KHR) {
        EGLint result = eglClientWaitSyncKHR(dpy, eglFence, 0, 1000000000);
        // If something goes wrong, log the error, but return the buffer without
        // synchronizing access to it. It's too late at this point to abort the
        // dequeue operation.
        if (result == EGL_FALSE) {
            ST_LOGE("dequeueBuffer: error waiting for fence: %#x", eglGetError());
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            ST_LOGE("dequeueBuffer: timeout waiting for fence");
        }
        eglDestroySyncKHR(dpy, eglFence);
    }

    ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
            mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);

    return returnFlags;
}
// conversion function should format by format, chip by chip // currently input is MTK_I420, and output is IMG_YV12/ABGR status_t SurfaceTexture::convertToAuxSlotLocked(bool isForce) { // check invalid buffer if (BufferQueue::INVALID_BUFFER_SLOT == mCurrentTexture) { mAuxSlotConvert = false; return INVALID_OPERATION; } ATRACE_CALL(); // 1) normal BufferQueue needs conversion now // 2) SurfaceTextureLayer neesd conversion after HWC bool isNeedConversionNow = (BufferQueue::TYPE_BufferQueue == mBufferQueue->getType()) || ((true == isForce) && (BufferQueue::TYPE_SurfaceTextureLayer == mBufferQueue->getType())); //if ((true == isNeedConversionNow) && (BufferQueue::NO_CONNECTED_API != getConnectedApi())) { if (true == isNeedConversionNow) { XLOGI("do convertToAuxSlot..."); Slot &src = mSlots[mCurrentTexture]; AuxSlot &dst = *mBackAuxSlot; // fence sync here for buffer not used by G3D EGLSyncKHR fence = mFrontAuxSlot->eglSlot.mEglFence; if (fence != EGL_NO_SYNC_KHR) { EGLint result = eglClientWaitSyncKHR(mEglDisplay, fence, 0, 1000000000); if (result == EGL_FALSE) { XLOGW("[%s] FAILED waiting for front fence: %#x, tearing risk", __func__, eglGetError()); } else if (result == EGL_TIMEOUT_EXPIRED_KHR) { XLOGW("[%s] TIMEOUT waiting for front fence, tearing risk", __func__); } eglDestroySyncKHR(mEglDisplay, fence); mFrontAuxSlot->eglSlot.mEglFence = EGL_NO_SYNC_KHR; } #ifdef USE_MDP uint32_t hal_out_fmt; uint32_t mdp_in_fmt; uint32_t mdp_out_fmt; //if (NATIVE_WINDOW_API_CAMERA == getConnectedApi()) { hal_out_fmt = HAL_PIXEL_FORMAT_RGBA_8888; // camera path needs RGBA for MDP resource mdp_out_fmt = MHAL_FORMAT_ABGR_8888; //} else { // hal_out_fmt = HAL_PIXEL_FORMAT_YV12; // mdp_out_fmt = MHAL_FORMAT_IMG_YV12; //} // !!! only convert for I420 now !!! 
mdp_in_fmt = MHAL_FORMAT_YUV_420; // source graphic buffer sp<GraphicBuffer> sg = src.mGraphicBuffer; // destination graphic buffer sp<GraphicBuffer> dg = dst.slot.mGraphicBuffer; // free if current aux slot exist and not fit if ((EGL_NO_IMAGE_KHR != dst.eglSlot.mEglImage && dg != NULL) && ((sg->width != dg->width) || (sg->height != dg->height) || (hal_out_fmt != (uint32_t)dg->format))) { XLOGI("[%s] free old aux slot ", __func__); XLOGI(" src[w:%d, h:%d, f:0x%x] dst[w:%d, h:%d, f:0x%x] required format:0x%x", sg->width, sg->height, sg->format, dg->width, dg->height, dg->format, hal_out_fmt); freeAuxSlotLocked(dst); } // create aux buffer if current is NULL if ((EGL_NO_IMAGE_KHR == dst.eglSlot.mEglImage) && (dst.slot.mGraphicBuffer == NULL)) { XLOGI("[%s] create dst buffer and image", __func__); XLOGI(" before create new aux buffer: %p", __func__, dg.get()); dg = dst.slot.mGraphicBuffer = new GraphicBuffer(sg->width, sg->height, hal_out_fmt, sg->usage); if (dg == NULL) { XLOGE(" create aux GraphicBuffer FAILED", __func__); freeAuxSlotLocked(dst); return BAD_VALUE; } else { XLOGI(" create aux GraphicBuffer: %p", __func__, dg.get()); } dst.eglSlot.mEglImage = createImage(mEglDisplay, dg); if (EGL_NO_IMAGE_KHR == dst.eglSlot.mEglImage) { XLOGE("[%s] create aux eglImage FAILED", __func__); freeAuxSlotLocked(dst); return BAD_VALUE; } XLOGI("[%s] create aux slot success", __func__); XLOGI(" src[w:%d, h:%d, f:0x%x], dst[w:%d, h:%d, f:0x%x]", sg->width, sg->height, sg->format, dg->width, dg->height, dg->format); dst.mMva = registerMva(dg); } status_t lockret; uint8_t *src_yp, *dst_yp; lockret = sg->lock(LOCK_FOR_MDP, (void**)&src_yp); if (NO_ERROR != lockret) { XLOGE("[%s] buffer lock fail: %s", __func__, strerror(lockret)); return INVALID_OPERATION; } lockret = dg->lock(LOCK_FOR_MDP, (void**)&dst_yp); if (NO_ERROR != lockret) { XLOGE("[%s] buffer lock fail: %s", __func__, strerror(lockret)); return INVALID_OPERATION; } { mHalBltParam_t bltParam; memset(&bltParam, 0, 
sizeof(bltParam)); bltParam.srcAddr = (MUINT32)src_yp; bltParam.srcX = 0; bltParam.srcY = 0; bltParam.srcW = sg->width; // !!! I420 content is forced 16 align !!! bltParam.srcWStride = ALIGN(sg->width, 16); bltParam.srcH = sg->height; bltParam.srcHStride = sg->height; bltParam.srcFormat = mdp_in_fmt; bltParam.dstAddr = (MUINT32)dst_yp; bltParam.dstW = dg->width; // already 32 align bltParam.pitch = dg->stride; bltParam.dstH = dg->height; bltParam.dstFormat = mdp_out_fmt; #ifdef MTK_75DISPLAY_ENHANCEMENT_SUPPORT bltParam.doImageProcess = (NATIVE_WINDOW_API_MEDIA == getConnectedApi()) ? 1 : 0; #endif // mdp bitblt and check if (MHAL_NO_ERROR != ipcBitBlt(&bltParam)) { if (1 == bltParam.doImageProcess) { XLOGW("[%s] bitblt FAILED with PQ, disable and try again", __func__); bltParam.doImageProcess = 0; if (MHAL_NO_ERROR != ipcBitBlt(&bltParam)) { XLOGE("[%s] bitblt FAILED, unlock buffer and return", __func__); dst.slot.mGraphicBuffer->unlock(); src.mGraphicBuffer->unlock(); return INVALID_OPERATION; } } else { XLOGE("[%s] bitblt FAILED, unlock buffer and return", __func__); dst.slot.mGraphicBuffer->unlock(); src.mGraphicBuffer->unlock(); return INVALID_OPERATION; } } else { // for drawing debug line if (true == mLine) { int _stride = bltParam.pitch; uint8_t *_ptr = (uint8_t*)bltParam.dstAddr; static uint32_t offset = bltParam.dstH / 4; //ST_XLOGI("!!!!! draw line, ptr: %p, offset: %d, stride: %d, height: %d", _ptr, offset, _stride, bltParam.dstH); if (NULL != _ptr) { memset((void*)(_ptr + offset * _stride * 3 / 2), 0xFF, _stride * 20 * 3 / 2); } offset += 20; if (offset >= bltParam.dstH * 3 / 4) offset = bltParam.dstH / 4; } } } dg->unlock(); sg->unlock(); #else // ! USE_MDP status_t err = swConversionLocked(src, dst); if (NO_ERROR != err) return err; #endif // USE_MDP mAuxSlotConvert = false; mAuxSlotDirty = true; } return NO_ERROR; }
void EglManager::fence() { EGLSyncKHR fence = eglCreateSyncKHR(mEglDisplay, EGL_SYNC_FENCE_KHR, NULL); eglClientWaitSyncKHR(mEglDisplay, fence, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, EGL_FOREVER_KHR); eglDestroySyncKHR(mEglDisplay, fence); }