status_t GraphicBufferMapper::unlock(buffer_handle_t handle)
{
    ATRACE_CALL();
    status_t err;

    err = mAllocMod->unlock(mAllocMod, handle);

    ALOGW_IF(err, "unlock(...) failed %d (%s)", err, strerror(-err));
    return err;
}
int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd) {
    ATRACE_CALL();
    ALOGV("Surface::queueBuffer");
    Mutex::Autolock lock(mMutex);
    int64_t timestamp;
    bool isAutoTimestamp = false;
    if (mTimestamp == NATIVE_WINDOW_TIMESTAMP_AUTO) {
        timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
        isAutoTimestamp = true;
        ALOGV("Surface::queueBuffer making up timestamp: %.2f ms",
                timestamp / 1000000.f);
    } else {
        timestamp = mTimestamp;
    }
    int i = getSlotFromBufferLocked(buffer);
    if (i < 0) {
        return i;
    }

    // Make sure the crop rectangle is entirely inside the buffer.
    Rect crop;
    mCrop.intersect(Rect(buffer->width, buffer->height), &crop);

#ifdef QCOM_BSP
    Rect dirtyRect = mDirtyRect;
    if (dirtyRect.isEmpty()) {
        int drWidth = mUserWidth ? mUserWidth : mDefaultWidth;
        int drHeight = mUserHeight ? mUserHeight : mDefaultHeight;
        dirtyRect = Rect(drWidth, drHeight);
    }
#endif

    sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
    IGraphicBufferProducer::QueueBufferOutput output;
    IGraphicBufferProducer::QueueBufferInput input(timestamp, isAutoTimestamp,
            crop,
#ifdef QCOM_BSP
            dirtyRect,
#endif
            mScalingMode, mTransform, mSwapIntervalZero, fence);
    status_t err = mGraphicBufferProducer->queueBuffer(i, input, &output);
    if (err != OK) {
        ALOGE("queueBuffer: error queuing buffer to SurfaceTexture, %d", err);
    }

    uint32_t numPendingBuffers = 0;
    output.deflate(&mDefaultWidth, &mDefaultHeight, &mTransformHint,
            &numPendingBuffers);

    mConsumerRunningBehind = (numPendingBuffers >= 2);
#ifdef QCOM_BSP
    mDirtyRect.clear();
#endif

    return err;
}
status_t Camera3ZslStream::getInputBufferLocked(camera3_stream_buffer *buffer) {
    ATRACE_CALL();

    status_t res;

    // TODO: potentially register from inputBufferLocked
    // this should be ok, registerBuffersLocked only calls getBuffer for now
    // register in output mode instead of input mode for ZSL streams.
    if (mState == STATE_IN_CONFIG || mState == STATE_IN_RECONFIG) {
        ALOGE("%s: Stream %d: Buffer registration for input streams"
              " not implemented (state %d)",
              __FUNCTION__, mId, mState);
        return INVALID_OPERATION;
    }

    if ((res = getBufferPreconditionCheckLocked()) != OK) {
        return res;
    }

    ANativeWindowBuffer* anb;
    int fenceFd;

    assert(mProducer != 0);

    sp<PinnedBufferItem> bufferItem;
    {
        List<sp<RingBufferConsumer::PinnedBufferItem> >::iterator it, end;
        it = mInputBufferQueue.begin();
        end = mInputBufferQueue.end();

        // Need to call enqueueInputBufferByTimestamp as a prerequisite
        if (it == end) {
            ALOGE("%s: Stream %d: No input buffer was queued",
                  __FUNCTION__, mId);
            return INVALID_OPERATION;
        }
        bufferItem = *it;
        mInputBufferQueue.erase(it);
    }

    anb = bufferItem->getBufferItem().mGraphicBuffer->getNativeBuffer();
    assert(anb != NULL);
    fenceFd = bufferItem->getBufferItem().mFence->dup();

    /**
     * FenceFD now owned by HAL except in case of error,
     * in which case we reassign it to acquire_fence
     */
    handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK);

    mBuffersInFlight.push_back(bufferItem);

    return OK;
}
int SurfaceTextureClient::cancelBuffer(android_native_buffer_t* buffer) {
    ATRACE_CALL();
    ALOGV("SurfaceTextureClient::cancelBuffer");
    Mutex::Autolock lock(mMutex);
    int i = getSlotFromBufferLocked(buffer);
    if (i < 0) {
        return i;
    }
    mSurfaceTexture->cancelBuffer(i);
    return OK;
}
void BufferQueueConsumer::setConsumerName(const String8& name) {
    ATRACE_CALL();
    BQ_LOGV("setConsumerName: '%s'", name.string());
    Mutex::Autolock lock(mCore->mMutex);
    mCore->mConsumerName = name;
    mConsumerName = name;
#ifdef MTK_AOSP_ENHANCEMENT
    // update dump info and prepare for drawing debug line
    mCore->debugger.onSetConsumerName(name);
#endif
}
status_t Camera3Stream::getInputBuffer(camera3_stream_buffer *buffer) {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);
    status_t res = getInputBufferLocked(buffer);
    if (res == OK) {
        fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/false);
    }

    return res;
}
status_t GLConsumer::attachToContext(uint32_t tex) {
    ATRACE_CALL();
#ifndef MTK_DEFAULT_AOSP
    ST_LOGI("attachToContext");
#else
    ST_LOGV("attachToContext");
#endif
    Mutex::Autolock lock(mMutex);

    if (mAbandoned) {
        ST_LOGE("attachToContext: abandoned GLConsumer");
        return NO_INIT;
    }

    if (mAttached) {
        ST_LOGE("attachToContext: GLConsumer is already attached to a "
                "context");
        return INVALID_OPERATION;
    }

    EGLDisplay dpy = eglGetCurrentDisplay();
    EGLContext ctx = eglGetCurrentContext();

    if (dpy == EGL_NO_DISPLAY) {
        ST_LOGE("attachToContext: invalid current EGLDisplay");
        return INVALID_OPERATION;
    }

    if (ctx == EGL_NO_CONTEXT) {
        ST_LOGE("attachToContext: invalid current EGLContext");
        return INVALID_OPERATION;
    }

    // We need to bind the texture regardless of whether there's a current
    // buffer.
    glBindTexture(mTexTarget, GLuint(tex));

    if (mCurrentTextureBuf != NULL) {
        // The EGLImageKHR that was associated with the slot was destroyed when
        // the GLConsumer was detached from the old context, so we need to
        // recreate it here.
        status_t err = bindUnslottedBufferLocked(dpy);
        if (err != NO_ERROR) {
            return err;
        }
    }

    mEglDisplay = dpy;
    mEglContext = ctx;
    mTexName = tex;
    mAttached = true;

    return OK;
}
status_t Camera3Stream::tearDown() {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);

    status_t res = OK;

    // This function should be only called when the stream is configured.
    if (mState != STATE_CONFIGURED) {
        ALOGE("%s: Stream %d: Can't tear down stream if stream is not in "
                "CONFIGURED state %d", __FUNCTION__, mId, mState);
        return INVALID_OPERATION;
    }

    // If any buffers have been handed to the HAL, the stream cannot be torn down.
    if (getHandoutOutputBufferCountLocked() > 0) {
        ALOGE("%s: Stream %d: Can't tear down a stream that has outstanding buffers",
                __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    // Free buffers by disconnecting and then reconnecting to the buffer queue.
    // Only unused buffers will be dropped immediately; buffers that have been filled
    // and are waiting to be acquired by the consumer and buffers that are currently
    // acquired will be freed once they are released by the consumer.
    res = disconnectLocked();
    if (res != OK) {
        if (res == -ENOTCONN) {
            // queue has been disconnected, nothing left to do, so exit with success
            return OK;
        }
        ALOGE("%s: Stream %d: Unable to disconnect to tear down buffers: %s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
        return res;
    }

    mState = STATE_IN_CONFIG;

    res = configureQueueLocked();
    if (res != OK) {
        ALOGE("%s: Unable to configure stream %d queue: %s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
        mState = STATE_ERROR;
        return res;
    }

    // Reset prepared state, since we've reconnected to the queue and can prepare again.
    mPrepared = false;
    mStreamUnpreparable = false;

    mState = STATE_CONFIGURED;

    return OK;
}
status_t GraphicBufferMapper::registerBuffer(buffer_handle_t handle)
{
    ATRACE_CALL();
    status_t err;

    err = mAllocMod->registerBuffer(mAllocMod, handle);

    ALOGW_IF(err, "registerBuffer(%p) failed %d (%s)",
            handle, err, strerror(-err));
    return err;
}
int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
    ATRACE_CALL();
    ALOGV("Surface::dequeueBuffer");
    Mutex::Autolock lock(mMutex);
    int buf = -1;
    int reqW = mReqWidth ? mReqWidth : mUserWidth;
    int reqH = mReqHeight ? mReqHeight : mUserHeight;
    sp<Fence> fence;
    status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence,
            mSwapIntervalZero, reqW, reqH, mReqFormat, mReqUsage);
    if (result < 0) {
        ALOGV("dequeueBuffer: IGraphicBufferProducer::dequeueBuffer(%d, %d, %d, %d) "
                "failed: %d", mReqWidth, mReqHeight, mReqFormat, mReqUsage, result);
        return result;
    }
    sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);

    // this should never happen
    ALOGE_IF(fence == NULL, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);

    if (result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
        freeAllBuffers();
    }

    if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == 0) {
        result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
        if (result != NO_ERROR) {
            ALOGE("dequeueBuffer: IGraphicBufferProducer::requestBuffer failed: %d",
                    result);
            return result;
        } else if (gbuf == 0) {
            ALOGE("dequeueBuffer: Buffer is null return");
            return INVALID_OPERATION;
        }
    }

    if ((fence != NULL) && fence->isValid()) {
        *fenceFd = fence->dup();
        if (*fenceFd == -1) {
            ALOGE("dequeueBuffer: error duping fence: %d", errno);
            // dup() should never fail; something is badly wrong. Soldier on
            // and hope for the best; the worst that should happen is some
            // visible corruption that lasts until the next frame.
        }
    } else {
        *fenceFd = -1;
    }

    *buffer = gbuf.get();
#ifdef SURFACE_SKIP_FIRST_DEQUEUE
    if (!mDequeuedOnce) mDequeuedOnce = true;
#endif
    return OK;
}
status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer,
        nsecs_t timestamp) {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);

    status_t res = returnBufferLocked(buffer, timestamp);
    if (res == OK) {
        fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/true);
    }

    return res;
}
status_t ZslProcessor::processNewZslBuffer() {
    ATRACE_CALL();
    status_t res;
    sp<BufferItemConsumer> zslConsumer;
    {
        Mutex::Autolock l(mInputMutex);
        if (mZslConsumer == 0) return OK;
        zslConsumer = mZslConsumer;
    }
    ALOGVV("Trying to get next buffer");

    BufferItemConsumer::BufferItem item;
    res = zslConsumer->acquireBuffer(&item, 0);
    if (res != OK) {
        if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
            ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
                    "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
        } else {
            ALOGVV(" No buffer");
        }
        return res;
    }

    Mutex::Autolock l(mInputMutex);

    if (mState == LOCKED) {
        ALOGVV("In capture, discarding new ZSL buffers");
        zslConsumer->releaseBuffer(item);
        return OK;
    }

    ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);

    if ((mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
        ALOGVV("Releasing oldest buffer");
        zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
        mZslQueue.replaceAt(mZslQueueTail);
        mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
    }

    ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);

    queueHead.buffer = item;
    queueHead.frame.release();

    mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;

    ALOGVV(" Acquired buffer, timestamp %" PRId64, queueHead.buffer.mTimestamp);

    findMatchesLocked();

    return OK;
}
void ShadowTessellator::tessellateSpotShadow(bool isCasterOpaque,
        const Vector3* casterPolygon, int casterVertexCount,
        const Vector3& casterCentroid, const mat4& receiverTransform,
        const Vector3& lightCenter, int lightRadius,
        const Rect& casterBounds, const Rect& localClip,
        VertexBuffer& shadowVertexBuffer) {
    ATRACE_CALL();

    Caches& caches = Caches::getInstance();

    Vector3 adjustedLightCenter(lightCenter);
    if (CC_UNLIKELY(caches.propertyLightPosY > 0)) {
        adjustedLightCenter.y = -caches.propertyLightPosY; // negated since this shifts up
    }
    if (CC_UNLIKELY(caches.propertyLightPosZ > 0)) {
        adjustedLightCenter.z = caches.propertyLightPosZ;
    }
#if DEBUG_SHADOW
    ALOGD("light center %f %f %f",
            adjustedLightCenter.x, adjustedLightCenter.y, adjustedLightCenter.z);
#endif

    // light position (because it's in local space) needs to compensate for receiver transform
    // TODO: should apply to light orientation, not just position
    Matrix4 reverseReceiverTransform;
    reverseReceiverTransform.loadInverse(receiverTransform);
    reverseReceiverTransform.mapPoint3d(adjustedLightCenter);

    const int lightVertexCount = 8;
    if (CC_UNLIKELY(caches.propertyLightDiameter > 0)) {
        lightRadius = caches.propertyLightDiameter;
    }

    // Now light and caster are both in local space, we will check whether
    // the shadow is within the clip area.
    Rect lightRect = Rect(adjustedLightCenter.x - lightRadius,
            adjustedLightCenter.y - lightRadius,
            adjustedLightCenter.x + lightRadius,
            adjustedLightCenter.y + lightRadius);
    lightRect.unionWith(localClip);
    if (!lightRect.intersects(casterBounds)) {
#if DEBUG_SHADOW
        ALOGD("Spot shadow is out of clip rect!");
#endif
        return;
    }

    SpotShadow::createSpotShadow(isCasterOpaque, adjustedLightCenter,
            lightRadius, casterPolygon, casterVertexCount, casterCentroid,
            shadowVertexBuffer);

#if DEBUG_SHADOW
    if (shadowVertexBuffer.getVertexCount() <= 0) {
        ALOGD("Spot shadow generation failed %d",
                shadowVertexBuffer.getVertexCount());
    }
#endif
}
void CanvasContext::trimMemory(RenderThread& thread, int level) {
    // No context means nothing to free
    if (!thread.eglManager().hasEglContext()) return;

    ATRACE_CALL();
    if (level >= TRIM_MEMORY_COMPLETE) {
        Caches::getInstance().flush(Caches::kFlushMode_Full);
        thread.eglManager().destroy();
    } else if (level >= TRIM_MEMORY_UI_HIDDEN) {
        Caches::getInstance().flush(Caches::kFlushMode_Moderate);
    }
}
/**
 * Organizes the DisplayList hierarchy to prepare for background projection reordering.
 *
 * This should be called before a call to defer() or drawDisplayList().
 *
 * Each DisplayList that serves as a 3d root builds its list of composited children,
 * which are flagged to not draw in the standard draw loop.
 */
void RenderNode::computeOrdering() {
    ATRACE_CALL();
    mProjectedNodes.clear();

    // TODO: create temporary DDLOp and call computeOrderingImpl on top DisplayList so that
    // transform properties are applied correctly to top level children
    if (mDisplayList == nullptr) return;
    for (unsigned int i = 0; i < mDisplayList->getChildren().size(); i++) {
        renderNodeOp_t* childOp = mDisplayList->getChildren()[i];
        childOp->renderNode->computeOrderingImpl(childOp,
                &mProjectedNodes, &mat4::identity());
    }
}
static int hwc_set(struct hwc_composer_device_1 *dev,
                   size_t numDisplays,
                   hwc_display_contents_1_t **displays)
{
    ATRACE_CALL();
    GET_HWC_RETURN_ERROR_IF_NULL();
    if (!hwc->commit(numDisplays, displays)) {
        ELOGTRACE("failed to commit");
        return -EINVAL;
    }

    return 0;
}

static int hwc_setActiveConfig(hwc_composer_device_1_t *dev, int disp, int index)
{
    ATRACE_CALL();
    GET_HWC_RETURN_ERROR_IF_NULL();
    bool ret = hwc->setActiveConfig(disp, index);
    if (ret == false) {
        WLOGTRACE("failed to set active config of disp %d", disp);
        return -EINVAL;
    }

    return 0;
}

static int hwc_setCursorPositionAsync(hwc_composer_device_1_t *dev, int disp, int x, int y)
{
    ATRACE_CALL();
    GET_HWC_RETURN_ERROR_IF_NULL();
    bool ret = hwc->setCursorPositionAsync(disp, x, y);
    if (ret == false) {
        WLOGTRACE("failed to set cursor position of disp %d", disp);
        return -EINVAL;
    }

    return 0;
}

static int hwc_setPowerMode(hwc_composer_device_1_t *dev, int disp, int mode)
{
    ATRACE_CALL();
    GET_HWC_RETURN_ERROR_IF_NULL();
    bool ret = hwc->setPowerMode(disp, mode);
    if (ret == false) {
        WLOGTRACE("failed to set power mode of disp %d", disp);
        return -EINVAL;
    }

    return 0;
}

static int hwc_getActiveConfig(hwc_composer_device_1_t *dev, int disp)
{
    ATRACE_CALL();
    GET_HWC_RETURN_ERROR_IF_NULL();
    int ret = hwc->getActiveConfig(disp);
    if (ret == -1) {
        WLOGTRACE("failed to get active config of disp %d", disp);
        return -EINVAL;
    }

    return ret;
}

static int hwc_blank(hwc_composer_device_1_t *dev, int disp, int blank)
{
    ATRACE_CALL();
    GET_HWC_RETURN_ERROR_IF_NULL();
    bool ret = hwc->blank(disp, blank);
    if (ret == false) {
        ELOGTRACE("failed to blank disp %d, blank %d", disp, blank);
        return -EINVAL;
    }

    return 0;
}

static int hwc_compositionComplete(hwc_composer_device_1_t *dev, int disp)
{
    ATRACE_CALL();
    GET_HWC_RETURN_ERROR_IF_NULL();
    bool ret = hwc->compositionComplete(disp);
    if (ret == false) {
        ELOGTRACE("failed for disp %d", disp);
        return -EINVAL;
    }

    return 0;
}
status_t Camera3Stream::startPrepare(int maxCount) {
    ATRACE_CALL();

    Mutex::Autolock l(mLock);

    if (maxCount < 0) {
        ALOGE("%s: Stream %d: Can't prepare stream if max buffer count (%d) is < 0",
                __FUNCTION__, mId, maxCount);
        return BAD_VALUE;
    }

    // This function should be only called when the stream is configured already.
    if (mState != STATE_CONFIGURED) {
        ALOGE("%s: Stream %d: Can't prepare stream if stream is not in CONFIGURED "
                "state %d", __FUNCTION__, mId, mState);
        return INVALID_OPERATION;
    }

    // This function can't be called if the stream has already received filled
    // buffers
    if (mStreamUnpreparable) {
        ALOGE("%s: Stream %d: Can't prepare stream that's already in use",
                __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    if (getHandoutOutputBufferCountLocked() > 0) {
        ALOGE("%s: Stream %d: Can't prepare stream that has outstanding buffers",
                __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    size_t pipelineMax = getBufferCountLocked();
    size_t clampedCount = (pipelineMax < static_cast<size_t>(maxCount)) ?
            pipelineMax : static_cast<size_t>(maxCount);
    size_t bufferCount = (maxCount == Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) ?
            pipelineMax : clampedCount;

    mPrepared = bufferCount <= mLastMaxCount;

    if (mPrepared) return OK;

    mLastMaxCount = bufferCount;

    mPreparedBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount);
    mPreparedBufferIdx = 0;

    mState = STATE_PREPARING;

    return NOT_ENOUGH_DATA;
}
status_t GraphicBufferAllocator::alloc(uint32_t w, uint32_t h, PixelFormat format,
        int usage, buffer_handle_t* handle, int32_t* stride)
{
    ATRACE_CALL();
    // make sure to not allocate a N x 0 or 0 x N buffer; since this is
    // allowed from an API stand-point, allocate a 1x1 buffer instead.
    if (!w || !h)
        w = h = 1;

    // we have a h/w allocator and h/w buffer is requested
    status_t err;

    // If too many async frees are queued up then wait for some of them to
    // complete before attempting to allocate more memory. This is exercised
    // by the android.opengl.cts.GLSurfaceViewTest CTS test.
    BufferLiberatorThread::maybeWaitForLiberation();

    err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);

    if (err != NO_ERROR) {
        ALOGW("WOW! gralloc alloc failed, waiting for pending frees!");
        BufferLiberatorThread::waitForLiberation();
        err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);
    }

    ALOGW_IF(err, "alloc(%u, %u, %d, %08x, ...) failed %d (%s)",
            w, h, format, usage, err, strerror(-err));

    if (err == NO_ERROR) {
        Mutex::Autolock _l(sLock);
        KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
        int bpp = bytesPerPixel(format);
        if (bpp < 0) {
            // probably a HAL custom format. in any case, we don't know
            // what its pixel size is.
            bpp = 0;
        }
        alloc_rec_t rec;
        rec.w = w;
        rec.h = h;
        rec.s = *stride;
        rec.format = format;
        rec.usage = usage;
        rec.size = h * stride[0] * bpp;
        // [MTK] {{{
        rec.pid = IPCThreadState::self()->getCallingPid();
        // [MTK] }}}
        list.add(*handle, rec);
    }

    return err;
}
int SurfaceTextureClient::cancelBuffer(android_native_buffer_t* buffer,
        int fenceFd) {
    ATRACE_CALL();
    ALOGV("SurfaceTextureClient::cancelBuffer");
    Mutex::Autolock lock(mMutex);
    int i = getSlotFromBufferLocked(buffer);
    if (i < 0) {
        return i;
    }
    sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : NULL);
    mSurfaceTexture->cancelBuffer(i, fence);
    return OK;
}
status_t Camera3Stream::disconnect() {
    ATRACE_CALL();
    Mutex::Autolock l(mLock);
    ALOGV("%s: Stream %d: Disconnecting...", __FUNCTION__, mId);
    status_t res = disconnectLocked();

    if (res == -ENOTCONN) {
        // "Already disconnected" -- not an error
        return OK;
    } else {
        return res;
    }
}
sp<Fence> Fence::merge(const String8& name, const sp<Fence>& f1,
        const sp<Fence>& f2) {
    ATRACE_CALL();
    int result = sync_merge(name.string(), f1->mFenceFd, f2->mFenceFd);
    if (result == -1) {
        status_t err = -errno;
        ALOGE("merge: sync_merge(\"%s\", %d, %d) returned an error: %s (%d)",
                name.string(), f1->mFenceFd, f2->mFenceFd,
                strerror(-err), err);
        return NO_FENCE;
    }
    return sp<Fence>(new Fence(result));
}
int Surface::cancelBuffer(android_native_buffer_t* buffer, int fenceFd) {
    ATRACE_CALL();
    ALOGV("Surface::cancelBuffer");
    Mutex::Autolock lock(mMutex);
    int i = getSlotFromBufferLocked(buffer);
    if (i < 0) {
        return i;
    }
    sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
    mGraphicBufferProducer->cancelBuffer(i, fence);
    return OK;
}
status_t StreamingProcessor::setPreviewWindow(sp<ANativeWindow> window) {
    ATRACE_CALL();
    status_t res;

    res = deletePreviewStream();
    if (res != OK) return res;

    Mutex::Autolock m(mMutex);

    mPreviewWindow = window;

    return OK;
}
status_t GraphicBufferMapper::lock(buffer_handle_t handle,
        int usage, const Rect& bounds, void** vaddr)
{
    ATRACE_CALL();
    status_t err;

    err = mAllocMod->lock(mAllocMod, handle, usage,
            bounds.left, bounds.top, bounds.width(), bounds.height(),
            vaddr);

    ALOGW_IF(err, "lock(...) failed %d (%s)", err, strerror(-err));
    return err;
}