/*
 * If the concurrent GC is running, wait for it to finish. The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend. In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory. We can avoid this situation by releasing the lock
 * before self-suspending. (The developer can work around this specific
 * situation by single-stepping the VM. Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all. (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
bool dvmWaitForConcurrentGcToComplete()
{
    ATRACE_BEGIN("GC: Wait For Concurrent");
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
#ifdef FASTIVA
    // Ensure no Java-object reference is used in the local stack,
    // and save any Java-object references that may be in registers.
    FASTIVA_SUSPEND_STACK_unsafe(self);
    ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
    }
    dvmChangeStatus(self, oldStatus);
    FASTIVA_RESUME_STACK_unsafe(self);
#else
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
#endif
    u4 end = dvmGetRelativeTimeMsec();
    if (end - start > 0) {
        ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
    }
    ATRACE_END();
    return waited;
}
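/*
 * Usage sketch (not from the original source): the function above assumes
 * the caller already holds the heap lock, so a typical call site takes the
 * lock first and retries its work once the concurrent GC has finished.
 * The allocator call below is illustrative only.
 */
static void *allocAfterConcurrentGc(size_t size)
{
    void *ptr = NULL;
    dvmLockHeap();                            /* required by the callee */
    if (dvmWaitForConcurrentGcToComplete()) {
        /* A concurrent GC just finished; free space may now be available. */
        ptr = dvmHeapSourceAlloc(size);       /* illustrative allocator call */
    }
    dvmUnlockHeap();
    return ptr;
}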
void LayerBuilder::replayBakedOpsImpl(void* arg,
        BakedOpReceiver* unmergedReceivers, MergedOpReceiver* mergedReceivers) const {
    if (renderNode) {
        ATRACE_FORMAT_BEGIN("Issue HW Layer DisplayList %s %ux%u",
                renderNode->getName(), width, height);
    } else {
        ATRACE_BEGIN("flush drawing commands");
    }

    for (const BatchBase* batch : mBatches) {
        size_t size = batch->getOps().size();
        if (size > 1 && batch->isMerging()) {
            int opId = batch->getOps()[0]->op->opId;
            const MergingOpBatch* mergingBatch = static_cast<const MergingOpBatch*>(batch);
            MergedBakedOpList data = {
                    batch->getOps().data(),
                    size,
                    mergingBatch->getClipSideFlags(),
                    mergingBatch->getClipRect()
            };
            mergedReceivers[opId](arg, data);
        } else {
            for (const BakedOpState* op : batch->getOps()) {
                unmergedReceivers[op->op->opId](arg, *op);
            }
        }
    }
    ATRACE_END();
}
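/*
 * Standalone sketch (types and names are illustrative, not hwui's): the
 * receivers above are plain arrays of function pointers indexed by each
 * op's opId, so dispatch is a table lookup rather than a virtual call.
 */
#include <cstdio>

struct FakeOp { int opId; };                       // stand-in for BakedOpState
using Receiver = void (*)(void* arg, const FakeOp& op);

static void onRect(void*, const FakeOp&)   { std::puts("rect"); }
static void onBitmap(void*, const FakeOp&) { std::puts("bitmap"); }

int main() {
    Receiver receivers[] = { onRect, onBitmap };   // one entry per opId
    FakeOp ops[] = { {0}, {1}, {0} };
    for (const FakeOp& op : ops) {
        receivers[op.opId](nullptr, op);           // table dispatch, as above
    }
    return 0;
}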
status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) {
    ATRACE_CALL();

    status_t res;

    size_t bufferCount = getBufferCountLocked();

    Vector<buffer_handle_t*> buffers;
    buffers.insertAt(NULL, 0, bufferCount);

    camera3_stream_buffer_set bufferSet = camera3_stream_buffer_set();
    bufferSet.stream = this;
    bufferSet.num_buffers = bufferCount;
    bufferSet.buffers = buffers.editArray();

    Vector<camera3_stream_buffer_t> streamBuffers;
    streamBuffers.insertAt(camera3_stream_buffer_t(), 0, bufferCount);

    // Register all buffers with the HAL. This means getting all the buffers
    // from the stream, providing them to the HAL with the
    // register_stream_buffers() method, and then returning them back to the
    // stream in the error state, since they won't have valid data.
    //
    // Only registered buffers can be sent to the HAL.

    uint32_t bufferIdx = 0;
    for (; bufferIdx < bufferCount; bufferIdx++) {
        res = getBufferLocked( &streamBuffers.editItemAt(bufferIdx) );
        if (res != OK) {
            ALOGE("%s: Unable to get buffer %d for registration with HAL",
                    __FUNCTION__, bufferIdx);
            // Skip registering, go straight to cleanup
            break;
        }

        sp<Fence> fence = new Fence(streamBuffers[bufferIdx].acquire_fence);
        fence->waitForever("Camera3Stream::registerBuffers");

        buffers.editItemAt(bufferIdx) = streamBuffers[bufferIdx].buffer;
    }
    if (bufferIdx == bufferCount) {
        // Got all buffers, register with HAL
        ALOGV("%s: Registering %zu buffers with camera HAL",
                __FUNCTION__, bufferCount);
        ATRACE_BEGIN("camera3->register_stream_buffers");
        res = hal3Device->ops->register_stream_buffers(hal3Device, &bufferSet);
        ATRACE_END();
    }

    // Return all valid buffers to stream, in ERROR state to indicate
    // they weren't filled.
    for (size_t i = 0; i < bufferIdx; i++) {
        streamBuffers.editItemAt(i).release_fence = -1;
        streamBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
        returnBufferLocked(streamBuffers[i], 0);
    }

    return res;
}
void FastCapture::onWork()
{
    const FastCaptureState * const current = (const FastCaptureState *) mCurrent;
    FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) mDumpState;
    const FastCaptureState::Command command = mCommand;
    const size_t frameCount = current->mFrameCount;

    if ((command & FastCaptureState::READ) /*&& isWarm*/) {
        ALOG_ASSERT(mInputSource != NULL);
        ALOG_ASSERT(mReadBuffer != NULL);
        dumpState->mReadSequence++;
        ATRACE_BEGIN("read");
        ssize_t framesRead = mInputSource->read(mReadBuffer, frameCount,
                AudioBufferProvider::kInvalidPTS);
        ATRACE_END();
        dumpState->mReadSequence++;
        if (framesRead >= 0) {
            LOG_ALWAYS_FATAL_IF((size_t) framesRead > frameCount);
            mTotalNativeFramesRead += framesRead;
            dumpState->mFramesRead = mTotalNativeFramesRead;
            mReadBufferState = framesRead;
        } else {
            dumpState->mReadErrors++;
            mReadBufferState = 0;
        }
        // FIXME rename to attemptedIO
        mAttemptedWrite = true;
    }

    if (command & FastCaptureState::WRITE) {
        ALOG_ASSERT(mPipeSink != NULL);
        ALOG_ASSERT(mReadBuffer != NULL);
        if (mReadBufferState < 0) {
            unsigned channelCount = Format_channelCount(mFormat);
            memset(mReadBuffer, 0, frameCount * Format_frameSize(mFormat));
            mReadBufferState = frameCount;
        }
        if (mReadBufferState > 0) {
            ssize_t framesWritten = mPipeSink->write(mReadBuffer, mReadBufferState);
            // FIXME This supports at most one fast capture client.
            //       To handle multiple clients this could be converted to an array,
            //       or with a lot more work the control block could be shared by all clients.
            audio_track_cblk_t* cblk = current->mCblk;
            if (cblk != NULL && framesWritten > 0) {
                int32_t rear = cblk->u.mStreaming.mRear;
                android_atomic_release_store(framesWritten + rear, &cblk->u.mStreaming.mRear);
                cblk->mServer += framesWritten;
                int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
                if (!(old & CBLK_FUTEX_WAKE)) {
                    // client is never in server process, so don't use FUTEX_WAKE_PRIVATE
                    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, 1);
                }
            }
        }
    }
}
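/*
 * Portable sketch of the same publish/consume ordering in C++11 atomics
 * (illustrative names; this is not the audio_track_cblk_t layout). The
 * rear-pointer update above is a release store so that a consumer's acquire
 * load observes the frame data before the updated count; the futex wake is
 * only issued when the client has armed CBLK_FUTEX_WAKE.
 */
#include <atomic>
#include <cstdint>

struct RingControl {
    std::atomic<int32_t> rear{0};   // frames produced, published with release
};

void producerPublish(RingControl& rc, int32_t framesWritten) {
    // Frame data must be written to the ring *before* this store.
    int32_t r = rc.rear.load(std::memory_order_relaxed);
    rc.rear.store(r + framesWritten, std::memory_order_release);
}

int32_t consumerAvailable(const RingControl& rc, int32_t front) {
    // Acquire pairs with the producer's release: after this load, all frame
    // data up to 'rear' is visible to the consumer.
    return rc.rear.load(std::memory_order_acquire) - front;
}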
void setBrightnessValueBg_notifyRenderTime(float time)
{
    static void *handle;
    void *func;
    int boost_level = LEVEL_BOOST_NOP, first_frame = 0;
    static nsecs_t mPreviousTime = 0;
    char buff[64];
    nsecs_t now = systemTime(CLOCK_MONOTONIC);

    //init();
    setBrightnessValueBg_notifyFrameUpdate(1);
    //ALOGI("setBrightnessValueBg_notifyRenderTime: time:%f", time);

#if 0
    if (handle == NULL) {
        handle = dlopen("libperfservice.so", RTLD_NOW);
        func = dlsym(handle, "perfCalcBoostLevel");
        perfCalcBoostLevel = reinterpret_cast<calc_boost_level>(func);
        if (perfCalcBoostLevel == NULL) {
            ALOGE("perfCalcBoostLevel init fail!");
        }
    }

    if (mPreviousTime == 0 || (now - mPreviousTime) > RENDER_THREAD_CHECK_DURATION) {
        // exceed RENDER_THREAD_CHECK_DURATION => first frame
        first_frame = 1;
    }
    mPreviousTime = now;

    if (first_frame) {
        //ALOGI("setBrightnessValueBg_notifyRenderTime: first_frame");
        if (perfCalcBoostLevel)
            perfCalcBoostLevel(0);
        return;
    }

    if (perfCalcBoostLevel) {
        boost_level = perfCalcBoostLevel(time);
    }

    // init value
    //sprintf(buff, "notifyRenderTime:%.2f", time);

    if (boost_level == LEVEL_BOOST_NOP)
        return;

    sprintf(buff, "levelBoost:%d", boost_level);
    ATRACE_BEGIN(buff);
#if defined(MTK_LEVEL_BOOST_SUPPORT)
    setBrightnessValueBg_levelBoost(boost_level);
#endif
    ATRACE_END();
#endif
}
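/*
 * Sketch (function name invented for illustration): ATRACE_BEGIN() takes a
 * plain C string, so runtime values must be formatted into a buffer first,
 * as the sprintf above does; snprintf is the bounds-checked way to do it.
 */
#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include <cutils/trace.h>   /* ATRACE_BEGIN / ATRACE_END */
#include <stdio.h>

void traceBoost(int boostLevel)
{
    char label[64];
    snprintf(label, sizeof(label), "levelBoost:%d", boostLevel);  /* cannot overflow */
    ATRACE_BEGIN(label);
    /* ... boosted work here ... */
    ATRACE_END();
}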
Program::Program(const ProgramDescription& description, const char* vertex, const char* fragment) {
    mInitialized = false;
    mHasColorUniform = false;
    mHasSampler = false;
    mUse = false;

    // No need to cache compiled shaders, rely instead on Android's
    // persistent shaders cache
    mVertexShader = buildShader(vertex, GL_VERTEX_SHADER);
    if (mVertexShader) {
        mFragmentShader = buildShader(fragment, GL_FRAGMENT_SHADER);
        if (mFragmentShader) {
            mProgramId = glCreateProgram();
            glAttachShader(mProgramId, mVertexShader);
            glAttachShader(mProgramId, mFragmentShader);

            bindAttrib("position", kBindingPosition);
            if (description.hasTexture || description.hasExternalTexture) {
                texCoords = bindAttrib("texCoords", kBindingTexCoords);
            } else {
                texCoords = -1;
            }

            ATRACE_BEGIN("linkProgram");
            glLinkProgram(mProgramId);
            ATRACE_END();

            GLint status;
            glGetProgramiv(mProgramId, GL_LINK_STATUS, &status);
            if (status != GL_TRUE) {
                GLint infoLen = 0;
                glGetProgramiv(mProgramId, GL_INFO_LOG_LENGTH, &infoLen);
                if (infoLen > 1) {
                    GLchar log[infoLen];
                    glGetProgramInfoLog(mProgramId, infoLen, nullptr, &log[0]);
                    ALOGE("%s", log);
                }
                LOG_ALWAYS_FATAL("Error while linking shaders");
            } else {
                mInitialized = true;
            }
        } else {
            glDeleteShader(mVertexShader);
        }
    }

    if (mInitialized) {
        transform = addUniform("transform");
        projection = addUniform("projection");
    }
}
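/*
 * buildShader() is not shown above; a plausible implementation (mirroring
 * the standard GLES pattern, not necessarily hwui's exact code) compiles
 * the stage and checks GL_COMPILE_STATUS the same way the constructor
 * checks GL_LINK_STATUS.
 */
GLuint buildShaderSketch(const char* source, GLenum type) {
    GLuint shader = glCreateShader(type);
    glShaderSource(shader, 1, &source, nullptr);
    glCompileShader(shader);

    GLint status;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
    if (status != GL_TRUE) {
        GLint infoLen = 0;
        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
        if (infoLen > 1) {
            std::vector<GLchar> log(infoLen);   // requires <vector>
            glGetShaderInfoLog(shader, infoLen, nullptr, log.data());
            ALOGE("Shader compile failed: %s", log.data());
        }
        glDeleteShader(shader);
        return 0;   // caller treats 0 as failure, as the constructor does
    }
    return shader;
}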
int Camera::processCaptureRequest(camera3_capture_request_t *request)
{
    ALOGV("%s:%d: request=%p", __func__, mId, request);
    ATRACE_BEGIN(__func__);

    if (request == NULL) {
        ALOGE("%s:%d: NULL request received", __func__, mId);
        ATRACE_END();
        return -EINVAL;
    }

    // TODO: verify request; submit request to hardware
    ATRACE_END();
    return 0;
}
int Camera::close()
{
    ALOGI("%s:%d: Closing camera device", __func__, mId);
    ATRACE_BEGIN(__func__);
    pthread_mutex_lock(&mMutex);

    if (!mBusy) {
        pthread_mutex_unlock(&mMutex);
        ATRACE_END();
        ALOGE("%s:%d: Error! Camera device not open", __func__, mId);
        return -EINVAL;
    }

    // TODO: close camera dev nodes, etc
    mBusy = false;

    pthread_mutex_unlock(&mMutex);
    ATRACE_END();
    return 0;
}
int Camera::open(const hw_module_t *module, hw_device_t **device)
{
    ALOGI("%s:%d: Opening camera device", __func__, mId);
    ATRACE_BEGIN(__func__);
    pthread_mutex_lock(&mMutex);

    if (mBusy) {
        pthread_mutex_unlock(&mMutex);
        ATRACE_END();
        ALOGE("%s:%d: Error! Camera device already opened", __func__, mId);
        return -EBUSY;
    }

    // TODO: open camera dev nodes, etc
    mBusy = true;
    mDevice.common.module = const_cast<hw_module_t*>(module);
    *device = &mDevice.common;

    pthread_mutex_unlock(&mMutex);
    ATRACE_END();
    return 0;
}
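/*
 * The three methods above must repeat ATRACE_END() on every exit path;
 * forgetting one leaves the trace section unbalanced. AOSP's <utils/Trace.h>
 * addresses this with a ScopedTrace class behind the ATRACE_CALL() macro
 * (used by registerBuffersLocked below). A minimal sketch of the idea; the
 * class and the traced method here are illustrative:
 */
class ScopedTraceSketch {
public:
    explicit ScopedTraceSketch(const char* name) { ATRACE_BEGIN(name); }
    ~ScopedTraceSketch() { ATRACE_END(); }        // runs on every return path
};

int Camera::processCaptureRequestTraced(camera3_capture_request_t *request)
{
    ScopedTraceSketch trace(__func__);
    if (request == NULL)
        return -EINVAL;   // destructor still ends the trace section
    // TODO: verify request; submit request to hardware
    return 0;
}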
void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatStart:
        {
            status_t err;
            ALOGI("start mIsAudio=%d", mIsAudio);
            if (mIsAudio) {
                // This atrocity causes AudioSource to deliver absolute
                // systemTime() based timestamps (off by 1 us).
#ifdef MTB_SUPPORT
                ATRACE_BEGIN("AudioPuller, kWhatStart");
#endif
                sp<MetaData> params = new MetaData;
                params->setInt64(kKeyTime, 1ll);
                err = mSource->start(params.get());
            } else {
#ifdef MTB_SUPPORT
                ATRACE_BEGIN("VideoPuller, kWhatStart");
#endif
                err = mSource->start();
                if (err != OK) {
                    ALOGE("source failed to start w/ err %d", err);
                }
            }

            if (err == OK) {
                ALOGI("start done, start to schedulePull data");
                schedulePull();
            }

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);

            uint32_t replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);
#ifdef MTB_SUPPORT
            ATRACE_END();
#endif
            break;
        }

        case kWhatStop:
        {
            sp<MetaData> meta = mSource->getFormat();
            const char *tmp;
            CHECK(meta->findCString(kKeyMIMEType, &tmp));
            AString mime = tmp;

            ALOGI("MediaPuller(%s) stopping.", mime.c_str());
            mSource->stop();
            ALOGI("MediaPuller(%s) stopped.", mime.c_str());
            ++mPullGeneration;

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));
            notify->post();
            break;
        }

        case kWhatPull:
        {
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));

            if (generation != mPullGeneration) {
                break;
            }
#ifdef MTB_SUPPORT
            // Begin the trace section only after the stale-generation check,
            // so the early break above cannot leave it unbalanced.
            if (mIsAudio) {
                ATRACE_BEGIN("AudioPuller, kWhatPull");
            } else {
                ATRACE_BEGIN("VideoPuller, kWhatPull");
            }
#endif
            MediaBuffer *mbuf;
            status_t err = mSource->read(&mbuf);

            if (mPaused) {
                if (err == OK) {
                    mbuf->release();
                    mbuf = NULL;
                }
                schedulePull();
#ifdef MTB_SUPPORT
                ATRACE_END();
#endif
                break;
            }

            if (err != OK) {
                if (err == ERROR_END_OF_STREAM) {
                    ALOGI("stream ended.");
                } else {
                    ALOGE("error %d reading stream.", err);
                }
                ALOGI("err=%d, post kWhatEOS", err);
                sp<AMessage> notify = mNotify->dup();
                notify->setInt32("what", kWhatEOS);
                notify->post();
#ifdef MTB_SUPPORT
                ATRACE_END();
#endif
            } else {
                int64_t timeUs;
                CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
#ifdef MTB_SUPPORT
                if (mIsAudio) {
                    ATRACE_ONESHOT(ATRACE_ONESHOT_ADATA, "AudioPuller, TS: %lld ms", timeUs/1000);
                } else {
                    ATRACE_ONESHOT(ATRACE_ONESHOT_VDATA, "VideoPuller, TS: %lld ms", timeUs/1000);
                }
#endif
                sp<ABuffer> accessUnit = new ABuffer(mbuf->range_length());

                memcpy(accessUnit->data(),
                       (const uint8_t *)mbuf->data() + mbuf->range_offset(),
                       mbuf->range_length());

                accessUnit->meta()->setInt64("timeUs", timeUs);
#ifndef ANDROID_DEFAULT_CODE
                sp<WfdDebugInfo> debugInfo = defaultWfdDebugInfo();
                int64_t MpMs = ALooper::GetNowUs();
                debugInfo->addTimeInfoByKey(!mIsAudio, timeUs, "MpIn", MpMs/1000);

                int64_t NowMpDelta = 0;
                NowMpDelta = (MpMs - timeUs)/1000;

                if (mFirstDeltaMs == -1) {
                    mFirstDeltaMs = NowMpDelta;
                    ALOGE("[check Input 1st][%s], timestamp=%lld ms, [ts and now delta change]=%lld ms",
                          mIsAudio ? "audio" : "video", timeUs/1000, NowMpDelta);
                }
                NowMpDelta = NowMpDelta - mFirstDeltaMs;

                if (NowMpDelta > 500ll || NowMpDelta < -500ll) {
                    ALOGE("[check Input][%s], timestamp=%lld ms, [ts and now delta change]=%lld ms",
                          mIsAudio ? "audio" : "video", timeUs/1000, NowMpDelta);
                }
#endif
                if (mIsAudio) {
                    mbuf->release();
                    mbuf = NULL;
                    ALOGI("[WFDP][%s], timestamp=%lld ms",
                          mIsAudio ? "audio" : "video", timeUs/1000);
                } else {
                    // video encoder will release MediaBuffer when done
                    // with underlying data.
                    accessUnit->meta()->setPointer("mediaBuffer", mbuf);
                    ALOGI("[WFDP][%s], mediaBuffer=%p, timestamp=%lld ms",
                          mIsAudio ? "audio" : "video", mbuf, timeUs/1000);
                }

                sp<AMessage> notify = mNotify->dup();
                notify->setInt32("what", kWhatAccessUnit);
                notify->setBuffer("accessUnit", accessUnit);
                notify->post();

                if (mbuf != NULL) {
                    ALOGV("posted mbuf %p", mbuf);
                }

                schedulePull();
#ifdef MTB_SUPPORT
                ATRACE_END();
#endif
            }
            break;
        }

        case kWhatPause:
        {
            mPaused = true;
            break;
        }

        case kWhatResume:
        {
            mPaused = false;
            break;
        }

        default:
            TRESPASS();
    }
}
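/*
 * Standalone sketch of the cancellation pattern above (plain C++, not the
 * AMessage API): kWhatStop bumps mPullGeneration, so any pull message
 * already in flight is recognized as stale and dropped.
 */
#include <cstdint>
#include <deque>
#include <iostream>

struct PullerSketch {
    int32_t generation = 0;
    std::deque<int32_t> queue;            // each queued pull carries the
                                          // generation at posting time
    void schedulePull() { queue.push_back(generation); }
    void stop()         { ++generation; } // invalidates queued pulls

    void drain() {
        while (!queue.empty()) {
            int32_t g = queue.front(); queue.pop_front();
            if (g != generation) {        // same effect as the break above
                std::cout << "stale pull dropped\n";
                continue;
            }
            std::cout << "pulling...\n";
        }
    }
};

int main() {
    PullerSketch p;
    p.schedulePull();
    p.stop();          // queued pull is now stale
    p.schedulePull();  // fresh pull with the new generation
    p.drain();
}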
status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) {
    ATRACE_CALL();

    /**
     * >= CAMERA_DEVICE_API_VERSION_3_2:
     *
     * camera3_device_t->ops->register_stream_buffers() is not called and must
     * be NULL.
     */
    if (hal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_2) {
        ALOGV("%s: register_stream_buffers unused as of HAL3.2", __FUNCTION__);

        if (hal3Device->ops->register_stream_buffers != NULL) {
            ALOGE("%s: register_stream_buffers is deprecated in HAL3.2; "
                    "must be set to NULL in camera3_device::ops", __FUNCTION__);
            return INVALID_OPERATION;
        }

        return OK;
    }

    ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__);

    status_t res;

    size_t bufferCount = getBufferCountLocked();

    Vector<buffer_handle_t*> buffers;
    buffers.insertAt(/*prototype_item*/NULL, /*index*/0, bufferCount);

    camera3_stream_buffer_set bufferSet = camera3_stream_buffer_set();
    bufferSet.stream = this;
    bufferSet.num_buffers = bufferCount;
    bufferSet.buffers = buffers.editArray();

    Vector<camera3_stream_buffer_t> streamBuffers;
    streamBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount);

    // Register all buffers with the HAL. This means getting all the buffers
    // from the stream, providing them to the HAL with the
    // register_stream_buffers() method, and then returning them back to the
    // stream in the error state, since they won't have valid data.
    //
    // Only registered buffers can be sent to the HAL.

    uint32_t bufferIdx = 0;
    for (; bufferIdx < bufferCount; bufferIdx++) {
        res = getBufferLocked( &streamBuffers.editItemAt(bufferIdx) );
        if (res != OK) {
            ALOGE("%s: Unable to get buffer %d for registration with HAL",
                    __FUNCTION__, bufferIdx);
            // Skip registering, go straight to cleanup
            break;
        }

        sp<Fence> fence = new Fence(streamBuffers[bufferIdx].acquire_fence);
        fence->waitForever("Camera3Stream::registerBuffers");

        buffers.editItemAt(bufferIdx) = streamBuffers[bufferIdx].buffer;
    }
    if (bufferIdx == bufferCount) {
        // Got all buffers, register with HAL
        ALOGV("%s: Registering %zu buffers with camera HAL",
                __FUNCTION__, bufferCount);
        ATRACE_BEGIN("camera3->register_stream_buffers");
        res = hal3Device->ops->register_stream_buffers(hal3Device, &bufferSet);
        ATRACE_END();
    }

    // Return all valid buffers to stream, in ERROR state to indicate
    // they weren't filled.
    for (size_t i = 0; i < bufferIdx; i++) {
        streamBuffers.editItemAt(i).release_fence = -1;
        streamBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
        returnBufferLocked(streamBuffers[i], 0);
    }

    mPrepared = true;

    return res;
}
Return<void> CameraDeviceSession::configureStreams_3_3(
        const StreamConfiguration& requestedConfiguration,
        ICameraDeviceSession::configureStreams_3_3_cb _hidl_cb)  {
    Status status = initStatus();
    HalStreamConfiguration outStreams;

    // hold the inflight lock for the entire configureStreams scope, since
    // there must not be any inflight requests/results during stream
    // configuration.
    Mutex::Autolock _l(mInflightLock);
    if (!mInflightBuffers.empty()) {
        ALOGE("%s: trying to configureStreams while there are still %zu inflight buffers!",
                __FUNCTION__, mInflightBuffers.size());
        _hidl_cb(Status::INTERNAL_ERROR, outStreams);
        return Void();
    }

    if (!mInflightAETriggerOverrides.empty()) {
        ALOGE("%s: trying to configureStreams while there are still %zu inflight"
                " trigger overrides!", __FUNCTION__,
                mInflightAETriggerOverrides.size());
        _hidl_cb(Status::INTERNAL_ERROR, outStreams);
        return Void();
    }

    if (!mInflightRawBoostPresent.empty()) {
        ALOGE("%s: trying to configureStreams while there are still %zu inflight"
                " boost overrides!", __FUNCTION__,
                mInflightRawBoostPresent.size());
        _hidl_cb(Status::INTERNAL_ERROR, outStreams);
        return Void();
    }

    if (status != Status::OK) {
        _hidl_cb(status, outStreams);
        return Void();
    }

    camera3_stream_configuration_t stream_list;
    hidl_vec<camera3_stream_t*> streams;

    stream_list.operation_mode = (uint32_t) requestedConfiguration.operationMode;
    stream_list.num_streams = requestedConfiguration.streams.size();
    streams.resize(stream_list.num_streams);
    stream_list.streams = streams.data();

    for (uint32_t i = 0; i < stream_list.num_streams; i++) {
        int id = requestedConfiguration.streams[i].id;

        if (mStreamMap.count(id) == 0) {
            Camera3Stream stream;
            V3_2::implementation::convertFromHidl(requestedConfiguration.streams[i], &stream);
            mStreamMap[id] = stream;
            mStreamMap[id].data_space = mapToLegacyDataspace(
                    mStreamMap[id].data_space);
            mCirculatingBuffers.emplace(stream.mId, CirculatingBuffers{});
        } else {
            // width/height/format must not change, but usage/rotation might need to change
            if (mStreamMap[id].stream_type !=
                    (int) requestedConfiguration.streams[i].streamType ||
                    mStreamMap[id].width != requestedConfiguration.streams[i].width ||
                    mStreamMap[id].height != requestedConfiguration.streams[i].height ||
                    mStreamMap[id].format != (int) requestedConfiguration.streams[i].format ||
                    mStreamMap[id].data_space !=
                            mapToLegacyDataspace( static_cast<android_dataspace_t> (
                                    requestedConfiguration.streams[i].dataSpace))) {
                ALOGE("%s: stream %d configuration changed!", __FUNCTION__, id);
                _hidl_cb(Status::INTERNAL_ERROR, outStreams);
                return Void();
            }
            mStreamMap[id].rotation = (int) requestedConfiguration.streams[i].rotation;
            mStreamMap[id].usage = (uint32_t) requestedConfiguration.streams[i].usage;
        }
        streams[i] = &mStreamMap[id];
    }

    ATRACE_BEGIN("camera3->configure_streams");
    status_t ret = mDevice->ops->configure_streams(mDevice, &stream_list);
    ATRACE_END();

    // In case the HAL returns an error, it most likely was not able to
    // release the corresponding resources of the deleted streams.
    if (ret == OK) {
        // delete unused streams; note we do this after adding new streams to
        // ensure a new stream will not have the same address as a deleted
        // stream, and the HAL has a chance to reference the to-be-deleted
        // stream in the configure_streams call
        for (auto it = mStreamMap.begin(); it != mStreamMap.end();) {
            int id = it->first;
            bool found = false;
            for (const auto& stream : requestedConfiguration.streams) {
                if (id == stream.id) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                // Unmap all buffers of the deleted stream,
                // in case the configuration call succeeds and the HAL
                // is able to release the corresponding resources too.
                cleanupBuffersLocked(id);
                it = mStreamMap.erase(it);
            } else {
                ++it;
            }
        }

        // Track video streams
        mVideoStreamIds.clear();
        for (const auto& stream : requestedConfiguration.streams) {
            if (stream.streamType == V3_2::StreamType::OUTPUT &&
                stream.usage & graphics::common::V1_0::BufferUsage::VIDEO_ENCODER) {
                mVideoStreamIds.push_back(stream.id);
            }
        }
        mResultBatcher.setBatchedStreams(mVideoStreamIds);
    }

    if (ret == -EINVAL) {
        status = Status::ILLEGAL_ARGUMENT;
    } else if (ret != OK) {
        status = Status::INTERNAL_ERROR;
    } else {
        convertToHidl(stream_list, &outStreams);
        mFirstRequest = true;
    }

    _hidl_cb(status, outStreams);
    return Void();
}
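/*
 * The error mapping at the end of configureStreams_3_3 is done inline; as a
 * sketch, the same mapping factored into a helper (the helper name is
 * illustrative, not part of the session code):
 */
static Status legacyToHidlStatus(status_t ret) {
    if (ret == OK)      return Status::OK;
    if (ret == -EINVAL) return Status::ILLEGAL_ARGUMENT;
    return Status::INTERNAL_ERROR;       // any other HAL failure
}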
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held. */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

#ifdef FASTIVA
    // Ensure no Java-object reference is used in the local stack,
    // and save any Java-object references that may be in registers.
    Thread* self = dvmThreadSelf();
#ifdef _DEBUG
    gc_start_threadId = dvmGetSysThreadId();
    gc_start_thread = pthread_self();
    //ALOGE("##### GC_START %i", dvmGetSysThreadId());
#endif
    void* fastiva_old_sp;
    ThreadStatus oldStatus = THREAD_RUNNING;
    if (self != NULL) {
        oldStatus = self->status;
        jmp_buf* fastiva_buf$ = (jmp_buf*)alloca(sizeof(jmp_buf));
        fastiva_old_sp = fastiva_lockStack(self, fastiva_buf$);
        self->status = THREAD_NATIVE;
    }
#endif

    // Trace the beginning of the top-level GC.
    if (spec == GC_FOR_MALLOC) {
        ATRACE_BEGIN("GC (alloc)");
    } else if (spec == GC_CONCURRENT) {
        ATRACE_BEGIN("GC (concurrent)");
    } else if (spec == GC_EXPLICIT) {
        ATRACE_BEGIN("GC (explicit)");
    } else if (spec == GC_BEFORE_OOM) {
        ATRACE_BEGIN("GC (before OOM)");
    } else {
        ATRACE_BEGIN("GC (unknown)");
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    ATRACE_BEGIN("GC: Threads Suspended"); // Suspend A
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        ATRACE_END(); // Suspend A
        ATRACE_END(); // Top-level GC
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend A
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        ATRACE_BEGIN("GC: Threads Suspended"); // Suspend B
        dvmSuspendAllThreads(SUSPEND_FOR_GC);

        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
#ifndef FASTIVA_PRELOAD_STATIC_INSTANCE
        dvmHeapReMarkRootSet();
#endif

        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }

        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

#ifdef FASTIVA_PRELOAD_STATIC_INSTANCE
    u4 staticScanStart = dvmGetRelativeTimeMsec();
    dvmHeapReMarkRootSet();
    //void fastiva_dvmHeapReScanRootObjects();
    //fastiva_dvmHeapReScanRootObjects();
    u4 staticScanEnd = dvmGetRelativeTimeMsec();
#endif

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words.  It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend B
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

#ifdef FASTIVA
    if (self != NULL) {
        self->status = oldStatus;
        fastiva_releaseStack(self, fastiva_old_sp);
    }
#endif

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake-up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend A
        dirtyEnd = dvmGetRelativeTimeMsec();

        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
              spec->reason,
              isSmall ? "<" : "",
              numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
              percentFree,
              currAllocated / 1024, currFootprint / 1024,
              markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
              spec->reason,
              isSmall ? "<" : "",
              numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
              percentFree,
              currAllocated / 1024, currFootprint / 1024,
              rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }

    ATRACE_END(); // Top-level GC
}
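/*
 * Worked example of the log arithmetic above (values invented for
 * illustration): with 12 MiB still allocated out of a 16 MiB footprint,
 *
 *   percentFree = 100 - (size_t)(100.0f * 12288 / 16384) = 100 - 75 = 25
 *
 * so the line reads "... 25% free 12288K/16384K ...".
 */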
/*==============================================================================
 * Function    : CompleteMainImage
 * Parameters  : QImage
 * Return Value: int
 * Description : Completes main image encoding.
 ==============================================================================*/
int OMXJpegEncoder::CompleteMainImage()
{
    OMX_ERRORTYPE lret = OMX_ErrorNone;

    /* Post ETB Done and FTB Done to the queue since we don't want to do a
       callback with the Event thread from the codec layer */
    QI_LOCK(&m_abortlock);
    if (!m_abort_flag && (OMX_FALSE == m_releaseFlag)) {
        //Post EBD to the message queue
        QIMessage *lebdMessage = new QIMessage();
        if (!lebdMessage) {
            QIDBG_ERROR("%s:%d] Could not allocate QIMessage", __func__, __LINE__);
            QI_UNLOCK(&m_abortlock);
            return QI_ERR_NO_MEMORY;
        }
        lebdMessage->m_qMessage = OMX_MESSAGE_ETB_DONE;
        //The i/p buffer has been consumed completely. Set the nFilledLen to 0x0
        m_currentInBuffHdr->nFilledLen = 0;
        lebdMessage->pData = m_currentInBuffHdr;

        //Post FBD message to the message queue
        QIMessage *lfbdMessage = new QIMessage();
        if (!lfbdMessage) {
            QIDBG_ERROR("%s:%d] Could not allocate QIMessage", __func__, __LINE__);
            delete lebdMessage;
            QI_UNLOCK(&m_abortlock);
            return QI_ERR_NO_MEMORY;
        }

        if (NULL != m_memOps.get_memory) {
            omx_jpeg_ouput_buf_t *jpeg_out =
                (omx_jpeg_ouput_buf_t *)m_outputQIBuffer->Addr();

            if ((m_outputMainImage->FilledLen() + getEstimatedExifSize()) <
                m_outputMainImage->Length()) {
                QIBuffer lApp1Buf = QIBuffer(
                    (uint8_t*)m_outputMainImage->BaseAddr() + m_outputMainImage->FilledLen(),
                    m_outputMainImage->Length() - m_outputMainImage->FilledLen());

                if ((m_thumbnailInfo.input_height != 0) &&
                    (m_thumbnailInfo.input_width != 0)) {
                    lret = writeExifData(m_outThumbImage, &lApp1Buf);
                } else {
                    lret = writeExifData(NULL, &lApp1Buf);
                }
                if (lret != OMX_ErrorNone) {
                    QIDBG_ERROR("%s:%d ", __func__, __LINE__);
                    QI_UNLOCK(&m_abortlock);
                    return QI_ERR_GENERAL;
                }

                jpeg_out->size = lApp1Buf.FilledLen() + m_outputMainImage->FilledLen();
                m_memOps.get_memory(jpeg_out);
                if (!jpeg_out->vaddr) {
                    QIDBG_ERROR("%s:%d get_memory failed", __func__, __LINE__);
                    QI_UNLOCK(&m_abortlock); /* don't return with the abort lock held */
                    return QI_ERR_GENERAL;
                }

                ATRACE_BEGIN("Camera:JPEG:memcpy");
                memcpy(jpeg_out->vaddr,
                       m_outputMainImage->BaseAddr() + m_outputMainImage->FilledLen(),
                       lApp1Buf.FilledLen());
                memcpy(jpeg_out->vaddr + lApp1Buf.FilledLen(),
                       m_outputMainImage->BaseAddr(),
                       m_outputMainImage->FilledLen());
                ATRACE_END();

                //Set the filled length of the output buffer
                m_currentOutBuffHdr->nFilledLen =
                    m_outputMainImage->FilledLen() + lApp1Buf.FilledLen();
            } else {
                QIDBG_HIGH("%s:%d Allocating extra temp buffer for Exif ",
                           __func__, __LINE__);
                uint8_t *exif_buf = (uint8_t*)malloc(getEstimatedExifSize());
                if (exif_buf == NULL) {
                    QIDBG_ERROR("%s:%d exif mem alloc failed", __func__, __LINE__);
                    QI_UNLOCK(&m_abortlock); /* don't return with the abort lock held */
                    return QI_ERR_GENERAL;
                }
                QIBuffer lApp1Buf = QIBuffer((uint8_t*)exif_buf, getEstimatedExifSize());

                if ((m_thumbnailInfo.input_height != 0) &&
                    (m_thumbnailInfo.input_width != 0)) {
                    lret = writeExifData(m_outThumbImage, &lApp1Buf);
                } else {
                    lret = writeExifData(NULL, &lApp1Buf);
                }
                if (lret != OMX_ErrorNone) {
                    QIDBG_ERROR("%s:%d ", __func__, __LINE__);
                    QI_UNLOCK(&m_abortlock);
                    free(exif_buf);
                    return QI_ERR_GENERAL;
                }

                jpeg_out->size = lApp1Buf.FilledLen() + m_outputMainImage->FilledLen();
                m_memOps.get_memory(jpeg_out);
                if (!jpeg_out->vaddr) {
                    QIDBG_ERROR("%s:%d get_memory failed", __func__, __LINE__);
                    QI_UNLOCK(&m_abortlock); /* don't return with the abort lock held */
                    free(exif_buf);
                    return QI_ERR_GENERAL;
                }

                ATRACE_BEGIN("Camera:JPEG:memcpy");
                memcpy(jpeg_out->vaddr, exif_buf, lApp1Buf.FilledLen());
                memcpy(jpeg_out->vaddr + lApp1Buf.FilledLen(),
                       m_outputMainImage->BaseAddr(),
                       m_outputMainImage->FilledLen());
                ATRACE_END();

                //Set the filled length of the output buffer
                m_currentOutBuffHdr->nFilledLen =
                    m_outputMainImage->FilledLen() + lApp1Buf.FilledLen();
                free(exif_buf);
            }
        } else if (m_outputQIBuffer->Addr() != m_outputMainImage->BaseAddr()) {
            ATRACE_BEGIN("Camera:JPEG:memcpyQI");
            memcpy(m_outputQIBuffer->Addr() + m_outputQIBuffer->FilledLen(),
                   m_outputMainImage->BaseAddr(),
                   m_outputMainImage->FilledLen());
            ATRACE_END();
            //Set the filled length of the output buffer
            m_currentOutBuffHdr->nFilledLen =
                m_outputMainImage->FilledLen() + m_outputQIBuffer->FilledLen();
        }

        QIDBG_HIGH("%s:%d] Encoded image length %d", __func__, __LINE__,
                   (int)m_currentOutBuffHdr->nFilledLen);

        lfbdMessage->m_qMessage = OMX_MESSAGE_FTB_DONE;
        lfbdMessage->pData = m_currentOutBuffHdr;

        lret = postMessage(lebdMessage);
        if (QI_ERROR(lret)) {
            QIDBG_ERROR("%s:%d] Could not send EBD", __func__, __LINE__);
            delete lebdMessage;
            delete lfbdMessage;
            QI_UNLOCK(&m_abortlock);
            return QI_ERR_NO_MEMORY;
        }

        lret = postMessage(lfbdMessage);
        if (QI_ERROR(lret)) {
            QIDBG_ERROR("%s:%d] Could not send FBD", __func__, __LINE__);
            delete lfbdMessage;
            QI_UNLOCK(&m_abortlock);
            return QI_ERR_NO_MEMORY;
        }

        // Send message for a new encode process
        QIMessage *lEncodeMessage = new QIMessage();
        if (!lEncodeMessage) {   // was '!lebdMessage': checked the wrong pointer
            QIDBG_ERROR("%s:%d] Could not allocate QIMessage", __func__, __LINE__);
            QI_UNLOCK(&m_abortlock);
            return QI_ERR_NO_MEMORY;
        }
        lEncodeMessage->m_qMessage = OMX_MESSAGE_START_NEW_ENCODE;
        lret = postMessage(lEncodeMessage);
        if (QI_ERROR(lret)) {
            QIDBG_ERROR("%s:%d] Could not send Start encode", __func__, __LINE__);
            delete lEncodeMessage;
            QI_UNLOCK(&m_abortlock);
            return QI_ERR_NO_MEMORY;
        }
    }
    QI_UNLOCK(&m_abortlock);
    return QI_SUCCESS;
}
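/*
 * Sketch (illustrative, not the actual codec code): the error paths above
 * juggle manual delete and QI_UNLOCK calls; C++ scope-based ownership can
 * automate both. Here postMessage() is assumed, as above, to take ownership
 * of the message only when it succeeds, and std::mutex stands in for the
 * QI_LOCK/QI_UNLOCK pair.
 */
#include <memory>
#include <mutex>

int postEncodeDoneSketch(std::mutex& abortLock)
{
    std::lock_guard<std::mutex> lock(abortLock);   // unlocks on every return

    auto msg = std::make_unique<QIMessage>();
    msg->m_qMessage = OMX_MESSAGE_FTB_DONE;

    if (QI_ERROR(postMessage(msg.get()))) {
        return QI_ERR_NO_MEMORY;   // msg freed automatically by unique_ptr
    }
    msg.release();                 // the queue now owns the message
    return QI_SUCCESS;
}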