void NuCachedSource2::onRead(const sp<AMessage> &msg) { ALOGV("onRead"); int64_t offset; CHECK(msg->findInt64("offset", &offset)); void *data; CHECK(msg->findPointer("data", &data)); size_t size; CHECK(msg->findSize("size", &size)); ssize_t result = readInternal(offset, data, size); if (result == -EAGAIN) { msg->post(50000); return; } Mutex::Autolock autoLock(mLock); if (mDisconnecting) { mCondition.signal(); return; } CHECK(mAsyncResult == NULL); mAsyncResult = new AMessage; mAsyncResult->setInt32("result", result); mCondition.signal(); }
void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) { size_t trackIndex; CHECK(msg->findSize("trackIndex", &trackIndex)); int32_t what; CHECK(msg->findInt32("what", &what)); switch (what) { case DecoderContext::kWhatOutputBufferReady: { size_t index; CHECK(msg->findSize("index", &index)); int64_t timeUs; CHECK(msg->findInt64("timeUs", &timeUs)); sp<ABuffer> buffer; CHECK(msg->findBuffer("buffer", &buffer)); queueOutputBuffer(trackIndex, index, timeUs, buffer); break; } default: TRESPASS(); } }
// Returns the int64 value stored under 'key' in 'meta', or 0 if the key
// is absent.  Note: a stored value of 0 is indistinguishable from
// "missing" for callers of this helper.
//
// Fixes: parameters were named with the 'm' member prefix (mMetaData,
// mKey) despite being locals, and the sp<> was taken by non-const
// reference although it is only read.
static int64_t FindInt64(const sp<MetaData> &meta, uint32_t key) {
    int64_t value;
    if (!meta->findInt64(key, &value)) {
        return 0;
    }
    return value;
}
// Central message dispatcher for NuPlayer; runs on the player's looper
// thread, so all member state below is accessed single-threaded.
// Handles: data-source setup, track info/selection queries (replied via
// postReply), duration polling, surface / audio-sink changes, start,
// periodic source scanning, decoder (ACodec) and renderer notifications,
// and reset/seek/pause/resume.  Surface changes, reset and seek are not
// executed inline: they are queued in mDeferredActions and run by
// processDeferredActions() once any in-flight decoder flush completes.
void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatSetDataSource:
        {
            ALOGV("kWhatSetDataSource");

            CHECK(mSource == NULL);

            sp<RefBase> obj;
            CHECK(msg->findObject("source", &obj));

            mSource = static_cast<Source *>(obj.get());

            looper()->registerHandler(mSource);

            CHECK(mDriver != NULL);
            sp<NuPlayerDriver> driver = mDriver.promote();
            if (driver != NULL) {
                driver->notifySetDataSourceCompleted(OK);
            }
            break;
        }

        case kWhatPrepare:
        {
            mSource->prepareAsync();
            break;
        }

        case kWhatGetTrackInfo:
        {
            uint32_t replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            status_t err = INVALID_OPERATION;
            if (mSource != NULL) {
                // The caller passes a raw Parcel* it owns; we only fill it.
                Parcel* reply;
                CHECK(msg->findPointer("reply", (void**)&reply));
                err = mSource->getTrackInfo(reply);
            }

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);

            response->postReply(replyID);
            break;
        }

        case kWhatSelectTrack:
        {
            uint32_t replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            status_t err = INVALID_OPERATION;
            if (mSource != NULL) {
                size_t trackIndex;
                int32_t select;
                CHECK(msg->findSize("trackIndex", &trackIndex));
                CHECK(msg->findInt32("select", &select));
                err = mSource->selectTrack(trackIndex, select);
            }

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);

            response->postReply(replyID);
            break;
        }

        case kWhatPollDuration:
        {
            // Generation counter invalidates polls scheduled before the
            // most recent schedulePollDuration()/cancel.
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));

            if (generation != mPollDurationGeneration) {
                // stale
                break;
            }

            int64_t durationUs;
            if (mDriver != NULL && mSource->getDuration(&durationUs) == OK) {
                sp<NuPlayerDriver> driver = mDriver.promote();
                if (driver != NULL) {
                    driver->notifyDuration(durationUs);
                }
            }

            msg->post(1000000ll);  // poll again in a second.
            break;
        }

        case kWhatSetVideoNativeWindow:
        {
            ALOGV("kWhatSetVideoNativeWindow");

            // The video decoder must be shut down before the surface can
            // be swapped; both steps are deferred.
            mDeferredActions.push_back(
                    new ShutdownDecoderAction(
                        false /* audio */, true /* video */));

            sp<RefBase> obj;
            CHECK(msg->findObject("native-window", &obj));

            mDeferredActions.push_back(
                    new SetSurfaceAction(
                        static_cast<NativeWindowWrapper *>(obj.get())));

            if (obj != NULL) {
                // If there is a new surface texture, instantiate decoders
                // again if possible.
                mDeferredActions.push_back(
                        new SimpleAction(&NuPlayer::performScanSources));
            }

            processDeferredActions();
            break;
        }

        case kWhatSetAudioSink:
        {
            ALOGV("kWhatSetAudioSink");

            sp<RefBase> obj;
            CHECK(msg->findObject("sink", &obj));

            mAudioSink = static_cast<MediaPlayerBase::AudioSink *>(obj.get());
            break;
        }

        case kWhatStart:
        {
            ALOGV("kWhatStart");

            // Reset all per-playback state before (re)starting.
            mVideoIsAVC = false;
            mAudioEOS = false;
            mVideoEOS = false;
            mSkipRenderingAudioUntilMediaTimeUs = -1;
            mSkipRenderingVideoUntilMediaTimeUs = -1;
            mVideoLateByUs = 0;
            mNumFramesTotal = 0;
            mNumFramesDropped = 0;
            mStarted = true;

            mSource->start();

            uint32_t flags = 0;

            if (mSource->isRealTime()) {
                flags |= Renderer::FLAG_REAL_TIME;
            }

            mRenderer = new Renderer(
                    mAudioSink,
                    new AMessage(kWhatRendererNotify, id()),
                    flags);

            looper()->registerHandler(mRenderer);

            postScanSources();
            break;
        }

        case kWhatScanSources:
        {
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));
            if (generation != mScanSourcesGeneration) {
                // Drop obsolete msg.
                break;
            }

            mScanSourcesPending = false;

            ALOGV("scanning sources haveAudio=%d, haveVideo=%d",
                 mAudioDecoder != NULL, mVideoDecoder != NULL);

            bool mHadAnySourcesBefore =
                (mAudioDecoder != NULL) || (mVideoDecoder != NULL);

            // Decoders are only instantiated once the corresponding output
            // (surface for video, sink for audio) is available.
            if (mNativeWindow != NULL) {
                instantiateDecoder(false, &mVideoDecoder);
            }

            if (mAudioSink != NULL) {
                instantiateDecoder(true, &mAudioDecoder);
            }

            if (!mHadAnySourcesBefore
                    && (mAudioDecoder != NULL || mVideoDecoder != NULL)) {
                // This is the first time we've found anything playable.
                if (mSourceFlags & Source::FLAG_DYNAMIC_DURATION) {
                    schedulePollDuration();
                }
            }

            status_t err;
            if ((err = mSource->feedMoreTSData()) != OK) {
                if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
                    // We're not currently decoding anything (no audio or
                    // video tracks found) and we just ran out of input data.
                    if (err == ERROR_END_OF_STREAM) {
                        notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
                    } else {
                        notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
                    }
                }
                break;
            }

            // Still missing a decoder we expect to create: rescan in 100ms.
            if ((mAudioDecoder == NULL && mAudioSink != NULL)
                    || (mVideoDecoder == NULL && mNativeWindow != NULL)) {
                msg->post(100000ll);
                mScanSourcesPending = true;
            }
            break;
        }

        case kWhatVideoNotify:
        case kWhatAudioNotify:
        {
            bool audio = msg->what() == kWhatAudioNotify;

            sp<AMessage> codecRequest;
            CHECK(msg->findMessage("codec-request", &codecRequest));

            int32_t what;
            CHECK(codecRequest->findInt32("what", &what));

            if (what == ACodec::kWhatFillThisBuffer) {
                status_t err = feedDecoderInputData(
                        audio, codecRequest);

                if (err == -EWOULDBLOCK) {
                    // No input available right now; poll the source again
                    // shortly if it is still healthy.
                    if (mSource->feedMoreTSData() == OK) {
                        msg->post(10000ll);
                    }
                }
            } else if (what == ACodec::kWhatEOS) {
                int32_t err;
                CHECK(codecRequest->findInt32("err", &err));

                if (err == ERROR_END_OF_STREAM) {
                    ALOGV("got %s decoder EOS", audio ? "audio" : "video");
                } else {
                    ALOGV("got %s decoder EOS w/ error %d",
                         audio ? "audio" : "video",
                         err);
                }

                mRenderer->queueEOS(audio, err);
            } else if (what == ACodec::kWhatFlushCompleted) {
                bool needShutdown;

                if (audio) {
                    CHECK(IsFlushingState(mFlushingAudio, &needShutdown));
                    mFlushingAudio = FLUSHED;
                } else {
                    CHECK(IsFlushingState(mFlushingVideo, &needShutdown));
                    mFlushingVideo = FLUSHED;

                    mVideoLateByUs = 0;
                }

                ALOGV("decoder %s flush completed", audio ? "audio" : "video");

                if (needShutdown) {
                    // Flush was the first phase of a decoder shutdown;
                    // proceed to the shutdown phase now.
                    ALOGV("initiating %s decoder shutdown",
                         audio ? "audio" : "video");

                    (audio ? mAudioDecoder : mVideoDecoder)->initiateShutdown();

                    if (audio) {
                        mFlushingAudio = SHUTTING_DOWN_DECODER;
                    } else {
                        mFlushingVideo = SHUTTING_DOWN_DECODER;
                    }
                }

                finishFlushIfPossible();
            } else if (what == ACodec::kWhatOutputFormatChanged) {
                if (audio) {
                    int32_t numChannels;
                    CHECK(codecRequest->findInt32(
                                "channel-count", &numChannels));

                    int32_t sampleRate;
                    CHECK(codecRequest->findInt32("sample-rate", &sampleRate));

                    ALOGV("Audio output format changed to %d Hz, %d channels",
                         sampleRate, numChannels);

                    // Reopen the sink with the new format.
                    mAudioSink->close();

                    audio_output_flags_t flags;
                    int64_t durationUs;
                    // FIXME: we should handle the case where the video decoder
                    // is created after we receive the format change indication.
                    // Current code will just make that we select deep buffer
                    // with video which should not be a problem as it should
                    // not prevent from keeping A/V sync.
                    if (mVideoDecoder == NULL &&
                            mSource->getDuration(&durationUs) == OK &&
                            durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
                        flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
                    } else {
                        flags = AUDIO_OUTPUT_FLAG_NONE;
                    }

                    int32_t channelMask;
                    if (!codecRequest->findInt32("channel-mask", &channelMask)) {
                        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
                    }

                    CHECK_EQ(mAudioSink->open(
                                sampleRate,
                                numChannels,
                                (audio_channel_mask_t)channelMask,
                                AUDIO_FORMAT_PCM_16_BIT,
                                8 /* bufferCount */,
                                NULL,
                                NULL,
                                flags),
                             (status_t)OK);

                    mAudioSink->start();

                    mRenderer->signalAudioSinkChanged();
                } else {
                    // video
                    int32_t width, height;
                    CHECK(codecRequest->findInt32("width", &width));
                    CHECK(codecRequest->findInt32("height", &height));

                    int32_t cropLeft, cropTop, cropRight, cropBottom;
                    CHECK(codecRequest->findRect(
                                "crop",
                                &cropLeft, &cropTop, &cropRight, &cropBottom));

                    // Crop rect is inclusive, hence the +1.
                    int32_t displayWidth = cropRight - cropLeft + 1;
                    int32_t displayHeight = cropBottom - cropTop + 1;

                    ALOGV("Video output format changed to %d x %d "
                         "(crop: %d x %d @ (%d, %d))",
                         width, height,
                         displayWidth,
                         displayHeight,
                         cropLeft, cropTop);

                    sp<AMessage> videoInputFormat =
                        mSource->getFormat(false /* audio */);

                    // Take into account sample aspect ratio if necessary:
                    int32_t sarWidth, sarHeight;
                    if (videoInputFormat->findInt32("sar-width", &sarWidth)
                            && videoInputFormat->findInt32(
                                "sar-height", &sarHeight)) {
                        ALOGV("Sample aspect ratio %d : %d",
                              sarWidth, sarHeight);

                        displayWidth = (displayWidth * sarWidth) / sarHeight;

                        ALOGV("display dimensions %d x %d",
                              displayWidth, displayHeight);
                    }

                    notifyListener(
                            MEDIA_SET_VIDEO_SIZE, displayWidth, displayHeight);
                }
            } else if (what == ACodec::kWhatShutdownCompleted) {
                ALOGV("%s shutdown completed", audio ? "audio" : "video");
                if (audio) {
                    mAudioDecoder.clear();

                    CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
                    mFlushingAudio = SHUT_DOWN;
                } else {
                    mVideoDecoder.clear();

                    CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
                    mFlushingVideo = SHUT_DOWN;
                }

                finishFlushIfPossible();
            } else if (what == ACodec::kWhatError) {
                ALOGE("Received error from %s decoder, aborting playback.",
                     audio ? "audio" : "video");

                mRenderer->queueEOS(audio, UNKNOWN_ERROR);
            } else if (what == ACodec::kWhatDrainThisBuffer) {
                renderBuffer(audio, codecRequest);
            } else if (what != ACodec::kWhatComponentAllocated
                    && what != ACodec::kWhatComponentConfigured
                    && what != ACodec::kWhatBuffersAllocated) {
                // 'what' values are FourCC-like; log them as characters.
                ALOGV("Unhandled codec notification %d '%c%c%c%c'.",
                      what,
                      what >> 24,
                      (what >> 16) & 0xff,
                      (what >> 8) & 0xff,
                      what & 0xff);
            }

            break;
        }

        case kWhatRendererNotify:
        {
            int32_t what;
            CHECK(msg->findInt32("what", &what));

            if (what == Renderer::kWhatEOS) {
                int32_t audio;
                CHECK(msg->findInt32("audio", &audio));

                int32_t finalResult;
                CHECK(msg->findInt32("finalResult", &finalResult));

                if (audio) {
                    mAudioEOS = true;
                } else {
                    mVideoEOS = true;
                }

                if (finalResult == ERROR_END_OF_STREAM) {
                    ALOGV("reached %s EOS", audio ? "audio" : "video");
                } else {
                    ALOGE("%s track encountered an error (%d)",
                         audio ? "audio" : "video", finalResult);

                    notifyListener(
                            MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, finalResult);
                }

                // Playback is complete once every instantiated track has
                // reached EOS.
                if ((mAudioEOS || mAudioDecoder == NULL)
                        && (mVideoEOS || mVideoDecoder == NULL)) {
                    notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
                }
            } else if (what == Renderer::kWhatPosition) {
                int64_t positionUs;
                CHECK(msg->findInt64("positionUs", &positionUs));

                CHECK(msg->findInt64("videoLateByUs", &mVideoLateByUs));

                if (mDriver != NULL) {
                    sp<NuPlayerDriver> driver = mDriver.promote();
                    if (driver != NULL) {
                        driver->notifyPosition(positionUs);

                        driver->notifyFrameStats(
                                mNumFramesTotal, mNumFramesDropped);
                    }
                }
            } else if (what == Renderer::kWhatFlushComplete) {
                int32_t audio;
                CHECK(msg->findInt32("audio", &audio));

                ALOGV("renderer %s flush completed.", audio ? "audio" : "video");
            } else if (what == Renderer::kWhatVideoRenderingStart) {
                notifyListener(MEDIA_INFO, MEDIA_INFO_RENDERING_START, 0);
            } else if (what == Renderer::kWhatMediaRenderingStart) {
                ALOGV("media rendering started");
                notifyListener(MEDIA_STARTED, 0, 0);
            }
            break;
        }

        case kWhatMoreDataQueued:
        {
            break;
        }

        case kWhatReset:
        {
            ALOGV("kWhatReset");

            mDeferredActions.push_back(
                    new ShutdownDecoderAction(
                        true /* audio */, true /* video */));

            mDeferredActions.push_back(
                    new SimpleAction(&NuPlayer::performReset));

            processDeferredActions();
            break;
        }

        case kWhatSeek:
        {
            int64_t seekTimeUs;
            CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));

            ALOGV("kWhatSeek seekTimeUs=%lld us", seekTimeUs);

            // Flush decoders first, then perform the actual seek.
            mDeferredActions.push_back(
                    new SimpleAction(&NuPlayer::performDecoderFlush));

            mDeferredActions.push_back(new SeekAction(seekTimeUs));

            processDeferredActions();
            break;
        }

        case kWhatPause:
        {
            CHECK(mRenderer != NULL);
            mSource->pause();
            mRenderer->pause();
            break;
        }

        case kWhatResume:
        {
            CHECK(mRenderer != NULL);
            mSource->resume();
            mRenderer->resume();
            break;
        }

        case kWhatSourceNotify:
        {
            onSourceNotify(msg);
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
// Converts an AMessage into a Java HashMap<String, Object>.
// int32/int64/float/string/buffer entries map to Integer/Long/Float/
// String/ByteBuffer values; rect entries are flattened into four
// "<key>-left/-top/-right/-bottom" Integer entries.  Entries of any
// other type are silently skipped.
//
// On success '*map' receives a local reference to the new HashMap and
// OK is returned; on any JNI lookup/allocation failure -EINVAL is
// returned and '*map' is left untouched.
status_t ConvertMessageToMap(
        JNIEnv *env, const sp<AMessage> &msg, jobject *map) {
    ScopedLocalRef<jclass> hashMapClazz(
            env, env->FindClass("java/util/HashMap"));

    if (hashMapClazz.get() == NULL) {
        return -EINVAL;
    }

    jmethodID hashMapConstructID =
        env->GetMethodID(hashMapClazz.get(), "<init>", "()V");

    if (hashMapConstructID == NULL) {
        return -EINVAL;
    }

    jmethodID hashMapPutID =
        env->GetMethodID(
                hashMapClazz.get(),
                "put",
                "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");

    if (hashMapPutID == NULL) {
        return -EINVAL;
    }

    jobject hashMap = env->NewObject(hashMapClazz.get(), hashMapConstructID);

    // BUGFIX: NewObject can return NULL (allocation failure / pending
    // exception); the original code would then crash inside the first
    // CallObjectMethod below.
    if (hashMap == NULL) {
        return -EINVAL;
    }

    for (size_t i = 0; i < msg->countEntries(); ++i) {
        AMessage::Type valueType;
        const char *key = msg->getEntryNameAt(i, &valueType);

        jobject valueObj = NULL;

        switch (valueType) {
            case AMessage::kTypeInt32:
            {
                int32_t val;
                CHECK(msg->findInt32(key, &val));

                valueObj = makeIntegerObject(env, val);
                break;
            }

            case AMessage::kTypeInt64:
            {
                int64_t val;
                CHECK(msg->findInt64(key, &val));

                valueObj = makeLongObject(env, val);
                break;
            }

            case AMessage::kTypeFloat:
            {
                float val;
                CHECK(msg->findFloat(key, &val));

                valueObj = makeFloatObject(env, val);
                break;
            }

            case AMessage::kTypeString:
            {
                AString val;
                CHECK(msg->findString(key, &val));

                valueObj = env->NewStringUTF(val.c_str());
                break;
            }

            case AMessage::kTypeBuffer:
            {
                sp<ABuffer> buffer;
                CHECK(msg->findBuffer(key, &buffer));

                valueObj = makeByteBufferObject(
                        env, buffer->data(), buffer->size());
                break;
            }

            case AMessage::kTypeRect:
            {
                // Rects have no direct Java analogue here; store the four
                // edges as separate derived keys.
                int32_t left, top, right, bottom;
                CHECK(msg->findRect(key, &left, &top, &right, &bottom));

                SetMapInt32(
                        env, hashMap, hashMapPutID,
                        StringPrintf("%s-left", key).c_str(), left);

                SetMapInt32(
                        env, hashMap, hashMapPutID,
                        StringPrintf("%s-top", key).c_str(), top);

                SetMapInt32(
                        env, hashMap, hashMapPutID,
                        StringPrintf("%s-right", key).c_str(), right);

                SetMapInt32(
                        env, hashMap, hashMapPutID,
                        StringPrintf("%s-bottom", key).c_str(), bottom);
                break;
            }

            default:
                break;
        }

        if (valueObj != NULL) {
            jstring keyObj = env->NewStringUTF(key);

            env->CallObjectMethod(hashMap, hashMapPutID, keyObj, valueObj);

            // Release local refs eagerly: the local reference table is
            // limited and this loop may run many iterations.
            env->DeleteLocalRef(keyObj); keyObj = NULL;
            env->DeleteLocalRef(valueObj); valueObj = NULL;
        }
    }

    *map = hashMap;

    return OK;
}
// Message dispatcher for RTSPSource.  Command messages (disconnect,
// seek, play, pause, resume, suspend) are handled first and return
// early; everything else must be a kWhatNotify carrying an event from
// the RtspConnectionHandler, dispatched by its "what" payload.
void RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
    if (msg->what() == kWhatDisconnect) {
        uint32_t replyID;
        CHECK(msg->senderAwaitsResponse(&replyID));

        mDisconnectReplyID = replyID;
        finishDisconnectIfPossible();
        return;
    } else if (msg->what() == kWhatPerformSeek) {
        // Generation counter discards seek requests superseded by a
        // newer seek.
        int32_t generation;
        CHECK(msg->findInt32("generation", &generation));

        if (generation != mSeekGeneration) {
            // obsolete.
            return;
        }

        int64_t seekTimeUs;
        CHECK(msg->findInt64("timeUs", &seekTimeUs));

        performSeek(seekTimeUs);
        return;
    } else if (msg->what() == kWhatPerformPlay) {
        int64_t playTimeUs;
        CHECK(msg->findInt64("timeUs", &playTimeUs));
        performPlay(playTimeUs);
        return;
    } else if (msg->what() == kWhatPerformPause) {
        performPause();
        return;
    } else if (msg->what() == kWhatPerformResume) {
        performResume();
        return;
    } else if (msg->what() == kWhatPerformSuspend) {
        performSuspend();
        return;
    }

    CHECK_EQ(msg->what(), (uint32_t)kWhatNotify);

    int32_t what;
    int32_t isSeekable = 0;
    CHECK(msg->findInt32("what", &what));

    switch (what) {
        case RtspConnectionHandler::kWhatConnected:
            CHECK(msg->findInt32("isSeekable", &isSeekable));
            onConnected((isSeekable ? true:false));
            break;

        case RtspConnectionHandler::kWhatDisconnected:
            onDisconnected(msg);
            break;

        case RtspConnectionHandler::kWhatSeekDone:
        {
            mState = PLAYING;
            // Even if we have reset mLatestPausedUnit in performSeek(),
            // it's still possible that kWhatPausedDone event may arrive
            // because of previous performPause() command.
            for (size_t i = 0; i < mTracks.size(); ++i) {
                TrackInfo *info = &mTracks.editItemAt(i);
                info->mLatestPausedUnit = 0;
            }
            mLatestPausedUnit = 0;
            break;
        }

        case RtspConnectionHandler::kWhatPausedDone:
        {
            // Snapshot each track's latest received timestamp as its
            // pause point.
            for (size_t i = 0; i < mTracks.size(); ++i) {
                TrackInfo *info = &mTracks.editItemAt(i);
                info->mLatestPausedUnit = info->mLatestReceivedUnit;
            }

            // The timestamp after a 'Pause' is done is the earliest
            // timestamp among all of the latest received units.
            // NOTE(review): editItemAt(0) assumes mTracks is non-empty
            // here — confirm no pause can complete with zero tracks.
            TrackInfo *info = &mTracks.editItemAt(0);
            mLatestPausedUnit = info->mLatestReceivedUnit;
            for (size_t i = 1; i < mTracks.size(); ++i) {
                TrackInfo *info = &mTracks.editItemAt(i);
                if (mLatestPausedUnit > info->mLatestReceivedUnit) {
                    mLatestPausedUnit = info->mLatestReceivedUnit;
                }
            }
            break;
        }

        case RtspConnectionHandler::kWhatAccessUnit:
        {
            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            sp<RefBase> obj;
            CHECK(msg->findObject("accessUnit", &obj));

            sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get());

            int32_t damaged;
            if (accessUnit->meta()->findInt32("damaged", &damaged)
                    && damaged) {
                LOGI("dropping damaged access unit.");
                break;
            }

            TrackInfo *info = &mTracks.editItemAt(trackIndex);

            sp<AnotherPacketSource> source = info->mSource;
            if (source != NULL) {
                uint32_t rtpTime;
                CHECK(accessUnit->meta()->findInt32(
                            "rtp-time", (int32_t *)&rtpTime));

                if (!info->mNPTMappingValid) {
                    // This is a live stream, we didn't receive any normal
                    // playtime mapping. Assume the first packets correspond
                    // to time 0.
                    LOGV("This is a live stream, assuming time = 0");
                    info->mRTPTime = rtpTime;
                    info->mNormalPlaytimeUs = 0ll;
                    info->mNPTMappingValid = true;
                }

                // Map the RTP timestamp onto normal playtime (microseconds).
                int64_t nptUs =
                    ((double)rtpTime - (double)info->mRTPTime)
                        / info->mTimeScale
                        * 1000000ll
                        + info->mNormalPlaytimeUs;

                accessUnit->meta()->setInt64("timeUs", nptUs);
                info->mLatestReceivedUnit = nptUs;

                // Drop the frames that are older than the frames in the queue.
                if (info->mLatestPausedUnit
                        && (int64_t)info->mLatestPausedUnit > nptUs) {
                    break;
                }
                source->queueAccessUnit(accessUnit);
            }
            onTrackDataAvailable(trackIndex);
            break;
        }

        case RtspConnectionHandler::kWhatEOS:
        {
            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            int32_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            CHECK_NE(finalResult, (status_t)OK);

            TrackInfo *info = &mTracks.editItemAt(trackIndex);
            sp<AnotherPacketSource> source = info->mSource;
            if (source != NULL) {
                source->signalEOS(finalResult);
            }

            break;
        }

        case RtspConnectionHandler::kWhatSeekDiscontinuity:
        {
            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            TrackInfo *info = &mTracks.editItemAt(trackIndex);
            sp<AnotherPacketSource> source = info->mSource;
            if (source != NULL) {
                source->queueDiscontinuity(
                        ATSParser::DISCONTINUITY_SEEK, NULL);
            }

            break;
        }

        case RtspConnectionHandler::kWhatNormalPlayTimeMapping:
        {
            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            uint32_t rtpTime;
            CHECK(msg->findInt32("rtpTime", (int32_t *)&rtpTime));

            int64_t nptUs;
            CHECK(msg->findInt64("nptUs", &nptUs));

            TrackInfo *info = &mTracks.editItemAt(trackIndex);
            info->mRTPTime = rtpTime;
            info->mNormalPlaytimeUs = nptUs;
            info->mNPTMappingValid = true;
            break;
        }

        case RtspConnectionHandler::kWhatTryTCPInterleaving:
        {
            // By default, we will request to deliver RTP over UDP. If the play
            // request timed out and we didn't receive any RTP packet, we will
            // fail back to use RTP interleaved in the existing RTSP/TCP
            // connection. And in this case, we have to explicitly perform
            // another play event to request the server to start streaming
            // again.
            int64_t playTimeUs;
            if (!msg->findInt64("timeUs", &playTimeUs)) {
                playTimeUs = 0;
            }
            performPlay(playTimeUs);
            break;
        }

        default:
            TRESPASS();
    }
}
// Reads one chunk of APE bitstream into a MediaBuffer.
// If 'options' carries a seek request, the target file position and
// timestamp are computed from the seek table via
// ape_calc_seekpos_by_microsecond() before reading.  Returns
// ERROR_END_OF_STREAM at (or past) the end of the audio data,
// ERROR_UNSUPPORTED when required metadata is missing or the seek
// cannot be resolved, or the buffer-group error on acquire failure.
status_t APESource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    uint32_t newframe = 0 , firstbyte = 0;
    ///LOGV("APESource::read");
    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    int32_t bitrate = 0;

    if (!mMeta->findInt32(kKeyBitRate, &bitrate)
            || !mMeta->findInt32(kKeySampleRate, &mSampleRate)) {
        LOGI("no bitrate");

        return ERROR_UNSUPPORTED;
    }

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        {
            int64_t duration = 0;
            int64_t seektabletime = 0;

            // Seeking requires a valid seek table and frame geometry.
            if ((mTotalsample > 0) && (mTableOfContents[0] > 0)
                    && (mSamplesPerFrame > 0)
                    && mMeta->findInt64(kKeyDuration, &duration)) {
                ape_parser_ctx_t ape_ctx;
                uint32_t filepos, blocks_to_skip;

                ape_ctx.samplerate = mSampleRate;
                ape_ctx.blocksperframe = mSamplesPerFrame;
                ape_ctx.totalframes = mTotalFrame;
                ape_ctx.seektable = mTableOfContents;
                ape_ctx.firstframe = mTableOfContents[0];

                if (ape_calc_seekpos_by_microsecond(
                        &ape_ctx, seekTimeUs, &newframe, &filepos,
                        &firstbyte, &blocks_to_skip) < 0) {
                    LOGD("getseekto error exit");
                    return ERROR_UNSUPPORTED;
                }

                mCurrentPos = filepos;
                mCurrentTimeUs =
                    (int64_t)newframe * mSamplesPerFrame * 1000000ll / mSampleRate;

                LOGD("getseekto seekTimeUs=%lld, Actual time%lld, filepos%x,frame %d, seekbyte %d",
                     seekTimeUs, mCurrentTimeUs, mCurrentPos, newframe, firstbyte);
            } else {
                LOGD("getseekto parameter error exit");
                return ERROR_UNSUPPORTED;
            }
        }
    }

    // mFileoffset (when non-zero) marks the logical end of audio data.
    if ((mFileoffset != 0) && (mCurrentPos >= mFileoffset)) {
        LOGD("APESource::readAt to end filesize %x curr: %x",
             mFileoffset, mCurrentPos);
        return ERROR_END_OF_STREAM;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);

    if (err != OK) {
        LOGD("APESource::acquire_buffer fail");
        return err;
    }

    size_t frame_size;
    frame_size = kMaxFrameSize;
    ssize_t n = 0;

#ifdef ENABLE_MMRIOTHREAD
    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        // Re-anchor the read-ahead I/O thread after a seek.
        ResetReadioPtr(mCurrentPos);
    }

    n = ReadBitsteam(buffer->data(), frame_size);
#else
    ///frame_size = mMaxBufferSize;
    n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
#endif

    ///LOGE("APESource::readAt %x, %x, %d, %d, %d, %d, %d", mCurrentPos, buffer->data(), buffer->size(), mTotalsample, bitrate, mSampleRate, frame_size);
    //ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);

    if ((mFileoffset != 0) && ((mCurrentPos + n) >= mFileoffset)) {
        // Clamp the final chunk to the logical end and zero the tail.
        frame_size = mFileoffset - mCurrentPos;
        memset(buffer->data() + frame_size, 0, n - frame_size);
    } else if ((n < (ssize_t)frame_size) && (n > 0)) {
        // Short read: shrink the chunk to what was actually read.
        frame_size = n;

        off64_t fileoffset = 0;
        mDataSource->getSize(&fileoffset);
        LOGD("APESource::readAt not enough read %d frmsize %x, filepos %x, filesize %x",
             n, frame_size, mCurrentPos + frame_size, fileoffset);

        //if ((mCurrentPos + frame_size) >= fileoffset
        //        && (mCurrentPos + frame_size) < mTableOfContents[mTotalFrame - 1])
        if ((mCurrentPos + frame_size) >= fileoffset
                && (mCurrentPos + frame_size) < mTableOfContents[mSt_bound- 1]) {
            memset(buffer->data(), 0, buffer->size());
            /// for this file is not complete error, frame buffer should not
            /// transfer to avoid decoding noise data.
            LOGD("APESource::file is not enough to end --> memset");
        }
    } else if (n <= 0) {
        buffer->release();
        buffer = NULL;
        LOGD("APESource::readAt EOS filepos %x frmsize %d",
             mCurrentPos, frame_size);
        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, frame_size);

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        // Pass the seek landing point to the decoder so it can skip to
        // the exact frame/byte.
        buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
        buffer->meta_data()->setInt32(kKeyNemFrame, newframe);
        buffer->meta_data()->setInt32(kKeySeekByte, firstbyte);
    }

    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mCurrentPos += frame_size;
    // Advance the timestamp assuming constant bitrate.
    mCurrentTimeUs += (int64_t)(frame_size * 8000000ll) / bitrate ;

#ifdef ENABLE_MMRIOTHREAD
    UpdateReadPtr(frame_size);
#endif

    *out = buffer;
    ///LOGE("APESource::kKeyTime done %x %lld", mCurrentPos, mCurrentTimeUs);
    return OK;
}
// Constructs an MP3Extractor for 'source'.  If the sniffer already
// located the first frame (offset/header/post-id3-offset present in
// 'meta'), that result is accepted verbatim; otherwise Resync() scans
// the stream.  On failure mInitCheck stays NO_INIT.  Duration comes
// from a XING/VBRI seeker when available, otherwise it is estimated
// from the file size and the (assumed constant) bitrate.
MP3Extractor::MP3Extractor(
        const sp<DataSource> &source, const sp<AMessage> &meta)
    : mInitCheck(NO_INIT),
      mDataSource(source),
      mFirstFramePos(-1),
      mFixedHeader(0) {
    off64_t pos = 0;
    off64_t post_id3_pos;
    uint32_t header;
    bool success;

    int64_t meta_offset;
    uint32_t meta_header;
    int64_t meta_post_id3_offset;
    if (meta != NULL
            && meta->findInt64("offset", &meta_offset)
            && meta->findInt32("header", (int32_t *)&meta_header)
            && meta->findInt64("post-id3-offset", &meta_post_id3_offset)) {
        // The sniffer has already done all the hard work for us, simply
        // accept its judgement.
        pos = (off64_t)meta_offset;
        header = meta_header;
        post_id3_pos = (off64_t)meta_post_id3_offset;

        success = true;
    } else {
        success = Resync(mDataSource, 0, &pos, &post_id3_pos, &header);
    }

    if (!success) {
        // mInitCheck will remain NO_INIT
        return;
    }

    mFirstFramePos = pos;
    mFixedHeader = header;

    size_t frame_size;
    int sample_rate;
    int num_channels;
    int bitrate;
    GetMPEGAudioFrameSize(
            header, &frame_size, &sample_rate, &num_channels, &bitrate);

    mMeta = new MetaData;

    mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
    mMeta->setInt32(kKeySampleRate, sample_rate);
    mMeta->setInt32(kKeyBitRate, bitrate * 1000);  // kbps -> bps
    mMeta->setInt32(kKeyChannelCount, num_channels);

    mSeeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);

    if (mSeeker == NULL) {
        mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
    }

    int64_t durationUs;

    if (mSeeker == NULL || !mSeeker->getDuration(&durationUs)) {
        off64_t fileSize;
        // BUGFIX: guard against bitrate == 0 (e.g. "free format" frames or
        // a failed header parse) which would cause a division by zero in
        // the CBR duration estimate below.
        if (bitrate > 0 && mDataSource->getSize(&fileSize) == OK) {
            durationUs = 8000LL * (fileSize - mFirstFramePos) / bitrate;
        } else {
            durationUs = -1;
        }
    }

    if (durationUs >= 0) {
        mMeta->setInt64(kKeyDuration, durationUs);
    }

    mInitCheck = OK;
}
// Message dispatcher for the Wifi-Display Converter (media puller ->
// encoder pipeline).  Handles incoming access units (with optional
// frame dropping, encoding suspension and compile-time silence
// detection), encoder activity, IDR requests, shutdown, and output
// buffer recycling.
void Converter::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatMediaPullerNotify:
        {
            int32_t what;
            CHECK(msg->findInt32("what", &what));

            if (!mIsPCMAudio && mEncoder == NULL) {
                // Encoder already released; just detach the media buffer
                // so it gets returned to its group.
                ALOGV("got msg '%s' after encoder shutdown.",
                      msg->debugString().c_str());

                if (what == MediaPuller::kWhatAccessUnit) {
                    sp<ABuffer> accessUnit;
                    CHECK(msg->findBuffer("accessUnit", &accessUnit));

                    accessUnit->setMediaBufferBase(NULL);
                }
                break;
            }

            if (what == MediaPuller::kWhatEOS) {
                // NULL entry in the queue marks end-of-stream.
                mInputBufferQueue.push_back(NULL);

                feedEncoderInputBuffers();

                scheduleDoMoreWork();
            } else {
                CHECK_EQ(what, MediaPuller::kWhatAccessUnit);

                sp<ABuffer> accessUnit;
                CHECK(msg->findBuffer("accessUnit", &accessUnit));

                if (mNumFramesToDrop > 0 || mEncodingSuspended) {
                    if (mNumFramesToDrop > 0) {
                        --mNumFramesToDrop;
                        ALOGI("dropping frame.");
                    }

                    accessUnit->setMediaBufferBase(NULL);
                    break;
                }

#if 0
                MediaBuffer *mbuf =
                    (MediaBuffer *)(accessUnit->getMediaBufferBase());
                if (mbuf != NULL) {
                    ALOGI("queueing mbuf %p", mbuf);
                    mbuf->release();
                }
#endif

#if ENABLE_SILENCE_DETECTION
                // Audio only: after 10s of continuous silence, stop
                // forwarding frames until sound resumes.
                if (!mIsVideo) {
                    if (IsSilence(accessUnit)) {
                        if (mInSilentMode) {
                            break;
                        }

                        int64_t nowUs = ALooper::GetNowUs();

                        if (mFirstSilentFrameUs < 0ll) {
                            mFirstSilentFrameUs = nowUs;
                        } else if (nowUs >= mFirstSilentFrameUs + 10000000ll) {
                            mInSilentMode = true;
                            ALOGI("audio in silent mode now.");
                            break;
                        }
                    } else {
                        if (mInSilentMode) {
                            ALOGI("audio no longer in silent mode.");
                        }
                        mInSilentMode = false;
                        mFirstSilentFrameUs = -1ll;
                    }
                }
#endif

                mInputBufferQueue.push_back(accessUnit);

                feedEncoderInputBuffers();

                scheduleDoMoreWork();
            }
            break;
        }

        case kWhatEncoderActivity:
        {
#if 0
            int64_t whenUs;
            if (msg->findInt64("whenUs", &whenUs)) {
                int64_t nowUs = ALooper::GetNowUs();
                ALOGI("[%s] kWhatEncoderActivity after %lld us",
                      mIsVideo ? "video" : "audio", nowUs - whenUs);
            }
#endif

            mDoMoreWorkPending = false;

            if (mEncoder == NULL) {
                break;
            }

            status_t err = doMoreWork();

            if (err != OK) {
                notifyError(err);
            } else {
                scheduleDoMoreWork();
            }
            break;
        }

        case kWhatRequestIDRFrame:
        {
            if (mEncoder == NULL) {
                break;
            }

            // IDR frames only make sense for video encoders.
            if (mIsVideo) {
                ALOGV("requesting IDR frame");
                mEncoder->requestIDRFrame();
            }
            break;
        }

        case kWhatShutdown:
        {
            ALOGI("shutting down %s encoder", mIsVideo ? "video" : "audio");

            releaseEncoder();

            AString mime;
            CHECK(mOutputFormat->findString("mime", &mime));
            ALOGI("encoder (%s) shut down.", mime.c_str());

            sp<AMessage> notify = mNotify->dup();
            notify->setInt32("what", kWhatShutdownCompleted);
            notify->post();
            break;
        }

        case kWhatDropAFrame:
        {
            ++mNumFramesToDrop;
            break;
        }

        case kWhatReleaseOutputBuffer:
        {
            if (mEncoder != NULL) {
                size_t bufferIndex;
                // NOTE(review): casting &size_t to int32_t* only writes the
                // low 32 bits on LP64 — relies on bufferIndex being
                // zero-initialized by findInt32's write pattern; verify.
                CHECK(msg->findInt32("bufferIndex", (int32_t*)&bufferIndex));
                CHECK(bufferIndex < mEncoderOutputBuffers.size());
                mEncoder->releaseOutputBuffer(bufferIndex);
            }
            break;
        }

        case kWhatSuspendEncoding:
        {
            int32_t suspend;
            CHECK(msg->findInt32("suspend", &suspend));

            mEncodingSuspended = suspend;

            if (mFlags & FLAG_USE_SURFACE_INPUT) {
                // Surface-input encoders drop frames at the codec level.
                sp<AMessage> params = new AMessage;
                params->setInt32("drop-input-frames",suspend);
                mEncoder->setParameters(params);
            }
            break;
        }

        default:
            TRESPASS();
    }
}
// Message dispatcher for NuPlayer's RTSP source.  Command messages
// (disconnect, seek, buffering poll, EOS signal) are handled first and
// return early; everything else must be a kWhatNotify carrying a
// MyHandler (or SDPLoader) event, dispatched by its "what" payload.
// When the stream is MP2TS (mTSParser != NULL) access units are fed to
// the TS parser instead of the per-track packet sources.
void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
    if (msg->what() == kWhatDisconnect) {
        sp<AReplyToken> replyID;
        CHECK(msg->senderAwaitsResponse(&replyID));

        mDisconnectReplyID = replyID;
        finishDisconnectIfPossible();
        return;
    } else if (msg->what() == kWhatPerformSeek) {
        int32_t generation;
        CHECK(msg->findInt32("generation", &generation));
        CHECK(msg->senderAwaitsResponse(&mSeekReplyID));

        if (generation != mSeekGeneration) {
            // obsolete.
            finishSeek(OK);
            return;
        }

        int64_t seekTimeUs;
        CHECK(msg->findInt64("timeUs", &seekTimeUs));

        performSeek(seekTimeUs);
        return;
    } else if (msg->what() == kWhatPollBuffering) {
        onPollBuffering();
        return;
    } else if (msg->what() == kWhatSignalEOS) {
        onSignalEOS(msg);
        return;
    }

    CHECK_EQ(msg->what(), (int)kWhatNotify);

    int32_t what;
    CHECK(msg->findInt32("what", &what));

    switch (what) {
        case MyHandler::kWhatConnected:
        {
            onConnected();

            notifyVideoSizeChanged();

            uint32_t flags = 0;

            if (mHandler->isSeekable()) {
                flags = FLAG_CAN_PAUSE
                        | FLAG_CAN_SEEK
                        | FLAG_CAN_SEEK_BACKWARD
                        | FLAG_CAN_SEEK_FORWARD;
            }

            notifyFlagsChanged(flags);
            schedulePollBuffering();
            break;
        }

        case MyHandler::kWhatDisconnected:
        {
            onDisconnected(msg);
            break;
        }

        case MyHandler::kWhatSeekDone:
        {
            mState = CONNECTED;
            // Unblock seekTo here in case we attempted to seek in a live stream
            finishSeek(OK);
            break;
        }

        case MyHandler::kWhatSeekPaused:
        {
            // Discard everything queued before the seek on both tracks.
            sp<AnotherPacketSource> source = getSource(true /* audio */);
            if (source != NULL) {
                source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
                        /* extra */ NULL,
                        /* discard */ true);
            }
            source = getSource(false /* video */);
            if (source != NULL) {
                source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
                        /* extra */ NULL,
                        /* discard */ true);
            };

            status_t err = OK;
            msg->findInt32("err", &err);

            if (err == OK) {
                int64_t timeUs;
                CHECK(msg->findInt64("time", &timeUs));
                mHandler->continueSeekAfterPause(timeUs);
            } else {
                finishSeek(err);
            }
            break;
        }

        case MyHandler::kWhatAccessUnit:
        {
            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));

            if (mTSParser == NULL) {
                CHECK_LT(trackIndex, mTracks.size());
            } else {
                // TS streams carry everything on a single track.
                CHECK_EQ(trackIndex, 0u);
            }

            sp<ABuffer> accessUnit;
            CHECK(msg->findBuffer("accessUnit", &accessUnit));

            int32_t damaged;
            if (accessUnit->meta()->findInt32("damaged", &damaged)
                    && damaged) {
                ALOGI("dropping damaged access unit.");
                break;
            }

            if (mTSParser != NULL) {
                // Feed the payload to the TS parser in 188-byte packets.
                size_t offset = 0;
                status_t err = OK;
                while (offset + 188 <= accessUnit->size()) {
                    err = mTSParser->feedTSPacket(
                            accessUnit->data() + offset, 188);
                    if (err != OK) {
                        break;
                    }

                    offset += 188;
                }

                if (offset < accessUnit->size()) {
                    // Trailing partial packet means malformed input.
                    err = ERROR_MALFORMED;
                }

                if (err != OK) {
                    signalSourceEOS(err);
                }

                postSourceEOSIfNecessary();
                break;
            }

            TrackInfo *info = &mTracks.editItemAt(trackIndex);

            sp<AnotherPacketSource> source = info->mSource;
            if (source != NULL) {
                uint32_t rtpTime;
                CHECK(accessUnit->meta()->findInt32(
                            "rtp-time", (int32_t *)&rtpTime));

                if (!info->mNPTMappingValid) {
                    // This is a live stream, we didn't receive any normal
                    // playtime mapping. We won't map to npt time.
                    source->queueAccessUnit(accessUnit);
                    break;
                }

                // Map the RTP timestamp onto normal playtime (microseconds).
                int64_t nptUs =
                    ((double)rtpTime - (double)info->mRTPTime)
                        / info->mTimeScale
                        * 1000000ll
                        + info->mNormalPlaytimeUs;

                accessUnit->meta()->setInt64("timeUs", nptUs);

                source->queueAccessUnit(accessUnit);
            }

            postSourceEOSIfNecessary();
            break;
        }

        case MyHandler::kWhatEOS:
        {
            int32_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            CHECK_NE(finalResult, (status_t)OK);

            if (mTSParser != NULL) {
                signalSourceEOS(finalResult);
            }

            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            TrackInfo *info = &mTracks.editItemAt(trackIndex);
            sp<AnotherPacketSource> source = info->mSource;
            if (source != NULL) {
                source->signalEOS(finalResult);
            }

            break;
        }

        case MyHandler::kWhatSeekDiscontinuity:
        {
            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            TrackInfo *info = &mTracks.editItemAt(trackIndex);
            sp<AnotherPacketSource> source = info->mSource;
            if (source != NULL) {
                source->queueDiscontinuity(
                        ATSParser::DISCONTINUITY_TIME,
                        NULL,
                        true /* discard */);
            }

            break;
        }

        case MyHandler::kWhatNormalPlayTimeMapping:
        {
            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            uint32_t rtpTime;
            CHECK(msg->findInt32("rtpTime", (int32_t *)&rtpTime));

            int64_t nptUs;
            CHECK(msg->findInt64("nptUs", &nptUs));

            TrackInfo *info = &mTracks.editItemAt(trackIndex);
            info->mRTPTime = rtpTime;
            info->mNormalPlaytimeUs = nptUs;
            info->mNPTMappingValid = true;
            break;
        }

        case SDPLoader::kWhatSDPLoaded:
        {
            onSDPLoaded(msg);
            break;
        }

        default:
            TRESPASS();
    }
}
void TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) { switch (msg->what()) { case kWhatPause: { mPaused = true; break; } case kWhatResume: { mPaused = false; if (mPendingSeekTimeUs != kInvalidTimeUs) { seekToAsync(mPendingSeekTimeUs); mPendingSeekTimeUs = kInvalidTimeUs; } else { doRead(); } break; } case kWhatStart: { sp<MediaPlayerBase> listener = mListener.promote(); if (listener == NULL) { ALOGE("Listener is NULL when kWhatStart is received."); break; } mPaused = false; mPendingSeekTimeUs = kInvalidTimeUs; int32_t positionMs = 0; listener->getCurrentPosition(&positionMs); int64_t seekTimeUs = positionMs * 1000ll; notifyListener(); mSendSubtitleGeneration++; doSeekAndRead(seekTimeUs); break; } case kWhatRetryRead: { int32_t generation = -1; CHECK(msg->findInt32("generation", &generation)); if (generation != mSendSubtitleGeneration) { // Drop obsolete msg. break; } int64_t seekTimeUs; int seekMode; if (msg->findInt64("seekTimeUs", &seekTimeUs) && msg->findInt32("seekMode", &seekMode)) { MediaSource::ReadOptions options; options.setSeekTo( seekTimeUs, static_cast<MediaSource::ReadOptions::SeekMode>(seekMode)); doRead(&options); } else { doRead(); } break; } case kWhatSeek: { int64_t seekTimeUs = kInvalidTimeUs; // Clear a displayed timed text before seeking. notifyListener(); msg->findInt64("seekTimeUs", &seekTimeUs); if (seekTimeUs == kInvalidTimeUs) { sp<MediaPlayerBase> listener = mListener.promote(); if (listener != NULL) { int32_t positionMs = 0; listener->getCurrentPosition(&positionMs); seekTimeUs = positionMs * 1000ll; } } if (mPaused) { mPendingSeekTimeUs = seekTimeUs; break; } mSendSubtitleGeneration++; doSeekAndRead(seekTimeUs); break; } case kWhatSendSubtitle: { int32_t generation; CHECK(msg->findInt32("generation", &generation)); if (generation != mSendSubtitleGeneration) { // Drop obsolete msg. break; } // If current time doesn't reach to the fire time, // re-post the message with the adjusted delay time. 
int64_t fireTimeUs = kInvalidTimeUs; if (msg->findInt64("fireTimeUs", &fireTimeUs)) { // TODO: check if fireTimeUs is not kInvalidTimeUs. int64_t delayUs = delayUsFromCurrentTime(fireTimeUs); if (delayUs > 0) { msg->post(delayUs); break; } } sp<RefBase> obj; if (msg->findObject("subtitle", &obj)) { sp<ParcelEvent> parcelEvent; parcelEvent = static_cast<ParcelEvent*>(obj.get()); notifyListener(&(parcelEvent->parcel)); doRead(); } else { notifyListener(); } break; } case kWhatSetSource: { mSendSubtitleGeneration++; sp<RefBase> obj; msg->findObject("source", &obj); if (mSource != NULL) { mSource->stop(); mSource.clear(); mSource = NULL; } // null source means deselect track. if (obj == NULL) { mPendingSeekTimeUs = kInvalidTimeUs; mPaused = false; notifyListener(); break; } mSource = static_cast<TimedTextSource*>(obj.get()); status_t err = mSource->start(); if (err != OK) { notifyError(err); break; } Parcel parcel; err = mSource->extractGlobalDescriptions(&parcel); if (err != OK) { notifyError(err); break; } notifyListener(&parcel); break; } } }
void TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) { switch (msg->what()) { case kWhatPause: { mSendSubtitleGeneration++; break; } case kWhatRetryRead: { int64_t seekTimeUs; int seekMode; if (msg->findInt64("seekTimeUs", &seekTimeUs) && msg->findInt32("seekMode", &seekMode)) { MediaSource::ReadOptions options; options.setSeekTo( seekTimeUs, static_cast<MediaSource::ReadOptions::SeekMode>(seekMode)); doRead(&options); } else { doRead(); } break; } case kWhatSeek: { int64_t seekTimeUs = 0; msg->findInt64("seekTimeUs", &seekTimeUs); if (seekTimeUs < 0) { sp<MediaPlayerBase> listener = mListener.promote(); if (listener != NULL) { int32_t positionMs = 0; listener->getCurrentPosition(&positionMs); seekTimeUs = positionMs * 1000ll; } } doSeekAndRead(seekTimeUs); break; } case kWhatSendSubtitle: { int32_t generation; CHECK(msg->findInt32("generation", &generation)); if (generation != mSendSubtitleGeneration) { // Drop obsolete msg. break; } sp<RefBase> obj; if (msg->findObject("subtitle", &obj)) { sp<ParcelEvent> parcelEvent; parcelEvent = static_cast<ParcelEvent*>(obj.get()); notifyListener(&(parcelEvent->parcel)); doRead(); } else { notifyListener(); } break; } case kWhatSetSource: { sp<RefBase> obj; msg->findObject("source", &obj); if (obj == NULL) break; if (mSource != NULL) { mSource->stop(); } mSource = static_cast<TimedTextSource*>(obj.get()); status_t err = mSource->start(); if (err != OK) { notifyError(err); break; } Parcel parcel; err = mSource->extractGlobalDescriptions(&parcel); if (err != OK) { notifyError(err); break; } notifyListener(&parcel); break; } } }
// Message pump for the Converter: consumes access units from the MediaPuller,
// feeds them to the encoder, drains encoder output (doMoreWork), and tears
// the encoder down on shutdown.  Note the explicit MediaBuffer release paths:
// access units may carry a raw "mediaBuffer" pointer in their meta that must
// be released manually or the buffer is leaked back to the producer.
void Converter::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatMediaPullerNotify: {
            int32_t what;
            CHECK(msg->findInt32("what", &what));

            if (!mIsPCMAudio && mEncoder == NULL) {
                // Encoder already shut down: we can't consume the unit, but we
                // still must release any MediaBuffer it references.
                ALOGV("got msg '%s' after encoder shutdown.",
                      msg->debugString().c_str());

                if (what == MediaPuller::kWhatAccessUnit) {
                    sp<ABuffer> accessUnit;
                    CHECK(msg->findBuffer("accessUnit", &accessUnit));

                    void *mbuf;
                    if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
                            && mbuf != NULL) {
                        ALOGV("releasing mbuf %p", mbuf);

                        // Null the pointer first so nobody double-releases.
                        accessUnit->meta()->setPointer("mediaBuffer", NULL);

                        static_cast<MediaBuffer *>(mbuf)->release();
                        mbuf = NULL;
                    }
                }
                break;
            }

            if (what == MediaPuller::kWhatEOS) {
                // A NULL entry in the input queue marks end-of-stream.
                mInputBufferQueue.push_back(NULL);

                feedEncoderInputBuffers();

                scheduleDoMoreWork();
            } else {
                CHECK_EQ(what, MediaPuller::kWhatAccessUnit);

                sp<ABuffer> accessUnit;
                CHECK(msg->findBuffer("accessUnit", &accessUnit));

#if 0
                void *mbuf;
                if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
                        && mbuf != NULL) {
                    ALOGI("queueing mbuf %p", mbuf);
                }
#endif

#if ENABLE_SILENCE_DETECTION
                // For audio tracks, drop frames once we've seen 10s of
                // continuous silence; resume queueing on the first
                // non-silent frame.
                if (!mIsVideo) {
                    if (IsSilence(accessUnit)) {
                        if (mInSilentMode) {
                            break;
                        }

                        int64_t nowUs = ALooper::GetNowUs();

                        if (mFirstSilentFrameUs < 0ll) {
                            mFirstSilentFrameUs = nowUs;
                        } else if (nowUs >= mFirstSilentFrameUs + 10000000ll) {
                            mInSilentMode = true;
                            ALOGI("audio in silent mode now.");
                            break;
                        }
                    } else {
                        if (mInSilentMode) {
                            ALOGI("audio no longer in silent mode.");
                        }
                        mInSilentMode = false;
                        mFirstSilentFrameUs = -1ll;
                    }
                }
#endif

                mInputBufferQueue.push_back(accessUnit);

                feedEncoderInputBuffers();

                scheduleDoMoreWork();
            }
            break;
        }

        case kWhatEncoderActivity: {
#if 0
            int64_t whenUs;
            if (msg->findInt64("whenUs", &whenUs)) {
                int64_t nowUs = ALooper::GetNowUs();
                ALOGI("[%s] kWhatEncoderActivity after %lld us",
                      mIsVideo ? "video" : "audio", nowUs - whenUs);
            }
#endif

            mDoMoreWorkPending = false;

            if (mEncoder == NULL) {
                break;
            }

            status_t err = doMoreWork();

            if (err != OK) {
                notifyError(err);
            } else {
                // Keep draining until the encoder runs dry.
                scheduleDoMoreWork();
            }
            break;
        }

        case kWhatRequestIDRFrame: {
            if (mEncoder == NULL) {
                break;
            }

            // IDR requests only make sense for video encoders.
            if (mIsVideo) {
                ALOGI("requesting IDR frame");
                mEncoder->requestIDRFrame();
            }
            break;
        }

        case kWhatShutdown: {
            ALOGI("shutting down encoder");

            // bugfix: drain the input queue and release every queued
            // MediaBuffer before shutting down.  Without this, buffers held
            // here are never returned to their owner; e.g. when a player
            // surface is still active and Wi-Fi is disabled from quick
            // settings, the stalled buffers can deadlock the UI.  Draining
            // lets the source deliver its disconnect notification to the
            // framework.
            while (!mInputBufferQueue.empty()) {
                sp<ABuffer> accessUnit = *mInputBufferQueue.begin();
                mInputBufferQueue.erase(mInputBufferQueue.begin());

                void *mbuf = NULL;
                if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
                        && mbuf != NULL) {
                    ALOGI(">>releasing mbuf %p", mbuf);

                    accessUnit->meta()->setPointer("mediaBuffer", NULL);

                    static_cast<MediaBuffer *>(mbuf)->release();
                    mbuf = NULL;
                }
            }

            if (mEncoder != NULL) {
                mEncoder->release();
                mEncoder.clear();
            }

            AString mime;
            CHECK(mInputFormat->findString("mime", &mime));
            ALOGI("encoder (%s) shut down.", mime.c_str());
            break;
        }

        default:
            TRESPASS();
    }
}
void ARTSPConnection::onCompleteConnection(const sp<AMessage> &msg) { sp<AMessage> reply; CHECK(msg->findMessage("reply", &reply)); int32_t connectionID; CHECK(msg->findInt32("connection-id", &connectionID)); if ((connectionID != mConnectionID) || mState != CONNECTING) { // While we were attempting to connect, the attempt was // cancelled. reply->setInt32("result", -ECONNABORTED); reply->post(); return; } struct timeval tv; tv.tv_sec = 0; tv.tv_usec = kSelectTimeoutUs; fd_set ws; FD_ZERO(&ws); FD_SET(mSocket, &ws); int res = select(mSocket + 1, NULL, &ws, NULL, &tv); CHECK_GE(res, 0); if (res == 0) { // Timed out. Not yet connected. #ifndef ANDROID_DEFAULT_CODE int64_t then, now = ALooper::GetNowUs(); if (msg->findInt64("timestamp", &then) && now - then > kRequestTimeout) { ALOGE("connection timeout %lld > %lld", now, then); reply->setInt32("result", -110 /*ETIMEDOUT*/); reply->post(); mState = DISCONNECTED; close(mSocket); mSocket = -1; return; } if(mExited) return; #endif // #ifndef ANDROID_DEFAULT_CODE msg->post(); return; } int err; socklen_t optionLen = sizeof(err); CHECK_EQ(getsockopt(mSocket, SOL_SOCKET, SO_ERROR, &err, &optionLen), 0); CHECK_EQ(optionLen, (socklen_t)sizeof(err)); if (err != 0) { ALOGE("err = %d (%s)", err, strerror(err)); reply->setInt32("result", -err); mState = DISCONNECTED; if (mUIDValid) { HTTPBase::UnRegisterSocketUserTag(mSocket); HTTPBase::UnRegisterSocketUserMark(mSocket); } close(mSocket); mSocket = -1; } else { reply->setInt32("result", OK); mState = CONNECTED; mNextCSeq = 1; postReceiveReponseEvent(); } reply->post(); }
// Central NuPlayer state machine (legacy LOGV-era version).  All player
// state transitions — source/sink setup, decoder instantiation, flush and
// shutdown sequencing, seek/reset/pause/resume — happen on this looper
// thread, so member state is accessed without locks.  The flush state
// machine (mFlushingAudio/mFlushingVideo: AWAITING_DISCONTINUITY -> FLUSHED
// -> SHUTTING_DOWN_DECODER -> SHUT_DOWN) is sequenced by the ACodec
// notifications handled under kWhatVideoNotify/kWhatAudioNotify; the
// ordering of those transitions is load-bearing.
void NuPlayer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatSetDataSource: {
            LOGV("kWhatSetDataSource");

            // The data source may only be set once.
            CHECK(mSource == NULL);

            sp<RefBase> obj;
            CHECK(msg->findObject("source", &obj));

            mSource = static_cast<Source *>(obj.get());
            break;
        }

        case kWhatSetVideoNativeWindow: {
            LOGV("kWhatSetVideoNativeWindow");

            sp<RefBase> obj;
            CHECK(msg->findObject("native-window", &obj));

            mNativeWindow = static_cast<NativeWindowWrapper *>(obj.get());
            break;
        }

        case kWhatSetAudioSink: {
            LOGV("kWhatSetAudioSink");

            sp<RefBase> obj;
            CHECK(msg->findObject("sink", &obj));

            mAudioSink = static_cast<MediaPlayerBase::AudioSink *>(obj.get());
            break;
        }

        case kWhatStart: {
            LOGV("kWhatStart");

            // Reset per-playback bookkeeping before starting the source.
            mVideoIsAVC = false;
            mAudioEOS = false;
            mVideoEOS = false;
            mSkipRenderingAudioUntilMediaTimeUs = -1;
            mSkipRenderingVideoUntilMediaTimeUs = -1;
            mVideoLateByUs = 0;
            mNumFramesTotal = 0;
            mNumFramesDropped = 0;

            mSource->start();

            mRenderer = new Renderer(
                    mAudioSink,
                    new AMessage(kWhatRendererNotify, id()));

            looper()->registerHandler(mRenderer);

            // Kick off decoder instantiation (kWhatScanSources below).
            postScanSources();
            break;
        }

        case kWhatScanSources: {
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));
            if (generation != mScanSourcesGeneration) {
                // stale
                break;
            }

            mScanSourcesPending = false;

            LOGV("scanning sources haveAudio=%d, haveVideo=%d",
                 mAudioDecoder != NULL, mVideoDecoder != NULL);

            instantiateDecoder(false, &mVideoDecoder);

            // Audio decoder is only useful once a sink exists.
            if (mAudioSink != NULL) {
                instantiateDecoder(true, &mAudioDecoder);
            }

            status_t err;
            if ((err = mSource->feedMoreTSData()) != OK) {
                if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
                    // We're not currently decoding anything (no audio or
                    // video tracks found) and we just ran out of input data.
                    if (err == ERROR_END_OF_STREAM) {
                        notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
                    } else {
                        notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
                    }
                }
                break;
            }

            if (mAudioDecoder == NULL || mVideoDecoder == NULL) {
                // Not all tracks are up yet; rescan in 100 ms.
                msg->post(100000ll);
                mScanSourcesPending = true;
            }
            break;
        }

        case kWhatVideoNotify:
        case kWhatAudioNotify: {
            bool audio = msg->what() == kWhatAudioNotify;

            sp<AMessage> codecRequest;
            CHECK(msg->findMessage("codec-request", &codecRequest));

            int32_t what;
            CHECK(codecRequest->findInt32("what", &what));

            if (what == ACodec::kWhatFillThisBuffer) {
                status_t err = feedDecoderInputData(
                        audio, codecRequest);

                if (err == -EWOULDBLOCK) {
                    // No input available right now; retry in 10 ms while the
                    // source can still produce data.
                    if (mSource->feedMoreTSData() == OK) {
                        msg->post(10000ll);
                    }
                }
            } else if (what == ACodec::kWhatEOS) {
                int32_t err;
                CHECK(codecRequest->findInt32("err", &err));

                if (err == ERROR_END_OF_STREAM) {
                    LOGV("got %s decoder EOS", audio ? "audio" : "video");
                } else {
                    LOGV("got %s decoder EOS w/ error %d",
                         audio ? "audio" : "video",
                         err);
                }

                mRenderer->queueEOS(audio, err);
            } else if (what == ACodec::kWhatFlushCompleted) {
                bool needShutdown;
                if (audio) {
                    CHECK(IsFlushingState(mFlushingAudio, &needShutdown));
                    mFlushingAudio = FLUSHED;
                } else {
                    CHECK(IsFlushingState(mFlushingVideo, &needShutdown));
                    mFlushingVideo = FLUSHED;

                    mVideoLateByUs = 0;
                }

                LOGV("decoder %s flush completed", audio ? "audio" : "video");

                if (needShutdown) {
                    // Flush was part of a teardown; continue into shutdown.
                    LOGV("initiating %s decoder shutdown",
                         audio ? "audio" : "video");

                    (audio ? mAudioDecoder : mVideoDecoder)->initiateShutdown();

                    if (audio) {
                        mFlushingAudio = SHUTTING_DOWN_DECODER;
                    } else {
                        mFlushingVideo = SHUTTING_DOWN_DECODER;
                    }
                }

                finishFlushIfPossible();
            } else if (what == ACodec::kWhatOutputFormatChanged) {
                if (audio) {
                    int32_t numChannels;
                    CHECK(codecRequest->findInt32("channel-count",
                                                  &numChannels));

                    int32_t sampleRate;
                    CHECK(codecRequest->findInt32("sample-rate", &sampleRate));

                    LOGV("Audio output format changed to %d Hz, %d channels",
                         sampleRate, numChannels);

                    // Reopen the sink with the new PCM parameters.
                    mAudioSink->close();
                    CHECK_EQ(mAudioSink->open(
                                sampleRate,
                                numChannels,
                                AUDIO_FORMAT_PCM_16_BIT,
                                8 /* bufferCount */),
                             (status_t)OK);
                    mAudioSink->start();

                    mRenderer->signalAudioSinkChanged();
                } else {
                    // video
                    int32_t width, height;
                    CHECK(codecRequest->findInt32("width", &width));
                    CHECK(codecRequest->findInt32("height", &height));

                    int32_t cropLeft, cropTop, cropRight, cropBottom;
                    CHECK(codecRequest->findRect(
                                "crop",
                                &cropLeft, &cropTop, &cropRight, &cropBottom));

                    LOGV("Video output format changed to %d x %d "
                         "(crop: %d x %d @ (%d, %d))",
                         width, height,
                         (cropRight - cropLeft + 1),
                         (cropBottom - cropTop + 1),
                         cropLeft, cropTop);

                    // Report the cropped (visible) dimensions upward.
                    notifyListener(
                            MEDIA_SET_VIDEO_SIZE,
                            cropRight - cropLeft + 1,
                            cropBottom - cropTop + 1);
                }
            } else if (what == ACodec::kWhatShutdownCompleted) {
                LOGV("%s shutdown completed", audio ? "audio" : "video");
                if (audio) {
                    mAudioDecoder.clear();

                    CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
                    mFlushingAudio = SHUT_DOWN;
                } else {
                    mVideoDecoder.clear();

                    CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
                    mFlushingVideo = SHUT_DOWN;
                }

                finishFlushIfPossible();
            } else if (what == ACodec::kWhatError) {
                LOGE("Received error from %s decoder, aborting playback.",
                     audio ? "audio" : "video");

                mRenderer->queueEOS(audio, UNKNOWN_ERROR);
            } else {
                CHECK_EQ((int)what, (int)ACodec::kWhatDrainThisBuffer);

                renderBuffer(audio, codecRequest);
            }

            break;
        }

        case kWhatRendererNotify: {
            int32_t what;
            CHECK(msg->findInt32("what", &what));

            if (what == Renderer::kWhatEOS) {
                int32_t audio;
                CHECK(msg->findInt32("audio", &audio));

                int32_t finalResult;
                CHECK(msg->findInt32("finalResult", &finalResult));

                if (audio) {
                    mAudioEOS = true;
                } else {
                    mVideoEOS = true;
                }

                if (finalResult == ERROR_END_OF_STREAM) {
                    LOGV("reached %s EOS", audio ? "audio" : "video");
                } else {
                    LOGE("%s track encountered an error (%d)",
                         audio ? "audio" : "video", finalResult);

                    notifyListener(
                            MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, finalResult);
                }

                // Playback is complete once every instantiated track has
                // reached EOS.
                if ((mAudioEOS || mAudioDecoder == NULL)
                        && (mVideoEOS || mVideoDecoder == NULL)) {
                    notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
                }
            } else if (what == Renderer::kWhatPosition) {
                int64_t positionUs;
                CHECK(msg->findInt64("positionUs", &positionUs));

                CHECK(msg->findInt64("videoLateByUs", &mVideoLateByUs));

                if (mDriver != NULL) {
                    sp<NuPlayerDriver> driver = mDriver.promote();
                    if (driver != NULL) {
                        driver->notifyPosition(positionUs);

                        driver->notifyFrameStats(
                                mNumFramesTotal, mNumFramesDropped);
                    }
                }
            } else if (what == Renderer::kWhatFlushComplete) {
                // NOTE: CHECK_EQ is redundant with the branch condition but
                // kept byte-identical.
                CHECK_EQ(what, (int32_t)Renderer::kWhatFlushComplete);

                int32_t audio;
                CHECK(msg->findInt32("audio", &audio));

                LOGV("renderer %s flush completed.",
                     audio ? "audio" : "video");
            }
            break;
        }

        case kWhatMoreDataQueued: {
            break;
        }

        case kWhatReset: {
            LOGV("kWhatReset");

            if (mRenderer != NULL) {
                // There's an edge case where the renderer owns all output
                // buffers and is paused, therefore the decoder will not read
                // more input data and will never encounter the matching
                // discontinuity. To avoid this, we resume the renderer.
                if (mFlushingAudio == AWAITING_DISCONTINUITY
                        || mFlushingVideo == AWAITING_DISCONTINUITY) {
                    mRenderer->resume();
                }
            }

            if (mFlushingAudio != NONE || mFlushingVideo != NONE) {
                // We're currently flushing, postpone the reset until that's
                // completed.
                LOGV("postponing reset mFlushingAudio=%d, mFlushingVideo=%d",
                     mFlushingAudio, mFlushingVideo);

                mResetPostponed = true;
                break;
            }

            if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
                // Nothing to flush; finish immediately.
                finishReset();
                break;
            }

            mTimeDiscontinuityPending = true;

            if (mAudioDecoder != NULL) {
                flushDecoder(true /* audio */, true /* needShutdown */);
            }

            if (mVideoDecoder != NULL) {
                flushDecoder(false /* audio */, true /* needShutdown */);
            }

            mResetInProgress = true;
            break;
        }

        case kWhatSeek: {
            int64_t seekTimeUs;
            CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));

            LOGV("kWhatSeek seekTimeUs=%lld us (%.2f secs)",
                 seekTimeUs, seekTimeUs / 1E6);

            mSource->seekTo(seekTimeUs);

            if (mDriver != NULL) {
                sp<NuPlayerDriver> driver = mDriver.promote();
                if (driver != NULL) {
                    driver->notifySeekComplete();
                }
            }

            break;
        }

        case kWhatPause: {
            CHECK(mRenderer != NULL);
            mRenderer->pause();
            break;
        }

        case kWhatResume: {
            CHECK(mRenderer != NULL);
            mRenderer->resume();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}