// Advance parsing until the sync-point table covers seekTimeUs.
// Called when a seek targets a time beyond the last known sync point: keeps
// calling feedMore() (which can append new sync points) and, whenever new
// sync points appear, drains already-queued access units that precede the
// newest sync point so per-track caches don't build up while scanning.
// Returns OK once the table covers seekTimeUs, ERROR_END_OF_STREAM if the
// stream ends first, or the first fatal error from a packet source.
status_t MPEG2TSExtractor::seekBeyond(int64_t seekTimeUs) {
    // If we're seeking beyond where we know --- read until we reach there.
    size_t syncPointsSize = mSeekSyncPoints->size();
    while (seekTimeUs > mSeekSyncPoints->keyAt(
            mSeekSyncPoints->size() - 1)) {
        status_t err;
        if (syncPointsSize < mSeekSyncPoints->size()) {
            // feedMore() discovered at least one new sync point since the
            // previous iteration.
            syncPointsSize = mSeekSyncPoints->size();
            int64_t syncTimeUs = mSeekSyncPoints->keyAt(syncPointsSize - 1);
            // Dequeue buffers before sync point in order to avoid too much
            // cache building up.
            sp<ABuffer> buffer;
            for (size_t i = 0; i < mSourceImpls.size(); ++i) {
                const sp<AnotherPacketSource> &impl = mSourceImpls[i];
                int64_t timeUs;
                // Discard queued access units strictly older than the newest
                // sync point; stop at the first one at/after it.
                while ((err = impl->nextBufferTime(&timeUs)) == OK) {
                    if (timeUs < syncTimeUs) {
                        impl->dequeueAccessUnit(&buffer);
                    } else {
                        break;
                    }
                }
                // -EWOULDBLOCK merely means this track's queue is empty;
                // anything else is a real error worth propagating.
                if (err != OK && err != -EWOULDBLOCK) {
                    return err;
                }
            }
        }
        if (feedMore() != OK) {
            return ERROR_END_OF_STREAM;
        }
    }
    return OK;
}
// Keep feeding transport-stream data until the given packet source reports
// an available buffer. Returns OK once a buffer is available, or the
// source's final (non-OK) result — e.g. an EOS status signalled after
// feedMore() fails — otherwise.
status_t MPEG2TSExtractor::feedUntilBufferAvailable(
        const sp<AnotherPacketSource> &impl) {
    status_t finalResult;
    for (;;) {
        if (impl->hasBufferAvailable(&finalResult)) {
            return OK;
        }
        if (finalResult != OK) {
            return finalResult;
        }
        const status_t feedErr = feedMore();
        if (feedErr != OK) {
            // Mark the source as ended; the next hasBufferAvailable() call
            // surfaces this as finalResult and we return it above.
            impl->signalEOS(feedErr);
        }
    }
}
void ESExtractor::init() { bool haveAudio = false; bool haveVideo = false; int numPacketsParsed = 0; ALOGD("*****************init in*************** \n"); mOffset = 0; while (feedMore() == OK) { if (++numPacketsParsed > 10) { break; } if (mTrack == NULL) { continue; } if (mTrack->getFormat() == NULL) { continue; } if (mTrack->isVideo() &&(mTrack->getFormat() != NULL)) { haveVideo = true; } if (mTrack->isAudio() &&(mTrack->getFormat() != NULL)) { haveAudio = true; } if (haveAudio == 1 || haveVideo == 1) { ALOGD("bisplayable is true"); bisPlayable = true; break; } } mFinalResult = OK; mBuffer->setRange(0, 0); ALOGD("************ init out *****************\n"); }
void MPEG2TSExtractor::init() { bool haveAudio = false; bool haveVideo = false; int numPacketsParsed = 0; while (feedMore() == OK) { ATSParser::SourceType type; if (haveAudio && haveVideo) { break; } if (!haveVideo) { sp<AnotherPacketSource> impl = (AnotherPacketSource *)mParser->getSource( ATSParser::VIDEO).get(); if (impl != NULL) { haveVideo = true; mSourceImpls.push(impl); } } if (!haveAudio) { sp<AnotherPacketSource> impl = (AnotherPacketSource *)mParser->getSource( ATSParser::AUDIO).get(); if (impl != NULL) { haveAudio = true; mSourceImpls.push(impl); } } if (++numPacketsParsed > 10000) { break; } } LOGI("haveAudio=%d, haveVideo=%d", haveAudio, haveVideo); }
void MPEG2TSExtractor::init() { bool haveAudio = false; bool haveVideo = false; int64_t startTime = ALooper::GetNowUs(); while (feedMore() == OK) { if (haveAudio && haveVideo) { break; } if (!haveVideo) { sp<AnotherPacketSource> impl = (AnotherPacketSource *)mParser->getSource( ATSParser::VIDEO).get(); if (impl != NULL) { haveVideo = true; mSourceImpls.push(impl); mSyncPoints.push(); mSeekSyncPoints = &mSyncPoints.editTop(); } } if (!haveAudio) { sp<AnotherPacketSource> impl = (AnotherPacketSource *)mParser->getSource( ATSParser::AUDIO).get(); if (impl != NULL) { haveAudio = true; mSourceImpls.push(impl); mSyncPoints.push(); if (!haveVideo) { mSeekSyncPoints = &mSyncPoints.editTop(); } } } // Wait only for 2 seconds to detect audio/video streams. if (ALooper::GetNowUs() - startTime > 2000000ll) { break; } } off64_t size; if (mDataSource->getSize(&size) == OK && (haveAudio || haveVideo)) { sp<AnotherPacketSource> impl = haveVideo ? (AnotherPacketSource *)mParser->getSource( ATSParser::VIDEO).get() : (AnotherPacketSource *)mParser->getSource( ATSParser::AUDIO).get(); size_t prevSyncSize = 1; int64_t durationUs = -1; List<int64_t> durations; // Estimate duration --- stabilize until you get <500ms deviation. 
while (feedMore() == OK && ALooper::GetNowUs() - startTime <= 2000000ll) { if (mSeekSyncPoints->size() > prevSyncSize) { prevSyncSize = mSeekSyncPoints->size(); int64_t diffUs = mSeekSyncPoints->keyAt(prevSyncSize - 1) - mSeekSyncPoints->keyAt(0); off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1) - mSeekSyncPoints->valueAt(0); durationUs = size * diffUs / diffOffset; durations.push_back(durationUs); if (durations.size() > 5) { durations.erase(durations.begin()); int64_t min = *durations.begin(); int64_t max = *durations.begin(); for (List<int64_t>::iterator i = durations.begin(); i != durations.end(); ++i) { if (min > *i) { min = *i; } if (max < *i) { max = *i; } } if (max - min < 500 * 1000) { break; } } } } status_t err; int64_t bufferedDurationUs; bufferedDurationUs = impl->getBufferedDurationUs(&err); if (err == ERROR_END_OF_STREAM) { durationUs = bufferedDurationUs; } if (durationUs > 0) { const sp<MetaData> meta = impl->getFormat(); meta->setInt64(kKeyDuration, durationUs); impl->setFormat(meta); } } ALOGI("haveAudio=%d, haveVideo=%d, elaspedTime=%" PRId64, haveAudio, haveVideo, ALooper::GetNowUs() - startTime); }
void MPEG2TSExtractor::init() { bool haveAudio = false; bool haveVideo = false; int numPacketsParsed = 0; sp<MPEG2TSSource> audioSource = NULL; sp<MPEG2TSSource> videoSource = NULL; while (feedMore() == OK) { ATSParser::SourceType type; if (haveAudio && haveVideo) { break; } if (!haveVideo) { sp<AnotherPacketSource> impl = (AnotherPacketSource *)mParser->getSource( ATSParser::VIDEO).get(); if (impl != NULL) { haveVideo = true; videoSource = new MPEG2TSSource(this, impl, mDataSource, true); if (videoSource == NULL) { ALOGE("Unable to create video TS source"); } else { mSourceList.push(videoSource); } } } if (!haveAudio) { sp<AnotherPacketSource> impl = (AnotherPacketSource *)mParser->getSource( ATSParser::AUDIO).get(); if (impl != NULL) { haveAudio = true; sp<MetaData> meta = impl->getFormat(); const char *mime; CHECK(meta->findCString(kKeyMIMEType, &mime)); //if this audio/mpeg* then drop the audio //we are intrested in only audio/mpeg (size ==10) if audio is mpeg format (mp3) if ((!strncasecmp("audio/mpeg", mime, 10)) && (strlen(mime) > 10)) { ALOGE("Audio is %s - Droping this",mime); } else{ ALOGI("Audio is %s - keeping this",mime); audioSource = new MPEG2TSSource(this, impl, mDataSource, false); if (audioSource != NULL) { mSourceList.push(audioSource); } else { ALOGE("Unable to create audio TS source"); } } } } if (++numPacketsParsed > MAX_NUM_TS_PACKETS_FOR_META_DATA) { ALOGW("Parsed more than 10000 TS packets and could not find AV data"); break; } } ALOGI("haveAudio=%d, haveVideo=%d", haveAudio, haveVideo); if (!haveAudio && !haveVideo) { mSeekable = false; ALOGE("Could not find any audio/video data"); return; } bool mAudioSeekable = true ,mvideoSeekable = true; if (audioSource != NULL) { if (audioSource->findStreamDuration() != OK) { mAudioSeekable = false; } } if (videoSource != NULL) { if (videoSource->findStreamDuration() != OK){ mvideoSeekable = false; } } char value[PROPERTY_VALUE_MAX]; if(property_get("TSParser.disable.seek", value, NULL) && 
(!strcasecmp(value, "true") || !strcmp(value, "1"))) { mSeekable = false; } else if(mAudioSeekable && mvideoSeekable) { mSeekable = true; } //Disable seek for streaming cases if (mDataSource->flags() & (DataSource::kWantsPrefetching | DataSource::kIsCachingDataSource)) { mSeekable = false; ALOGW("Disable seek for streaming clips"); } }