// Handles a failed sample request for |aType|. Real decode errors are
// forwarded to the owner unchanged; END_OF_STREAM is treated as "this
// sub-reader ran out", and we try to switch to another reader whose
// buffered range continues past the current time before giving up.
void MediaSourceReader::OnNotDecoded(MediaData::Type aType, RequestSampleCallback::NotDecodedReason aReason)
{
  MSE_DEBUG("MediaSourceReader(%p)::OnNotDecoded aType=%u aReason=%u IsEnded: %d", this, aType, aReason, IsEnded());
  if (aReason == RequestSampleCallback::DECODE_ERROR) {
    // Genuine decode failure: propagate as-is, no reader switching.
    GetCallback()->OnNotDecoded(aType, aReason);
    return;
  }
  // End of stream. Force switching past this stream to another reader by
  // switching to the end of the buffered range.
  MOZ_ASSERT(aReason == RequestSampleCallback::END_OF_STREAM);
  nsRefPtr<MediaDecoderReader> reader = aType == MediaData::AUDIO_DATA ? mAudioReader : mVideoReader;

  // Find the closest approximation to the end time for this stream.
  // mLast{Audio,Video}Time differs from the actual end time because of
  // Bug 1065207 - the duration of a WebM fragment is an estimate not the
  // actual duration. In the case of audio time an example of where they
  // differ would be the actual sample duration being small but the
  // previous sample being large. The buffered end time uses that last
  // sample duration as an estimate of the end time duration giving an end
  // time that is greater than mLastAudioTime, which is the actual sample
  // end time.
  // Reader switching is based on the buffered end time though so they can be
  // quite different. By using the EOS_FUZZ_US and the buffered end time we
  // attempt to account for this difference.
  // |time| aliases the member last-time field for the failed track so the
  // update below persists across requests.
  int64_t* time = aType == MediaData::AUDIO_DATA ? &mLastAudioTime : &mLastVideoTime;
  if (reader) {
    nsRefPtr<dom::TimeRanges> ranges = new dom::TimeRanges();
    reader->GetBuffered(ranges);
    if (ranges->Length() > 0) {
      // End time is a double so we convert to nearest by adding 0.5.
      int64_t end = ranges->GetEndTime() * USECS_PER_S + 0.5;
      *time = std::max(*time, end);
    }
  }

  // See if we can find a different reader that can pick up where we left off. We use the
  // EOS_FUZZ_US to allow for the fact that our end time can be inaccurate due to bug
  // 1065207 - the duration of a WebM frame is an estimate.
  if (aType == MediaData::AUDIO_DATA && SwitchAudioReader(*time + EOS_FUZZ_US)) {
    RequestAudioData();
    return;
  }
  if (aType == MediaData::VIDEO_DATA && SwitchVideoReader(*time + EOS_FUZZ_US)) {
    RequestVideoData(false, 0);
    return;
  }

  // If the entire MediaSource is done, generate an EndOfStream.
  if (IsEnded()) {
    GetCallback()->OnNotDecoded(aType, RequestSampleCallback::END_OF_STREAM);
    return;
  }

  // We don't have the data the caller wants. Tell that we're waiting for JS to
  // give us more data.
  GetCallback()->OnNotDecoded(aType, RequestSampleCallback::WAITING_FOR_DATA);
}
// A sample request for |aType| failed with |aReason|. Dispatch on the
// reason: errors abort the seek, WAITING_FOR_DATA re-arms a wait callback,
// CANCELED retries, and END_OF_STREAM marks that track finished.
void AccurateSeekTask::OnNotDecoded(MediaData::Type aType, MediaDecoderReader::NotDecodedReason aReason)
{
  AssertOwnerThread();
  MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished");
  SAMPLE_LOG("OnNotDecoded type=%d reason=%u", aType, aReason);

  const bool isAudio = aType == MediaData::AUDIO_DATA;

  // Ignore pending requests from video-only seek.
  if (isAudio && mTarget.IsVideoOnly()) {
    return;
  }

  switch (aReason) {
    case MediaDecoderReader::DECODE_ERROR:
      // If this is a decode error, delegate to the generic error path.
      CancelCallbacks();
      RejectIfExist(__func__);
      return;

    case MediaDecoderReader::WAITING_FOR_DATA:
      // If the decoder is waiting for data, we tell it to call us back when
      // the data arrives.
      mReader->WaitForData(aType);
      return;

    case MediaDecoderReader::CANCELED:
      // The request was cancelled out from under us; issue it again.
      if (isAudio) {
        RequestAudioData();
      } else {
        RequestVideoData();
      }
      return;

    case MediaDecoderReader::END_OF_STREAM:
      if (isAudio) {
        mIsAudioQueueFinished = true;
        mDoneAudioSeeking = true;
      } else {
        mIsVideoQueueFinished = true;
        mDoneVideoSeeking = true;
        if (mFirstVideoFrameAfterSeek) {
          // Hit the end of stream. Move mFirstVideoFrameAfterSeek into
          // mSeekedVideoData so we have something to display after seeking.
          mSeekedVideoData = mFirstVideoFrameAfterSeek.forget();
        }
      }
      MaybeFinishSeek();
      return;

    default:
      // Unknown reason: nothing to do (matches the original fall-through).
      return;
  }
}
void OnVideoEOS() { // End of stream. See if we can switch to another video decoder. MSE_DEBUG("%p MSR::OnVideoEOS %d (%p) (readers=%u)", this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length()); if (SwitchVideoReaders(SWITCH_FORCED)) { // Success! Resume decoding with next video decoder. RequestVideoData(false, mTimeThreshold); } else { // End of stream. MSE_DEBUG("%p MSR::OnVideoEOS %d (%p) EOS (readers=%u)", this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length()); GetCallback()->OnVideoEOS(); } }
// The demuxer-level seek finished. Kick off the first decode on every
// stream that has not yet reached the seek target; the decoded samples
// are what determine the new stream time.
void AccurateSeekTask::OnSeekResolved(media::TimeUnit)
{
  AssertOwnerThread();
  mSeekRequest.Complete();

  const bool needVideo = !mDoneVideoSeeking;
  const bool needAudio = !mDoneAudioSeeking;

  if (needVideo) {
    RequestVideoData();
  }
  if (needAudio) {
    RequestAudioData();
  }
}
void MediaSourceReader::OnVideoEOS() { // End of stream. See if we can switch to another video decoder. MSE_DEBUG("MediaSourceReader(%p)::OnVideoEOS reader=%p (decoders=%u)", this, mVideoReader.get(), mVideoTrack->Decoders().Length()); if (SwitchVideoReader(mLastVideoTime)) { // Success! Resume decoding with next video decoder. RequestVideoData(false, 0); } else if (IsEnded()) { // End of stream. MSE_DEBUG("MediaSourceReader(%p)::OnVideoEOS reader=%p EOS (decoders=%u)", this, mVideoReader.get(), mVideoTrack->Decoders().Length()); GetCallback()->OnVideoEOS(); } }
// Issues a video sample request unless one is already outstanding (in
// flight, waiting for data, or the demuxer seek itself is pending) or
// video decoding is finished. Always returns NS_OK.
nsresult SeekTask::EnsureVideoDecodeTaskQueued()
{
  AssertOwnerThread();
  SAMPLE_LOG("EnsureVideoDecodeTaskQueued isDecoding=%d status=%s", IsVideoDecoding(), VideoRequestStatus());

  const bool requestPending = mReader->IsRequestingVideoData() ||
                              mReader->IsWaitingVideoData() ||
                              mSeekRequest.Exists();

  if (IsVideoDecoding() && !requestPending) {
    RequestVideoData();
  }
  return NS_OK;
}
// Connects the four reader callback channels (decode + wait, for audio and
// video) so sample and error notifications are delivered to this task on
// the owner thread. Each lambda captures |this| raw, so the connections
// must be disconnected before this task is destroyed — presumably via
// CancelCallbacks(); confirm against the rest of the class.
void AccurateSeekTask::SetCallbacks()
{
  AssertOwnerThread();

  mAudioCallback = mReader->AudioCallback().Connect(
    OwnerThread(), [this] (AudioCallbackData aData) {
    // The payload is either a decoded sample or a not-decoded reason.
    if (aData.is<MediaData*>()) {
      OnAudioDecoded(aData.as<MediaData*>());
    } else {
      OnNotDecoded(MediaData::AUDIO_DATA, aData.as<MediaDecoderReader::NotDecodedReason>());
    }
  });

  mVideoCallback = mReader->VideoCallback().Connect(
    OwnerThread(), [this] (VideoCallbackData aData) {
    // Video samples arrive paired with a timestamp; unpack the tuple case.
    typedef Tuple<MediaData*, TimeStamp> Type;
    if (aData.is<Type>()) {
      OnVideoDecoded(Get<0>(aData.as<Type>()));
    } else {
      OnNotDecoded(MediaData::VIDEO_DATA, aData.as<MediaDecoderReader::NotDecodedReason>());
    }
  });

  mAudioWaitCallback = mReader->AudioWaitCallback().Connect(
    OwnerThread(), [this] (WaitCallbackData aData) {
    // Ignore pending requests from video-only seek.
    if (mTarget.IsVideoOnly()) {
      return;
    }
    // Data has arrived; retry the audio request.
    if (aData.is<MediaData::Type>()) {
      RequestAudioData();
    }
  });

  mVideoWaitCallback = mReader->VideoWaitCallback().Connect(
    OwnerThread(), [this] (WaitCallbackData aData) {
    // Data has arrived; retry the video request.
    if (aData.is<MediaData::Type>()) {
      RequestVideoData();
    }
  });
}
// A decoded video sample arrived while seeking. For a fast (keyframe)
// seek the first sample ends the video side of the seek; for an accurate
// seek we drop samples until one reaches the target time.
void AccurateSeekTask::OnVideoDecoded(MediaData* aVideoSample)
{
  AssertOwnerThread();
  MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished");

  RefPtr<MediaData> video(aVideoSample);
  MOZ_ASSERT(video);

  // The MDSM::mDecodedVideoEndTime will be updated once the whole SeekTask is
  // resolved.
  SAMPLE_LOG("OnVideoDecoded [%lld,%lld] disc=%d", video->mTime, video->GetEndTime(), video->mDiscontinuity);

  if (mFirstVideoSample) {
    // The first sample after a seek must carry the discontinuity flag.
    mFirstVideoSample = false;
    MOZ_ASSERT(video->mDiscontinuity);
  }

  AdjustFastSeekIfNeeded(video);

  if (mTarget.IsFast()) {
    // Non-precise seek. We can stop the seek at the first sample.
    mSeekedVideoData = video;
    mDoneVideoSeeking = true;
  } else if (NS_FAILED(DropVideoUpToSeekTarget(video.get()))) {
    // Dropping failed; abort the whole seek through the error path.
    CancelCallbacks();
    RejectIfExist(__func__);
    return;
  }

  if (mDoneVideoSeeking) {
    MaybeFinishSeek();
  } else {
    // Still short of the target: keep decoding.
    RequestVideoData();
  }
}