// Seeks the OMX-backed reader to aTarget and returns a promise that is
// resolved with the effective seek time once the seek position is known.
// Must be called on the reader's task queue.
//
// aTarget  - requested seek position (absolute stream time).
// aEndTime - end of the seekable range in microseconds (unused here; part of
//            the MediaDecoderReader::Seek signature).
RefPtr<MediaDecoderReader::SeekPromise>
MediaOmxReader::Seek(SeekTarget aTarget, int64_t aEndTime)
{
  MOZ_ASSERT(OnTaskQueue());
  EnsureActive();
  RefPtr<SeekPromise> p = mSeekPromise.Ensure(__func__);

  if (mHasAudio && mHasVideo) {
    // The OMXDecoder seeks/demuxes audio and video streams separately. So if
    // we seek both audio and video to aTarget, the audio stream can typically
    // seek closer to the seek target, since typically every audio block is
    // a sync point, whereas for video there are only keyframes once every few
    // seconds. So if we have both audio and video, we must seek the video
    // stream to the preceding keyframe first, get the stream time, and then
    // seek the audio stream to match the video stream's time. Otherwise, the
    // audio and video streams won't be in sync after the seek.
    mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
    // Hold a strong ref in the lambdas so the reader outlives the async seek.
    RefPtr<MediaOmxReader> self = this;
    mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__,
      [self] (MediaData* v) {
        // Video decoded: align the audio seek to the first video sample's
        // time and resolve the promise with that time.
        self->mSeekRequest.Complete();
        self->mAudioSeekTimeUs = v->mTime;
        self->mSeekPromise.Resolve(media::TimeUnit::FromMicroseconds(self->mAudioSeekTimeUs), __func__);
      },
      [self, aTarget] () {
        // Video decode failed: fall back to seeking audio to the requested
        // target. Note the promise is deliberately resolved (not rejected)
        // so the seek still completes.
        self->mSeekRequest.Complete();
        self->mAudioSeekTimeUs = aTarget.GetTime().ToMicroseconds();
        self->mSeekPromise.Resolve(aTarget.GetTime(), __func__);
      }));
  } else {
    // Single-stream case: no cross-stream alignment needed; seek both
    // markers to the target and resolve immediately.
    mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
    mSeekPromise.Resolve(aTarget.GetTime(), __func__);
  }
  return p;
}
// Forwards a seek request to the wrapped reader on its own task queue.
// The caller-supplied target is expressed relative to the presentation
// start, so it is shifted by StartTime() before being handed off.
RefPtr<MediaDecoderReader::SeekPromise>
MediaDecoderReaderWrapper::Seek(SeekTarget aTarget, media::TimeUnit aEndTime)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  // Translate into the reader's absolute timeline.
  const media::TimeUnit absoluteTime = aTarget.GetTime() + StartTime();
  aTarget.SetTime(absoluteTime);
  const int64_t endTimeUs = aEndTime.ToMicroseconds();
  return InvokeAsync(mReader->OwnerThread(), mReader.get(), __func__,
                     &MediaDecoderReader::Seek, aTarget, endTimeUs);
}
// Dispatches a seek to the underlying MediaFormatReader on its task queue,
// after rebasing the target onto the reader's absolute timeline.
RefPtr<ReaderProxy::SeekPromise>
ReaderProxy::SeekInternal(const SeekTarget& aTarget)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  // Work on a local copy so the caller's target is left untouched; offset
  // it by the stream's start time before forwarding.
  SeekTarget target = aTarget;
  target.SetTime(aTarget.GetTime() + StartTime());
  return InvokeAsync(mReader->OwnerThread(), mReader.get(), __func__,
                     &MediaFormatReader::Seek, std::move(target));
}
// Performs a time-based RTSP seek: first instructs the streaming server (via
// RtspMediaResource) to reposition, then delegates to the base OMX reader so
// the decoders flush their queues and seek to the target.
RefPtr<MediaDecoderReader::SeekPromise>
RtspOmxReader::Seek(SeekTarget aTarget, int64_t aEndTime)
{
  // Mark that the upcoming base-class Seek should (re)activate the decoders.
  mEnsureActiveFromSeek = true;

  auto& resource = mRtspResource;
  if (resource) {
    // RTSP seeking is time based: SeekTime() sends the seek command over the
    // network and also discards any data buffered in RtspMediaResource.
    resource->SeekTime(aTarget.GetTime().ToMicroseconds());
    resource->EnablePlayoutDelay();
  }

  // |MediaOmxReader::Seek| clears |mVideoQueue|/|mAudioQueue| and forwards
  // the seek time to the OMX audio/video decoders.
  return MediaOmxReader::Seek(aTarget, aEndTime);
}
// Constructs an accurate (frame-exact) seek task.
//
// aDecoderID        - opaque identifier forwarded to the SeekTask base.
// aThread           - owner thread for this task.
// aReader           - reader wrapper the task drives; callbacks are attached
//                     here via SetCallbacks().
// aTarget           - requested seek target; clamped below into [0, aEnd].
// aInfo             - media info used to decide which streams need seeking.
// aEnd              - media end time; must be known (asserted != -1 us).
// aCurrentMediaTime - playback position (microseconds) before the seek.
AccurateSeekTask::AccurateSeekTask(const void* aDecoderID,
                                   AbstractThread* aThread,
                                   MediaDecoderReaderWrapper* aReader,
                                   const SeekTarget& aTarget,
                                   const MediaInfo& aInfo,
                                   const media::TimeUnit& aEnd,
                                   int64_t aCurrentMediaTime)
  : SeekTask(aDecoderID, aThread, aReader, aTarget)
  , mCurrentTimeBeforeSeek(media::TimeUnit::FromMicroseconds(aCurrentMediaTime))
  , mAudioRate(aInfo.mAudio.mRate)
  , mDoneAudioSeeking(!aInfo.HasAudio() || aTarget.IsVideoOnly())
  , mDoneVideoSeeking(!aInfo.HasVideo())
{
  AssertOwnerThread();

  // Clamp the seek position into the media range [0, aEnd].
  NS_ASSERTION(aEnd.ToMicroseconds() != -1, "Should know end time by now");
  const media::TimeUnit upperBounded = std::min(mTarget.GetTime(), aEnd);
  mTarget.SetTime(std::max(media::TimeUnit(), upperBounded));

  // Configure MediaDecoderReaderWrapper.
  SetCallbacks();
}