bool MediaSourceDemuxer::ScanSourceBuffersForContent() { MOZ_ASSERT(OnTaskQueue()); if (mSourceBuffers.IsEmpty()) { return false; } MonitorAutoLock mon(mMonitor); bool haveEmptySourceBuffer = false; for (const auto& sourceBuffer : mSourceBuffers) { MediaInfo info = sourceBuffer->GetMetadata(); if (!info.HasAudio() && !info.HasVideo()) { haveEmptySourceBuffer = true; } if (info.HasAudio() && !mAudioTrack) { mInfo.mAudio = info.mAudio; mAudioTrack = sourceBuffer; } if (info.HasVideo() && !mVideoTrack) { mInfo.mVideo = info.mVideo; mVideoTrack = sourceBuffer; } if (info.IsEncrypted() && !mInfo.IsEncrypted()) { mInfo.mCrypto = info.mCrypto; } } if (mInfo.HasAudio() && mInfo.HasVideo()) { // We have both audio and video. We can ignore non-ready source buffer. return true; } return !haveEmptySourceBuffer; }
// Called when a track buffer has finished parsing its init segment. Claims
// the buffer as the reader's audio and/or video source if that slot is still
// free, then tells the decoder that the waiting-for-resources state may have
// changed.
void MediaSourceReader::OnTrackBufferConfigured(TrackBuffer* aTrackBuffer, const MediaInfo& aInfo) {
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  MOZ_ASSERT(aTrackBuffer->IsReady());
  MOZ_ASSERT(mTrackBuffers.Contains(aTrackBuffer));
  const bool claimAudio = aInfo.HasAudio() && !mAudioTrack;
  if (claimAudio) {
    MSE_DEBUG("MediaSourceReader(%p)::OnTrackBufferConfigured %p audio", this, aTrackBuffer);
    mAudioTrack = aTrackBuffer;
  }
  const bool claimVideo = aInfo.HasVideo() && !mVideoTrack;
  if (claimVideo) {
    MSE_DEBUG("MediaSourceReader(%p)::OnTrackBufferConfigured %p video", this, aTrackBuffer);
    mVideoTrack = aTrackBuffer;
  }
  mDecoder->NotifyWaitingForResourcesStatusChanged();
}
// Starts playback: the wrapped audio sink is always started; the video-side
// machinery (end promise, listener, render loop) is only wired up when the
// media actually contains a video track.
void VideoSink::Start(int64_t aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  VSINK_LOG("[%s]", __func__);
  mAudioSink->Start(aStartTime, aInfo);
  mHasVideo = aInfo.HasVideo();
  if (!mHasVideo) {
    return;
  }
  mEndPromise = mEndPromiseHolder.Ensure(__func__);
  ConnectListener();
  TryUpdateRenderedVideoFrames();
}
// Starts the sink. The wrapped mAudioSink is always started; when the media
// has a video track, this additionally creates the end promise, chains onto
// the underlying sink's video-ended promise (if any), connects listeners and
// kicks the render loop once. Returns the result of starting mAudioSink.
nsresult VideoSink::Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  VSINK_LOG("[%s]", __func__);
  nsresult rv = mAudioSink->Start(aStartTime, aInfo);
  mHasVideo = aInfo.HasVideo();
  if (mHasVideo) {
    mEndPromise = mEndPromiseHolder.Ensure(__func__);
    // If the underlying MediaSink has an end promise for the video track (which
    // happens when mAudioSink refers to a DecodedStream), we must wait for it
    // to complete before resolving our own end promise. Otherwise, MDSM might
    // stop playback before DecodedStream plays to the end and cause
    // test_streams_element_capture.html to time out.
    RefPtr<EndedPromise> p = mAudioSink->OnEnded(TrackInfo::kVideoTrack);
    if (p) {
      RefPtr<VideoSink> self = this;
      p->Then(mOwnerThread, __func__,
              [self]() {
                self->mVideoSinkEndRequest.Complete();
                self->TryUpdateRenderedVideoFrames();
                // It is possible the video queue size is 0 and we have no
                // frames to render. However, we need to call
                // MaybeResolveEndPromise() to ensure mEndPromiseHolder is
                // resolved.
                self->MaybeResolveEndPromise();
              },
              [self]() {
                // Rejection path: run the same completion steps so the end
                // promise still gets resolved.
                self->mVideoSinkEndRequest.Complete();
                self->TryUpdateRenderedVideoFrames();
                self->MaybeResolveEndPromise();
              })
          ->Track(mVideoSinkEndRequest);
    }
    ConnectListener();
    // Run the render loop at least once so we can resolve the end promise
    // when video duration is 0.
    UpdateRenderedVideoFrames();
  }
  return rv;
}
// Constructs a SeekTask. Clamps the seek target into the media's playable
// range [0, duration], arms the drop-until-discontinuity flags for whichever
// tracks exist, and wires up the reader callbacks.
SeekTask::SeekTask(const void* aDecoderID, AbstractThread* aThread, MediaDecoderReaderWrapper* aReader, SeekJob&& aSeekJob, const MediaInfo& aInfo, const media::TimeUnit& aDuration, int64_t aCurrentMediaTime)
  : mDecoderID(aDecoderID)
  , mOwnerThread(aThread)
  , mReader(aReader)
  , mSeekJob(Move(aSeekJob))
  , mCurrentTimeBeforeSeek(aCurrentMediaTime)
  , mAudioRate(aInfo.mAudio.mRate)
  , mHasAudio(aInfo.HasAudio())
  , mHasVideo(aInfo.HasVideo())
  , mDropAudioUntilNextDiscontinuity(false)
  , mDropVideoUntilNextDiscontinuity(false)
  , mIsDiscarded(false)
  , mIsAudioQueueFinished(false)
  , mIsVideoQueueFinished(false)
  , mNeedToStopPrerollingAudio(false)
  , mNeedToStopPrerollingVideo(false)
{
  // Bound the seek time to be inside the media range.
  const int64_t mediaEnd = aDuration.ToMicroseconds();
  NS_ASSERTION(mediaEnd != -1, "Should know end time by now");
  int64_t clampedSeek = mSeekJob.mTarget.GetTime().ToMicroseconds();
  clampedSeek = std::min(clampedSeek, mediaEnd);
  clampedSeek = std::max(int64_t(0), clampedSeek);
  NS_ASSERTION(clampedSeek >= 0 && clampedSeek <= mediaEnd, "Can only seek in range [0,duration]");
  mSeekJob.mTarget.SetTime(media::TimeUnit::FromMicroseconds(clampedSeek));
  // Decoded samples before the first discontinuity are stale; drop them for
  // every track the media actually has.
  mDropAudioUntilNextDiscontinuity = HasAudio();
  mDropVideoUntilNextDiscontinuity = HasVideo();
  // Configure MediaDecoderReaderWrapper.
  SetMediaDecoderReaderWrapperCallback();
}
// Constructs an AccurateSeekTask. Records the playback position before the
// seek, marks a track's seeking as already done when the media lacks it (or,
// for audio, when the seek is video-only), clamps the target into [0, aEnd],
// and installs the reader callbacks.
AccurateSeekTask::AccurateSeekTask(const void* aDecoderID, AbstractThread* aThread, MediaDecoderReaderWrapper* aReader, const SeekTarget& aTarget, const MediaInfo& aInfo, const media::TimeUnit& aEnd, int64_t aCurrentMediaTime)
  : SeekTask(aDecoderID, aThread, aReader, aTarget)
  , mCurrentTimeBeforeSeek(media::TimeUnit::FromMicroseconds(aCurrentMediaTime))
  , mAudioRate(aInfo.mAudio.mRate)
  , mDoneAudioSeeking(!aInfo.HasAudio() || aTarget.IsVideoOnly())
  , mDoneVideoSeeking(!aInfo.HasVideo())
{
  AssertOwnerThread();
  // Bound the seek time to be inside the media range.
  NS_ASSERTION(aEnd.ToMicroseconds() != -1, "Should know end time by now");
  const media::TimeUnit boundedAbove = std::min(mTarget.GetTime(), aEnd);
  mTarget.SetTime(std::max(media::TimeUnit(), boundedAbove));
  // Configure MediaDecoderReaderWrapper.
  SetCallbacks();
}
// Starts playback. The wrapped audio sink is always started; when the media
// has a video track we also create the end promise, chain onto the underlying
// sink's video-ended promise (if any), connect listeners and run the render
// loop once.
void VideoSink::Start(int64_t aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  VSINK_LOG("[%s]", __func__);
  mAudioSink->Start(aStartTime, aInfo);
  mHasVideo = aInfo.HasVideo();
  if (!mHasVideo) {
    return;
  }
  mEndPromise = mEndPromiseHolder.Ensure(__func__);
  // If the underlying MediaSink has an end promise for the video track (which
  // happens when mAudioSink refers to a DecodedStream), we must wait for it
  // to complete before resolving our own end promise. Otherwise, MDSM might
  // stop playback before DecodedStream plays to the end and cause
  // test_streams_element_capture.html to time out.
  RefPtr<GenericPromise> videoEnded = mAudioSink->OnEnded(TrackInfo::kVideoTrack);
  if (videoEnded) {
    RefPtr<VideoSink> keepAlive = this;
    mVideoSinkEndRequest.Begin(videoEnded->Then(
        mOwnerThread, __func__,
        [keepAlive]() {
          keepAlive->mVideoSinkEndRequest.Complete();
          keepAlive->TryUpdateRenderedVideoFrames();
        },
        [keepAlive]() {
          // Rejection path performs the same completion steps.
          keepAlive->mVideoSinkEndRequest.Complete();
          keepAlive->TryUpdateRenderedVideoFrames();
        }));
  }
  ConnectListener();
  // Run the render loop at least once so we can resolve the end promise
  // when video duration is 0.
  UpdateRenderedVideoFrames();
}