// Begin playback of the decoded stream starting at aStartTime.
// Must be called on the owner thread, and only once per playback session
// (mStartTime must be empty). Synchronously hops to the main thread to
// create the DecodedStreamData backing object, because the graph/output
// stream machinery it touches is main-thread only.
void DecodedStream::Start(int64_t aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");

  // Record playback parameters and start listening to queue events before
  // any data can flow.
  mStartTime.emplace(aStartTime);
  mInfo = aInfo;
  mPlaying = true;
  ConnectListener();

  // Main-thread runnable that constructs the DecodedStreamData (or resolves
  // the finish promise immediately when there is no graph to attach to).
  class R : public Runnable {
    typedef MozPromiseHolder<GenericPromise> Promise;
  public:
    R(PlaybackInfoInit&& aInit, Promise&& aPromise, OutputStreamManager* aManager)
      : mInit(Move(aInit)), mOutputStreamManager(aManager) {
      mPromise = Move(aPromise);
    }
    NS_IMETHOD Run() override {
      MOZ_ASSERT(NS_IsMainThread());
      // No need to create a source stream when there are no output streams. This
      // happens when RemoveOutput() is called immediately after StartPlayback().
      if (!mOutputStreamManager->Graph()) {
        // Resolve the promise to indicate the end of playback.
        mPromise.Resolve(true, __func__);
        return NS_OK;
      }
      mData = MakeUnique<DecodedStreamData>(
        mOutputStreamManager, Move(mInit), Move(mPromise));
      return NS_OK;
    }
    // Transfers ownership of the created data (may be null — see Run()) back
    // to the caller after the synchronous dispatch completes.
    UniquePtr<DecodedStreamData> ReleaseData() {
      return Move(mData);
    }
  private:
    PlaybackInfoInit mInit;
    Promise mPromise;
    RefPtr<OutputStreamManager> mOutputStreamManager;
    UniquePtr<DecodedStreamData> mData;
  };

  // mFinishPromise is armed before dispatch so the runnable can resolve it
  // (either immediately, or later via DecodedStreamData).
  MozPromiseHolder<GenericPromise> promise;
  mFinishPromise = promise.Ensure(__func__);
  PlaybackInfoInit init { aStartTime, aInfo };
  nsCOMPtr<nsIRunnable> r = new R(Move(init), Move(promise), mOutputStreamManager);
  nsCOMPtr<nsIThread> mainThread = do_GetMainThread();
  // Block the owner thread until the main-thread work finishes so mData is
  // ready to use immediately below.
  SyncRunnable::DispatchToThread(mainThread, r);
  mData = static_cast<R*>(r.get())->ReleaseData();

  if (mData) {
    mData->SetPlaying(mPlaying);
    SendData();
  }
}
// Start the video sink at aStartTime. The wrapped audio sink is started
// unconditionally; video-specific setup only happens when the media
// actually has a video track. Owner thread only.
void VideoSink::Start(int64_t aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  VSINK_LOG("[%s]", __func__);

  // The underlying sink is started whether or not video is present.
  mAudioSink->Start(aStartTime, aInfo);

  mHasVideo = aInfo.HasVideo();
  if (!mHasVideo) {
    // Audio-only media: nothing further for the video sink to do.
    return;
  }

  // Arm the end promise, subscribe to video queue events, and kick the
  // render loop once.
  mEndPromise = mEndPromiseHolder.Ensure(__func__);
  ConnectListener();
  TryUpdateRenderedVideoFrames();
}
// Start the video sink at aStartTime. Starts the wrapped audio sink
// unconditionally, then — when a video track is present — arms the end
// promise, optionally chains to the underlying sink's video-track end
// promise, connects queue listeners, and runs the render loop once.
// Owner thread only.
void VideoSink::Start(int64_t aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  VSINK_LOG("[%s]", __func__);

  mAudioSink->Start(aStartTime, aInfo);

  mHasVideo = aInfo.HasVideo();
  if (mHasVideo) {
    mEndPromise = mEndPromiseHolder.Ensure(__func__);

    // If the underlying MediaSink has an end promise for the video track (which
    // happens when mAudioSink refers to a DecodedStream), we must wait for it
    // to complete before resolving our own end promise. Otherwise, MDSM might
    // stop playback before DecodedStream plays to the end and cause
    // test_streams_element_capture.html to time out.
    RefPtr<GenericPromise> p = mAudioSink->OnEnded(TrackInfo::kVideoTrack);
    if (p) {
      // Capture a strong self-reference so the sink outlives the async
      // callbacks; both resolve and reject paths run the same completion work.
      RefPtr<VideoSink> self = this;
      mVideoSinkEndRequest.Begin(p->Then(mOwnerThread, __func__,
        [self] () {
          self->mVideoSinkEndRequest.Complete();
          self->TryUpdateRenderedVideoFrames();
          // It is possible the video queue size is 0 and we have no frames to
          // render. However, we need to call MaybeResolveEndPromise() to ensure
          // mEndPromiseHolder is resolved.
          self->MaybeResolveEndPromise();
        }, [self] () {
          self->mVideoSinkEndRequest.Complete();
          self->TryUpdateRenderedVideoFrames();
          self->MaybeResolveEndPromise();
        }));
    }

    ConnectListener();
    // Run the render loop at least once so we can resolve the end promise
    // when video duration is 0.
    UpdateRenderedVideoFrames();
  }
}
// Begin playback of the decoded stream starting at aStartTime.
// Must be called on the owner thread with playback not yet started
// (mStartTime empty). Data creation is deferred to the main thread via an
// asynchronously dispatched runnable that invokes DecodedStream::CreateData.
void DecodedStream::Start(int64_t aStartTime, const MediaInfo& aInfo) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");

  // Record playback parameters and start listening to queue events.
  mStartTime.emplace(aStartTime);
  mInfo = aInfo;
  mPlaying = true;
  ConnectListener();

  // Runnable that carries the finish-promise holder to the main thread and
  // hands it to a DecodedStream member function there.
  class DispatchHelper : public nsRunnable {
    typedef MozPromiseHolder<GenericPromise> HolderType;
    typedef void(DecodedStream::*MemberFn)(HolderType&&);
  public:
    DispatchHelper(DecodedStream* aTarget, MemberFn aFn, HolderType&& aHolder)
      : mTarget(aTarget), mFn(aFn) {
      mHolder = Move(aHolder);
    }

    NS_IMETHOD Run() override {
      // Forward the promise holder into the target member function.
      (mTarget->*mFn)(Move(mHolder));
      return NS_OK;
    }

  private:
    nsRefPtr<DecodedStream> mTarget;  // keeps the stream alive across dispatch
    MemberFn mFn;
    HolderType mHolder;
  };

  // Arm mFinishPromise before dispatching so CreateData can resolve it.
  MozPromiseHolder<GenericPromise> finishHolder;
  mFinishPromise = finishHolder.Ensure(__func__);

  nsCOMPtr<nsIRunnable> task =
    new DispatchHelper(this, &DecodedStream::CreateData, Move(finishHolder));
  AbstractThread::MainThread()->Dispatch(task.forget());
}
// One iteration of the audio sink state machine; runs on the audio thread.
// Each call handles exactly the current mState and schedules (or is
// re-triggered into) the next iteration as needed. State transitions are
// staged in mPendingState and applied only at the end of the function so
// mState stays stable throughout a single iteration.
void DecodedAudioDataSink::AudioLoop() {
  AssertOnAudioThread();
  // Clear the flag before doing work so a new loop can be scheduled while
  // this one runs.
  mAudioLoopScheduled = false;

  switch (mState) {
    case AUDIOSINK_STATE_INIT: {
      SINK_LOG("AudioLoop started");
      nsresult rv = InitializeAudioStream();
      if (NS_FAILED(rv)) {
        NS_WARNING("Initializing AudioStream failed.");
        // Propagate the failure to whoever is waiting on the end promise.
        mEndPromise.Reject(rv, __func__);
        SetState(AUDIOSINK_STATE_ERROR);
        break;
      }
      SetState(AUDIOSINK_STATE_PLAYING);
      ConnectListener();
      break;
    }

    case AUDIOSINK_STATE_PLAYING: {
      if (WaitingForAudioToPlay()) {
        // OnAudioQueueEvent() will schedule next loop.
        break;
      }
      if (!IsPlaybackContinuing()) {
        SetState(AUDIOSINK_STATE_COMPLETE);
        break;
      }
      if (!PlayAudio()) {
        SetState(AUDIOSINK_STATE_COMPLETE);
        break;
      }
      // Schedule next loop to play next sample.
      ScheduleNextLoop();
      break;
    }

    case AUDIOSINK_STATE_COMPLETE: {
      DisconnectListener();
      FinishAudioLoop();
      SetState(AUDIOSINK_STATE_SHUTDOWN);
      break;
    }

    // Terminal states: nothing to do.
    case AUDIOSINK_STATE_SHUTDOWN:
      break;

    case AUDIOSINK_STATE_ERROR:
      break;
  } // end of switch

  // We want mState to stay stable during AudioLoop to keep things simple.
  // Therefore, we only do state transition at the end of AudioLoop.
  if (mPendingState.isSome()) {
    MOZ_ASSERT(mState != mPendingState.ref());
    SINK_LOG("change mState, %d -> %d", mState, mPendingState.ref());
    mState = mPendingState.ref();
    mPendingState.reset();
    // Schedule next loop when state changes.
    ScheduleNextLoop();
  }
}