// Audio-thread main loop: initializes the audio stream, then repeatedly pulls
// decoded audio from AudioQueue() and writes it to the hardware until shutdown
// or end-of-stream. Gaps in the audio timeline are bridged with silence so
// later chunks start playback at the correct time.
void AudioSink::AudioLoop() {
  AssertOnAudioThread();
  SINK_LOG("AudioLoop started");
  if (NS_FAILED(InitializeAudioStream())) {
    NS_WARNING("Initializing AudioStream failed.");
    // Notify the state machine so it can surface the error; the loop never ran.
    mStateMachine->DispatchOnAudioSinkError();
    return;
  }
  while (1) {
    {
      // Hold the monitor only while blocking for data/play state; writing to
      // the hardware below happens outside the lock.
      ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
      WaitForAudioToPlay();
      if (!IsPlaybackContinuing()) {
        break;
      }
    }
    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
    CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);
    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) + mWritten;
    CheckedInt64 missingFrames = sampleTime - playedFrames;
    // CheckedInt64 tracks overflow; bail out rather than play at a bogus time.
    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow adding in AudioLoop");
      break;
    }
    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      // Clamp to UINT32_MAX since PlaySilence takes a uint32_t frame count.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
    } else {
      mWritten += PlayFromAudioQueue();
    }
    // Propagate the new end time (-1 means "unknown", so skip the dispatch).
    int64_t endTime = GetEndTime();
    if (endTime != -1) {
      mOnAudioEndTimeUpdateTask->Dispatch(endTime);
    }
  }
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
  // Drain only when we reached EOS while still playing (not on forced stop).
  if (!mStopAudioThread && mPlaying) {
    Drain();
  }
  SINK_LOG("AudioLoop complete");
  Cleanup();
  SINK_LOG("AudioLoop exit");
}
// Stop listening to audio-queue events. Audio-thread only.
void DecodedAudioDataSink::DisconnectListener() {
  AssertOnAudioThread();
  // Drop both subscriptions set up by ConnectListener(): new-data pushes
  // and the queue-finished notification.
  mPushListener.Disconnect();
  mFinishListener.Disconnect();
}
// Audio queue pushed data or finished: drive the state machine, unless an
// AudioLoop() iteration is already queued (it will observe the new state).
void DecodedAudioDataSink::OnAudioQueueEvent() {
  AssertOnAudioThread();
  if (mAudioLoopScheduled) {
    return;
  }
  AudioLoop();
}
// Subscribe to audio-queue events so AudioLoop() is re-entered whenever new
// data arrives or the queue finishes. Audio-thread only.
void DecodedAudioDataSink::ConnectListener() {
  AssertOnAudioThread();
  // Both events funnel into the same handler.
  mPushListener = AudioQueue().PushEvent().Connect(
    mThread, this, &DecodedAudioDataSink::OnAudioQueueEvent);
  mFinishListener = AudioQueue().FinishEvent().Connect(
    mThread, this, &DecodedAudioDataSink::OnAudioQueueEvent);
}
// Signal that audio playback has finished by resolving the end promise.
// Audio-thread only.
void DecodedAudioDataSink::Cleanup() {
  AssertOnAudioThread();
  mEndPromise.Resolve(true, __func__);
  // Since the promise is resolved asynchronously, we don't shutdown
  // AudioStream here so MDSM::ResyncAudioClock can get the correct
  // audio position.
}
// Block until the hardware has played out everything written so far.
// Only valid while playing with an un-paused stream.
void DecodedAudioDataSink::Drain() {
  AssertOnAudioThread();
  MOZ_ASSERT(mPlaying && !mAudioStream->IsPaused());
  // If the media was too short to ever trigger playback start, kick the
  // stream off now so Drain() has something to drain.
  mAudioStream->Start();
  mAudioStream->Drain();
}
void DecodedAudioDataSink::ScheduleNextLoop() { AssertOnAudioThread(); if (mAudioLoopScheduled) { return; } mAudioLoopScheduled = true; nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethod(this, &DecodedAudioDataSink::AudioLoop); DispatchTask(r.forget()); }
bool DecodedAudioDataSink::WaitingForAudioToPlay() { AssertOnAudioThread(); // Return true if we're not playing, and we're not shutting down, or we're // playing and we've got no audio to play. if (!mStopAudioThread && (!mPlaying || ExpectMoreAudioData())) { return true; } return false; }
// Final teardown for the audio loop: optionally drain the hardware, then
// resolve the end promise via Cleanup().
void DecodedAudioDataSink::FinishAudioLoop() {
  AssertOnAudioThread();
  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
  // Drain only if we reached end-of-stream while still playing; a forced
  // stop skips the (blocking) drain.
  const bool shouldDrain = !mStopAudioThread && mPlaying;
  if (shouldDrain) {
    Drain();
  }
  SINK_LOG("AudioLoop complete");
  Cleanup();
  SINK_LOG("AudioLoop exit");
}
bool DecodedAudioDataSink::IsPlaybackContinuing() { AssertOnAudioThread(); // If we're shutting down, captured, or at EOS, break out and exit the audio // thread. if (mStopAudioThread || AudioQueue().AtEndOfStream()) { return false; } return true; }
uint32_t AudioSink::PlaySilence(uint32_t aFrames) { // Maximum number of bytes we'll allocate and write at once to the audio // hardware when the audio stream contains missing frames and we're // writing silence in order to fill the gap. We limit our silence-writes // to 32KB in order to avoid allocating an impossibly large chunk of // memory if we encounter a large chunk of silence. const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024; AssertOnAudioThread(); NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); uint32_t maxFrames = SILENCE_BYTES_CHUNK / mInfo.mChannels / sizeof(AudioDataValue); uint32_t frames = std::min(aFrames, maxFrames); SINK_LOG_V("playing %u frames of silence", aFrames); WriteSilence(frames); return frames; }
// Pop the next decoded chunk, write it to the hardware, and report its byte
// offset to the state machine. Returns the number of frames written.
uint32_t AudioSink::PlayFromAudioQueue() {
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsRefPtr<AudioData> audio(AudioQueue().PopFront());
  SINK_LOG_V("playing %u frames of audio at time %lld", audio->mFrames, audio->mTime);
  mAudioStream->Write(audio->mAudioData, audio->mFrames);
  StartAudioStreamPlaybackIfNeeded();
  // An offset of -1 means the sample carries no byte-offset information.
  const bool hasOffset = audio->mOffset != -1;
  if (hasOffset) {
    mStateMachine->DispatchOnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
uint32_t DecodedAudioDataSink::PlayFromAudioQueue() { AssertOnAudioThread(); NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); nsRefPtr<AudioData> audio = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>()); SINK_LOG_V("playing %u frames of audio at time %lld", audio->mFrames, audio->mTime); if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) { mAudioStream->Write(audio->mAudioData, audio->mFrames); } else { SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]", mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels); PlaySilence(audio->mFrames); } StartAudioStreamPlaybackIfNeeded(); return audio->mFrames; }
uint32_t AudioSink::PlayFromAudioQueue() { AssertOnAudioThread(); NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); nsAutoPtr<AudioData> audio(AudioQueue().PopFront()); { ReentrantMonitorAutoEnter mon(GetReentrantMonitor()); NS_WARN_IF_FALSE(mPlaying, "Should be playing"); // Awaken the decode loop if it's waiting for space to free up in the // audio queue. GetReentrantMonitor().NotifyAll(); } SINK_LOG_V("playing %u frames of audio at time %lld", this, audio->mFrames, audio->mTime); mAudioStream->Write(audio->mAudioData, audio->mFrames); StartAudioStreamPlaybackIfNeeded(); if (audio->mOffset != -1) { mStateMachine->OnPlaybackOffsetUpdate(audio->mOffset); } return audio->mFrames; }
uint32_t AudioSink::PlayFromAudioQueue() { AssertOnAudioThread(); NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); nsRefPtr<AudioData> audio(AudioQueue().PopFront()); SINK_LOG_V("playing %u frames of audio at time %lld", audio->mFrames, audio->mTime); if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) { mAudioStream->Write(audio->mAudioData, audio->mFrames); } else { SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]", mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels); PlaySilence(audio->mFrames); } StartAudioStreamPlaybackIfNeeded(); if (audio->mOffset != -1) { mStateMachine->DispatchOnPlaybackOffsetUpdate(audio->mOffset); } return audio->mFrames; }
// Record a state transition; it is applied at the end of AudioLoop() so
// mState stays stable within a single iteration.
void DecodedAudioDataSink::SetState(State aState) {
  AssertOnAudioThread();
  mPendingState = Some(aState);
}
// One iteration of the audio-sink state machine. Runs on the audio thread and
// is re-entered via ScheduleNextLoop() or audio-queue events rather than
// looping inline, so the thread is never blocked inside this function.
void DecodedAudioDataSink::AudioLoop() {
  AssertOnAudioThread();
  // We're running now; a new iteration may be scheduled below.
  mAudioLoopScheduled = false;
  switch (mState) {
    case AUDIOSINK_STATE_INIT: {
      SINK_LOG("AudioLoop started");
      nsresult rv = InitializeAudioStream();
      if (NS_FAILED(rv)) {
        NS_WARNING("Initializing AudioStream failed.");
        // Reject the end promise with the failure code and park in ERROR.
        mEndPromise.Reject(rv, __func__);
        SetState(AUDIOSINK_STATE_ERROR);
        break;
      }
      SetState(AUDIOSINK_STATE_PLAYING);
      ConnectListener();
      break;
    }
    case AUDIOSINK_STATE_PLAYING: {
      if (WaitingForAudioToPlay()) {
        // OnAudioQueueEvent() will schedule next loop.
        break;
      }
      if (!IsPlaybackContinuing()) {
        // Shutdown requested or end-of-stream reached.
        SetState(AUDIOSINK_STATE_COMPLETE);
        break;
      }
      if (!PlayAudio()) {
        SetState(AUDIOSINK_STATE_COMPLETE);
        break;
      }
      // Schedule next loop to play next sample.
      ScheduleNextLoop();
      break;
    }
    case AUDIOSINK_STATE_COMPLETE: {
      // Stop listening, drain/cleanup, then move to terminal SHUTDOWN.
      DisconnectListener();
      FinishAudioLoop();
      SetState(AUDIOSINK_STATE_SHUTDOWN);
      break;
    }
    case AUDIOSINK_STATE_SHUTDOWN:
      break;
    case AUDIOSINK_STATE_ERROR:
      break;
  } // end of switch

  // We want mState to stay stable during AudioLoop to keep things simple.
  // Therefore, we only do state transition at the end of AudioLoop.
  if (mPendingState.isSome()) {
    MOZ_ASSERT(mState != mPendingState.ref());
    SINK_LOG("change mState, %d -> %d", mState, mPendingState.ref());
    mState = mPendingState.ref();
    mPendingState.reset();
    // Schedule next loop when state changes.
    ScheduleNextLoop();
  }
}