// Audio thread main loop: pulls decoded audio from AudioQueue() and writes
// it (or gap-filling silence) to the AudioStream until playback stops or
// the queue reaches end of stream. Runs entirely on the audio thread.
void AudioSink::AudioLoop()
{
  AssertOnAudioThread();
  SINK_LOG("AudioLoop started");

  if (NS_FAILED(InitializeAudioStream())) {
    NS_WARNING("Initializing AudioStream failed.");
    mStateMachine->DispatchOnAudioSinkError();
    return;
  }

  while (1) {
    {
      // Hold the monitor only while waiting; it must be released before we
      // write to the audio hardware below.
      ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
      WaitForAudioToPlay();
      if (!IsPlaybackContinuing()) {
        break;
      }
    }
    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play");
    CheckedInt64 sampleTime =
      UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate);

    // Calculate the number of frames that have been pushed onto the audio hardware.
    CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) + mWritten;

    CheckedInt64 missingFrames = sampleTime - playedFrames;
    // CheckedInt64 propagates invalidity, so checking the difference and the
    // timestamp covers the whole computation chain.
    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow adding in AudioLoop");
      break;
    }

    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      // Clamp to UINT32_MAX since PlaySilence takes a uint32_t frame count.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value()));
    } else {
      mWritten += PlayFromAudioQueue();
    }

    // -1 means "no end time yet"; only publish real updates.
    int64_t endTime = GetEndTime();
    if (endTime != -1) {
      mOnAudioEndTimeUpdateTask->Dispatch(endTime);
    }
  }

  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  MOZ_ASSERT(mStopAudioThread || AudioQueue().AtEndOfStream());
  if (!mStopAudioThread && mPlaying) {
    // Reached end of stream while still playing: let the hardware drain any
    // buffered frames before tearing the stream down.
    Drain();
  }
  SINK_LOG("AudioLoop complete");
  Cleanup();
  SINK_LOG("AudioLoop exit");
}
bool DecodedAudioDataSink::PlayAudio() { // See if there's a gap in the audio. If there is, push silence into the // audio hardware, so we can play across the gap. // Calculate the timestamp of the next chunk of audio in numbers of // samples. NS_ASSERTION(AudioQueue().GetSize() > 0, "Should have data to play"); CheckedInt64 sampleTime = UsecsToFrames(AudioQueue().PeekFront()->mTime, mInfo.mRate); // Calculate the number of frames that have been pushed onto the audio hardware. CheckedInt64 playedFrames = UsecsToFrames(mStartTime, mInfo.mRate) + static_cast<int64_t>(mWritten); CheckedInt64 missingFrames = sampleTime - playedFrames; if (!missingFrames.isValid() || !sampleTime.isValid()) { NS_WARNING("Int overflow adding in AudioLoop"); return false; } if (missingFrames.value() > AUDIO_FUZZ_FRAMES) { // The next audio chunk begins some time after the end of the last chunk // we pushed to the audio hardware. We must push silence into the audio // hardware so that the next audio chunk begins playback at the correct // time. missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value()); mWritten += PlaySilence(static_cast<uint32_t>(missingFrames.value())); } else { mWritten += PlayFromAudioQueue(); } return true; }
// Requests the next buffer fill from the source, unless a trailing-silence
// period (used for repeats / play-window completion) must be played first,
// in which case the request is absorbed here and PlaySilence() takes over.
void CMMFDataPath2::FillSourceBufferL()
	{
	__ASSERT_DEBUG((iState == EPlaying || iState == EConverting || iState == ERecording ||
		(iState == EPrimed && iPauseCalled && iIsUsingResumeSupport) ),
		Panic(EMMFDataPathPanicBadState,__LINE__));

	//if the silence timer is active then dont propagate the request
	if(iRepeatTrailingSilenceTimer->IsActive())
		{
		return;
		}

	//play the silence period and dont propagate the request
	if(iTrailingSilenceLeftToPlay>0 || iVerifyPlayComplete)
		{
		if(iVerifyPlayComplete)//case when the trailing silence is zero
			{
			// DRM bookkeeping: record a play intent before looping, unless the
			// client disabled automatic intent execution.
			if (!*iDisableAutoIntent && iDrmSource)
				{
				CMMFFile* file = static_cast<CMMFFile*>(iDrmSource);
				TInt err = file->ExecuteIntent(ContentAccess::EPlay);
				if (err != KErrNone)
					{
					// Intent denied: report completion with the error and abort.
					DoSendEventToClient(KMMFEventCategoryPlaybackComplete, err);
					return;
					}
				}

			//Retrieve the current play time and add "duration-currentplaytime" to the silence period
			//This is to ensure that silence timer is not started before the previous play is actually completed by the devsound
			TTimeIntervalMicroSeconds currentTime = CalculateAudioOutputPosition();
			if(currentTime.Int64()>iPlayWindowStartPosition.Int64())
				{
				iTimeLeftToPlayComplete = iPlayWindowEndPosition.Int64()-currentTime.Int64();
				}
			else
				{
				iTimeLeftToPlayComplete = 0;
				}
			iVerifyPlayComplete = EFalse;
			}

		if(iTrailingSilenceLeftToPlay==0 && iTimeLeftToPlayComplete==0)
			{
			// Nothing left to wait for: rewind to the play window start and
			// mark the play-complete wait as finished (-1 = inactive).
			SetPositionL(iPlayWindowStartPosition);
			iTimeLeftToPlayComplete=-1;
			}
		else
			{
			// Start (or continue) the silence timer; the fill request will be
			// re-issued when the timer completes.
			PlaySilence();
			return;
			}
		}

	// Normal path: forward the fill request to the source.
	DoFillSourceBufferL();
	}
// Called when the trailing-silence timer fires. Either continues playing
// silence (if any remains) or rewinds to the play window start and resumes
// feeding buffers from the source. Always returns KErrNone.
TInt CMMFDataPath2::DoRepeatTrailingSilenceTimerCompleteL()
	{
	//cancel this periodic timer
	iRepeatTrailingSilenceTimer->Cancel();

	// The play-complete wait period has elapsed; clear it before deciding
	// what comes next.
	if(iTimeLeftToPlayComplete.Int64()>0)
		{
		iTimeLeftToPlayComplete=0;
		}

	if (iTrailingSilenceLeftToPlay.Int64() <= 0)
		{
		// All silence consumed: rewind to the play window start, mark the
		// wait as inactive (-1) and resume the data flow from the source.
		SetPositionL(iPlayWindowStartPosition);
		iTimeLeftToPlayComplete=-1;
		FillSourceBufferL();
		}
	else
		{
		// More silence to play: restart the timer for the remainder.
		PlaySilence();
		}

	return KErrNone;
	}
uint32_t DecodedAudioDataSink::PlayFromAudioQueue() { AssertOnAudioThread(); NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); nsRefPtr<AudioData> audio = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>()); SINK_LOG_V("playing %u frames of audio at time %lld", audio->mFrames, audio->mTime); if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) { mAudioStream->Write(audio->mAudioData, audio->mFrames); } else { SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]", mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels); PlaySilence(audio->mFrames); } StartAudioStreamPlaybackIfNeeded(); return audio->mFrames; }
uint32_t AudioSink::PlayFromAudioQueue() { AssertOnAudioThread(); NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); nsRefPtr<AudioData> audio(AudioQueue().PopFront()); SINK_LOG_V("playing %u frames of audio at time %lld", audio->mFrames, audio->mTime); if (audio->mRate == mInfo.mRate && audio->mChannels == mInfo.mChannels) { mAudioStream->Write(audio->mAudioData, audio->mFrames); } else { SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]", mInfo.mRate, mInfo.mChannels, audio->mRate, audio->mChannels); PlaySilence(audio->mFrames); } StartAudioStreamPlaybackIfNeeded(); if (audio->mOffset != -1) { mStateMachine->DispatchOnPlaybackOffsetUpdate(audio->mOffset); } return audio->mFrames; }