// Timer callback (see Start()): pushes the next slice of generated audio to
// the source stream, sized by the wall-clock time elapsed since the previous
// callback so playback stays in real time.
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  TimeStamp now = TimeStamp::Now();
  TimeDuration timeSinceLastNotify = now - mLastNotify;
  mLastNotify = now;

  // Convert elapsed microseconds to audio ticks at AUDIO_RATE, rounding up
  // so we never fall behind by a fractional sample.
  TrackTicks samplesSinceLastNotify =
    RateConvertTicksRoundUp(AUDIO_RATE, 1000000, timeSinceLastNotify.ToMicroseconds());

  // If it's been longer since the last Notify() than mBufferSize holds, we
  // have underrun and the MSG had to append silence while waiting for us
  // to push more data. In this case we reset to mBufferSize again.
  TrackTicks samplesToAppend = std::min(samplesSinceLastNotify, mBufferSize);

  AudioSegment segment;
  AppendToSegment(segment, samplesToAppend);
  mSource->AppendToTrack(mTrackID, &segment);

  // Generate null data for fake tracks.
  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      AudioSegment nullSegment;
      // Fake tracks carry silence of the same duration so all tracks on the
      // stream advance in lockstep.
      nullSegment.AppendNullData(samplesToAppend);
      mSource->AppendToTrack(kTrackCount + kFakeVideoTrackCount+i, &nullSegment);
    }
  }
  return NS_OK;
}
// The MediaStreamGraph guarantees that this is actually one block, for // AudioNodeStreams. void AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo) { if (mMarkAsFinishedAfterThisBlock) { // This stream was finished the last time that we looked at it, and all // of the depending streams have finished their output as well, so now // it's time to mark this stream as finished. FinishOutput(); } StreamBuffer::Track* track = EnsureTrack(); AudioSegment* segment = track->Get<AudioSegment>(); mLastChunks.SetLength(1); mLastChunks[0].SetNull(0); if (mInCycle) { // XXX DelayNode not supported yet so just produce silence mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE); } else { // We need to generate at least one input uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount()); OutputChunks inputChunks; inputChunks.SetLength(maxInputs); for (uint16_t i = 0; i < maxInputs; ++i) { ObtainInputBlock(inputChunks[i], i); } bool finished = false; if (maxInputs <= 1 && mEngine->OutputCount() <= 1) { mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished); } else { mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished); } if (finished) { mMarkAsFinishedAfterThisBlock = true; } } if (mKind == MediaStreamGraph::EXTERNAL_STREAM) { segment->AppendAndConsumeChunk(&mLastChunks[0]); } else { segment->AppendNullData(mLastChunks[0].GetDuration()); } for (uint32_t j = 0; j < mListeners.Length(); ++j) { MediaStreamListener* l = mListeners[j]; AudioChunk copyChunk = mLastChunks[0]; AudioSegment tmpSegment; tmpSegment.AppendAndConsumeChunk(©Chunk); l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), segment->GetDuration(), 0, tmpSegment); } }
// Starts audio capture: primes the stream with mBufferSize worth of data,
// creates the (queued) audio track(s), and arms the repeating timer whose
// Notify() callback keeps the stream fed.
//
// @param aStream          stream to append generated audio to (borrowed).
// @param aID              track ID for the main audio track.
// @param aPrincipalHandle principal to associate with the appended data.
// @return NS_ERROR_FAILURE unless the source is in the kAllocated state and
//         a timer could be created; NS_OK otherwise.
nsresult
MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID,
                                     const PrincipalHandle& aPrincipalHandle)
{
  if (mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }

  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }

  mSource = aStream;

  // We try to keep the appended data at this size.
  // Make it two timer intervals to try to avoid underruns.
  mBufferSize = 2 * (AUDIO_RATE * DEFAULT_AUDIO_TIMER_MS) / 1000;

  // AddTrack will take ownership of segment
  AudioSegment* segment = new AudioSegment();
  AppendToSegment(*segment, mBufferSize);
  mSource->AddAudioTrack(aID, AUDIO_RATE, 0, segment,
                         SourceMediaStream::ADDTRACK_QUEUED);

  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      // Each fake track gets its own heap segment of silence; ownership
      // passes to AddAudioTrack just like the real track above.
      segment = new AudioSegment();
      segment->AppendNullData(mBufferSize);
      mSource->AddAudioTrack(kTrackCount + kFakeVideoTrackCount+i,
                             AUDIO_RATE, 0, segment,
                             SourceMediaStream::ADDTRACK_QUEUED);
    }
  }

  // Remember TrackID so we can finish later
  mTrackID = aID;

  // Remember PrincipalHandle since we don't append in NotifyPull.
  mPrincipalHandle = aPrincipalHandle;

  mLastNotify = TimeStamp::Now();

  // 1 Audio frame per 10ms
#if defined(MOZ_WIDGET_GONK) && defined(DEBUG)
// B2G emulator debug is very, very slow and has problems dealing with realtime audio inputs
  mTimer->InitWithCallback(this, DEFAULT_AUDIO_TIMER_MS*10,
                           nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
#else
  mTimer->InitWithCallback(this, DEFAULT_AUDIO_TIMER_MS,
                           nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
#endif
  mState = kStarted;

  return NS_OK;
}
// Processes the graph interval [aFrom, aTo): mixes every audio track of
// every input stream down to one stereo track (via mMixer), or appends
// silence when finished, muted-in-a-cycle, or without inputs.
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  uint32_t inputCount = mInputs.Length();
  StreamBuffer::Track* track = EnsureTrack(mTrackId);
  // Notify the DOM everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), mTrackId, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  // If the captured stream is connected back to a object on the page (be it
  // an HTMLMediaElement with a stream as source, or an AudioContext), a
  // cycle situation can occur. This can work if it's an AudioContext with at
  // least one DelayNode, but the MSG will mute the whole cycle otherwise.
  if (mFinished || InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs, to a stereo track. Everything
    // is {up,down}-mixed to stereo.
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
      while (!tracks.IsEnded()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aFrom, aTo] range.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
        tracks.Next();
      }
    }
    // This calls MixerCallback below
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking((aTo)));
}
void AudioNodeStream::AdvanceOutputSegment() { StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK); AudioSegment* segment = track->Get<AudioSegment>(); if (mKind == MediaStreamGraph::EXTERNAL_STREAM) { segment->AppendAndConsumeChunk(&mLastChunks[0]); } else { segment->AppendNullData(mLastChunks[0].GetDuration()); } for (uint32_t j = 0; j < mListeners.Length(); ++j) { MediaStreamListener* l = mListeners[j]; AudioChunk copyChunk = mLastChunks[0]; AudioSegment tmpSegment; tmpSegment.AppendAndConsumeChunk(©Chunk); l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK, segment->GetDuration(), 0, tmpSegment); } }
// The MediaStreamGraph guarantees that this is actually one block, for // AudioNodeStreams. void AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo) { StreamBuffer::Track* track = EnsureTrack(); AudioChunk outputChunk; AudioSegment* segment = track->Get<AudioSegment>(); outputChunk.SetNull(0); if (mInCycle) { // XXX DelayNode not supported yet so just produce silence outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE); } else { AudioChunk tmpChunk; AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk); bool finished = false; mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished); if (finished) { FinishOutput(); } } mLastChunk = outputChunk; if (mKind == MediaStreamGraph::EXTERNAL_STREAM) { segment->AppendAndConsumeChunk(&outputChunk); } else { segment->AppendNullData(outputChunk.GetDuration()); } for (uint32_t j = 0; j < mListeners.Length(); ++j) { MediaStreamListener* l = mListeners[j]; AudioChunk copyChunk = outputChunk; AudioSegment tmpSegment; tmpSegment.AppendAndConsumeChunk(©Chunk); l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), segment->GetDuration(), 0, tmpSegment); } }
// Timer callback: synthesizes one AUDIO_FRAME_LENGTH-sample sine-wave frame
// and appends it to the main track, plus silence of equal length on every
// fake track, so all tracks advance together.
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  // Allocate a shared sample buffer and fill it from the sine generator.
  nsRefPtr<SharedBuffer> buffer =
    SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* samples = static_cast<int16_t*>(buffer->Data());
  mSineGenerator->generate(samples, AUDIO_FRAME_LENGTH);

  // Wrap the mono buffer in a one-channel segment and hand ownership of the
  // buffer over to it.
  nsAutoTArray<const int16_t*,1> channelData;
  channelData.AppendElement(samples);
  AudioSegment segment;
  segment.AppendFrames(buffer.forget(), channelData, AUDIO_FRAME_LENGTH);
  mSource->AppendToTrack(mTrackID, &segment);

  // Generate null data for fake tracks.
  if (mHasFakeTracks) {
    for (int idx = 0; idx < kFakeAudioTrackCount; ++idx) {
      AudioSegment silence;
      silence.AppendNullData(AUDIO_FRAME_LENGTH);
      mSource->AppendToTrack(kTrackCount + kFakeVideoTrackCount+idx, &silence);
    }
  }

  return NS_OK;
}
void AudioNodeStream::AdvanceOutputSegment() { StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK); // No more tracks will be coming mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX); AudioSegment* segment = track->Get<AudioSegment>(); if (!mLastChunks[0].IsNull()) { segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk()); } else { segment->AppendNullData(mLastChunks[0].GetDuration()); } for (uint32_t j = 0; j < mListeners.Length(); ++j) { MediaStreamListener* l = mListeners[j]; AudioChunk copyChunk = mLastChunks[0].AsAudioChunk(); AudioSegment tmpSegment; tmpSegment.AppendAndConsumeChunk(©Chunk); l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK, segment->GetDuration(), 0, tmpSegment); } }
// Verifies OpusTrackEncoder initialization rules: best-effort init on
// repeated long null-data attempts, immediate init for real audio, and the
// [8000, 192000] Hz sample-rate bounds.
TEST(OpusAudioTrackEncoder, Init)
{
  {
    // The encoder does not normally receive enough info from null data to
    // init. However, multiple attempts to do so, with sufficiently long
    // duration segments, should result in a best effort attempt. The first
    // attempt should never do this though, even if the duration is long:
    OpusTrackEncoder encoder(48000);
    AudioSegment segment;
    segment.AppendNullData(48000 * 100);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());

    // Multiple init attempts should result in best effort init:
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }

  {
    // If the duration of the segments given to the encoder is not long then
    // we shouldn't try a best effort init:
    OpusTrackEncoder encoder(48000);
    AudioSegment segment;
    segment.AppendNullData(1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
  }

  {
    // For non-null segments we should init immediately
    OpusTrackEncoder encoder(48000);
    AudioSegment segment;
    AudioGenerator generator(2, 48000);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }

  {
    // Test low sample rate bound (7999 Hz is just below the minimum)
    OpusTrackEncoder encoder(7999);
    AudioSegment segment;
    AudioGenerator generator(2, 7999);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
  }

  {
    // Test low sample rate bound (8000 Hz is the minimum accepted rate)
    OpusTrackEncoder encoder(8000);
    AudioSegment segment;
    AudioGenerator generator(2, 8000);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }

  {
    // Test high sample rate bound (192001 Hz is just above the maximum)
    OpusTrackEncoder encoder(192001);
    AudioSegment segment;
    AudioGenerator generator(2, 192001);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
  }

  {
    // Test high sample rate bound (192000 Hz is the maximum accepted rate)
    OpusTrackEncoder encoder(192000);
    AudioSegment segment;
    AudioGenerator generator(2, 192000);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }
}
// Pulls audio from the single external input stream, resamples each of its
// tracks, and mixes them down into one WEBAUDIO_BLOCK_SIZE output block
// (mLastChunks[0]) for the interval [aFrom, aTo).
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  // Parallel to mTrackMap: marks which map entries were touched this pass so
  // stale entries can be pruned afterwards.
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }
    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    // Walk [aFrom, aTo) interval by interval, appending input data (or
    // silence where the input is blocked) to 'segment'.
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
          inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
    }
    // Take exactly one output block's worth of resampled data for this track,
    // then discard what we've consumed.
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    inputChannels = GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
  }

  // Prune map entries for tracks that disappeared from the input. Iterate
  // backwards so RemoveElementAt doesn't shift unvisited indices.
  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  // Mix every per-track block into mLastChunks[0]; the block is allocated
  // lazily on the first non-null chunk.
  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    // Nothing contributed any audio; output silence.
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream graph along with null data.
  AdvanceOutputSegment();
}