void
AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<const AudioBlock*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->ChannelCount() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  aTmpChunk.AllocateChannels(outputChannelCount);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
void
DelayBuffer::Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                  AudioChunk* aOutputChunk,
                  ChannelInterpretation aChannelInterpretation)
{
  int chunkCount = mChunks.Length();
  if (!chunkCount) {
    aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  // Find the maximum number of contributing channels to determine the output
  // channel count that retains all signal information.  Buffered blocks will
  // be upmixed if necessary.
  //
  // First find the range of "delay" offsets backwards from the current
  // position.  Note that these may be negative for frames that are after the
  // current position (including i).
  double minDelay = aPerFrameDelays[0];
  double maxDelay = minDelay;
  for (unsigned i = 1; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    minDelay = std::min(minDelay, aPerFrameDelays[i] - i);
    maxDelay = std::max(maxDelay, aPerFrameDelays[i] - i);
  }

  // Now find the chunks touched by this range and check their channel counts.
  int oldestChunk = ChunkForDelay(int(maxDelay) + 1);
  int youngestChunk = ChunkForDelay(minDelay);

  uint32_t channelCount = 0;
  for (int i = oldestChunk; true; i = (i + 1) % chunkCount) {
    channelCount = GetAudioChannelsSuperset(channelCount,
                                            mChunks[i].ChannelCount());
    if (i == youngestChunk) {
      break;
    }
  }

  if (channelCount) {
    AllocateAudioBlock(channelCount, aOutputChunk);
    ReadChannels(aPerFrameDelays, aOutputChunk,
                 0, channelCount, aChannelInterpretation);
  } else {
    aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Remember currentDelayFrames for the next ProcessBlock call
  mCurrentDelay = aPerFrameDelays[WEBAUDIO_BLOCK_SIZE - 1];
}
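// A minimal standalone sketch (not the Gecko API; kBlockSize and all names
// below are illustrative) of why Read() above takes min/max over
// aPerFrameDelays[i] - i: output frame i of the current block reads input at
// (blockStart + i) - delay[i], i.e. delay[i] - i frames before blockStart, so
// the min/max of that expression bound the slice of buffered history the
// whole block can touch.
#include <algorithm>
#include <cstddef>

constexpr size_t kBlockSize = 128;  // WEBAUDIO_BLOCK_SIZE in Gecko

struct DelaySpan {
  double mMin;  // smallest offset back from the block start (may be negative)
  double mMax;  // largest offset back from the block start
};

DelaySpan TouchedDelayRange(const double (&aPerFrameDelays)[kBlockSize])
{
  DelaySpan span{aPerFrameDelays[0], aPerFrameDelays[0]};
  for (size_t i = 1; i < kBlockSize; ++i) {
    span.mMin = std::min(span.mMin, aPerFrameDelays[i] - double(i));
    span.mMax = std::max(span.mMax, aPerFrameDelays[i] - double(i));
  }
  return span;
}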
void
AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
                   uint32_t aOutputChannelCount,
                   const void* aZeroChannel)
{
  uint32_t inputChannelCount = aChannelArray->Length();
  uint32_t outputChannelCount =
    GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
  NS_ASSERTION(outputChannelCount > inputChannelCount,
               "No up-mix needed");
  MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels");
  MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels");

  aChannelArray->SetLength(outputChannelCount);

  if (inputChannelCount < CUSTOM_CHANNEL_LAYOUTS &&
      outputChannelCount <= CUSTOM_CHANNEL_LAYOUTS) {
    const UpMixMatrix& m = gUpMixMatrices[
      gMixingMatrixIndexByChannels[inputChannelCount - 1] +
      outputChannelCount - inputChannelCount - 1];

    const void* outputChannels[CUSTOM_CHANNEL_LAYOUTS];

    for (uint32_t i = 0; i < outputChannelCount; ++i) {
      uint8_t channelIndex = m.mInputDestination[i];
      if (channelIndex == IGNORE) {
        outputChannels[i] = aZeroChannel;
      } else {
        outputChannels[i] = aChannelArray->ElementAt(channelIndex);
      }
    }
    for (uint32_t i = 0; i < outputChannelCount; ++i) {
      aChannelArray->ElementAt(i) = outputChannels[i];
    }
    return;
  }

  for (uint32_t i = inputChannelCount; i < outputChannelCount; ++i) {
    aChannelArray->ElementAt(i) = aZeroChannel;
  }
}
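// A minimal sketch of the table-driven up-mix idea used above: each output
// channel either aliases an existing input channel or points at a shared
// silent channel, so no samples are copied.  The sentinel value and the
// mono-to-stereo mapping below are illustrative assumptions, not the real
// gUpMixMatrices tables.
#include <array>
#include <cstddef>
#include <cstdint>

constexpr uint8_t kIgnore = 0xFF;  // output channel gets silence

struct UpMixMap {
  std::array<uint8_t, 2> mInputDestination;  // per output channel: input index
};

// Assumed mono -> stereo mapping: both outputs alias the one input channel.
constexpr UpMixMap kMonoToStereo{{0, 0}};

void UpMixMonoToStereo(const float* aInput[], const float* aOutput[2],
                       const float* aZeroChannel)
{
  for (size_t i = 0; i < 2; ++i) {
    uint8_t src = kMonoToStereo.mInputDestination[i];
    aOutput[i] = (src == kIgnore) ? aZeroChannel : aInput[src];
  }
}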
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    // It is possible for mLastChunks to be empty here: `a` might be an
    // AudioNodeStream that has not been scheduled yet because it is further
    // down the graph while also being connected back to this node (a cycle).
    // Because we enforce the presence of at least one DelayNode, with at
    // least one block of delay, and because the output of a DelayNode that
    // has been fed less than `delayTime` worth of audio is silence, we can
    // simply continue here: this input would not influence the output of
    // this node.  On the next iteration, a->mLastChunks.IsEmpty() will be
    // false, and everything will work as usual.
    if (a->mLastChunks.IsEmpty()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
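// A sketch of what ComputedNumberOfChannels plausibly folds in (the explicit
// switch over mChannelCountMode in the variant further below performs the
// same adjustment inline): the computed channel superset is overridden or
// clamped according to the node's channelCountMode.  Names here follow the
// Web Audio spec concepts, not an exact Gecko signature.
#include <algorithm>
#include <cstdint>

enum class CountMode { Max, ClampedMax, Explicit };

uint32_t ApplyChannelCountMode(uint32_t aComputed, uint32_t aChannelCount,
                               CountMode aMode)
{
  switch (aMode) {
    case CountMode::Explicit:
      return aChannelCount;                       // ignore the computed count
    case CountMode::ClampedMax:
      return std::min(aComputed, aChannelCount);  // cap at channelCount
    case CountMode::Max:
    default:
      return aComputed;                           // use the superset as-is
  }
}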
AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 0;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunk;
    // XXX when we implement DelayNode, this will no longer be true and we'll
    // need to treat a null chunk (when the DelayNode hasn't had a chance
    // to produce data yet) as silence here.
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return aTmpChunk;
  }

  if (inputChunkCount == 1) {
    return inputChunks[0];
  }

  AllocateAudioBlock(outputChannelCount, aTmpChunk);

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
      NS_ASSERTION(outputChannelCount == channels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(
        const_cast<void*>(aTmpChunk->mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
  return aTmpChunk;
}
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this
  // stream.  Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded();
         iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels,
                                               iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream
  // graph along with null data.
  AdvanceOutputSegment();
}
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    break;
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels with zeros
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }

        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);

        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the remaining channels
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(
        const_cast<void*>(aTmpChunk.mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
}
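// A minimal standalone sketch (not the Gecko API; kBlockSize and the function
// names are illustrative assumptions) of the copy-then-add accumulation
// pattern in the inner loop above: the first contributing input overwrites
// the output channel, and every later input is summed into it, so no separate
// zero-fill pass is needed once at least one input has written data.
#include <cstddef>

constexpr size_t kBlockSize = 128;  // WEBAUDIO_BLOCK_SIZE in Gecko

void CopyChannelWithScale(const float* aIn, float aScale, float* aOut)
{
  for (size_t i = 0; i < kBlockSize; ++i) {
    aOut[i] = aIn[i] * aScale;
  }
}

void AddChannelWithScale(const float* aIn, float aScale, float* aOut)
{
  for (size_t i = 0; i < kBlockSize; ++i) {
    aOut[i] += aIn[i] * aScale;
  }
}

void AccumulateChannel(size_t aInputIndex, const float* aIn, float aVolume,
                       float* aOut)
{
  if (aInputIndex == 0) {
    CopyChannelWithScale(aIn, aVolume, aOut);  // first input: overwrite
  } else {
    AddChannelWithScale(aIn, aVolume, aOut);   // later inputs: mix in
  }
}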
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this
  // stream.  Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    if (!mInputs[0]->PassTrackThrough(tracks->GetID())) {
      continue;
    }

    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }

    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // We know this stream does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // The input stream is not blocked in this interval, so no need to
        // call GraphTimeToStreamTimeWithBlocking.
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded();
         iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels,
                                               iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          mLastChunks[0].AllocateChannels(inputChannels);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}
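// A simplified sketch (hypothetical types; the real code converts between
// graph time and per-stream time first) of the interval copy loop above:
// each unblocked input interval is clamped to the data actually buffered in
// the source segment, and any shortfall is padded with silence so the output
// always advances by exactly the interval's duration.
#include <algorithm>
#include <cstdint>

using StreamTime = int64_t;

struct Interval {
  StreamTime mStart;
  StreamTime mEnd;
  bool mInputIsBlocked;
};

// aAppendSlice/aAppendNull stand in for the AudioSegment operations.
template <typename SliceFn, typename NullFn>
void CopyInterval(const Interval& aInterval, StreamTime aBufferedDuration,
                  SliceFn aAppendSlice, NullFn aAppendNull)
{
  StreamTime ticks = aInterval.mEnd - aInterval.mStart;
  if (aInterval.mInputIsBlocked) {
    aAppendNull(ticks);  // blocked input contributes only silence
    return;
  }
  StreamTime start = std::min(aBufferedDuration, aInterval.mStart);
  StreamTime end = std::min(aBufferedDuration, aInterval.mEnd);
  aAppendSlice(start, end);
  aAppendNull(ticks - (end - start));  // pad past the end of the track
}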
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this
  // stream.  Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }
    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks =
        trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
          inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is
          // blocked.
          trackMap->mEndOfConsumedInputTicks =
            TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() <
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE -
        trackMap->mResampledData.GetDuration());
    }
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition +
                                        WEBAUDIO_BLOCK_SIZE);
    inputChannels =
      GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
  }

  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream
  // graph along with null data.
  AdvanceOutputSegment();
}
void
AudioNodeExternalInputStream::TrackMapEntry::ResampleInputData(AudioSegment* aSegment)
{
  AudioSegment::ChunkIterator ci(*aSegment);
  while (!ci.IsEnded()) {
    const AudioChunk& chunk = *ci;
    nsAutoTArray<const void*,2> channels;
    if (chunk.GetDuration() > UINT32_MAX) {
      // This will cause us to OOM or overflow below. So let's just bail.
      NS_ERROR("Chunk duration out of bounds");
      return;
    }
    uint32_t duration = uint32_t(chunk.GetDuration());

    if (chunk.IsNull()) {
      nsAutoTArray<AudioDataValue,1024> silence;
      silence.SetLength(duration);
      PodZero(silence.Elements(), silence.Length());
      channels.SetLength(mResamplerChannelCount);
      for (uint32_t i = 0; i < channels.Length(); ++i) {
        channels[i] = silence.Elements();
      }
      ResampleChannels(channels, duration, AUDIO_OUTPUT_FORMAT, 0.0f);
    } else if (chunk.mChannelData.Length() == mResamplerChannelCount) {
      // Common case, since mResamplerChannelCount is set to the first chunk's
      // number of channels.
      channels.AppendElements(chunk.mChannelData);
      ResampleChannels(channels, duration, chunk.mBufferFormat, chunk.mVolume);
    } else {
      // Uncommon case. Since downmixing requires channels to be floats,
      // convert everything to floats now.
      uint32_t upChannels =
        GetAudioChannelsSuperset(chunk.mChannelData.Length(),
                                 mResamplerChannelCount);
      nsTArray<float> buffer;
      if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
        channels.AppendElements(chunk.mChannelData);
      } else {
        NS_ASSERTION(chunk.mBufferFormat == AUDIO_FORMAT_S16, "Unknown format");
        if (duration > UINT32_MAX/chunk.mChannelData.Length()) {
          NS_ERROR("Chunk duration out of bounds");
          return;
        }
        buffer.SetLength(chunk.mChannelData.Length()*duration);
        for (uint32_t i = 0; i < chunk.mChannelData.Length(); ++i) {
          const int16_t* samples =
            static_cast<const int16_t*>(chunk.mChannelData[i]);
          float* converted = &buffer[i*duration];
          for (uint32_t j = 0; j < duration; ++j) {
            converted[j] = AudioSampleToFloat(samples[j]);
          }
          channels.AppendElement(converted);
        }
      }
      nsTArray<float> zeroes;
      if (channels.Length() < upChannels) {
        zeroes.SetLength(duration);
        PodZero(zeroes.Elements(), zeroes.Length());
        AudioChannelsUpMix(&channels, upChannels, zeroes.Elements());
      }
      if (channels.Length() == mResamplerChannelCount) {
        ResampleChannels(channels, duration, AUDIO_FORMAT_FLOAT32,
                         chunk.mVolume);
      } else {
        nsTArray<float> output;
        if (duration > UINT32_MAX/mResamplerChannelCount) {
          NS_ERROR("Chunk duration out of bounds");
          return;
        }
        output.SetLength(duration*mResamplerChannelCount);
        nsAutoTArray<float*,2> outputPtrs;
        nsAutoTArray<const void*,2> outputPtrsConst;
        for (uint32_t i = 0; i < mResamplerChannelCount; ++i) {
          outputPtrs.AppendElement(output.Elements() + i*duration);
          outputPtrsConst.AppendElement(outputPtrs[i]);
        }
        AudioChannelsDownMix(channels, outputPtrs.Elements(),
                             outputPtrs.Length(), duration);
        ResampleChannels(outputPtrsConst, duration, AUDIO_FORMAT_FLOAT32,
                         chunk.mVolume);
      }
    }
    ci.Next();
  }
}
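// A small sketch of the S16 -> float conversion the uncommon path above
// performs before down-mixing: 16-bit PCM is scaled into [-1, 1) so that all
// channels share one sample format.  The 1/32768 scale factor is an assumed
// stand-in for AudioSampleToFloat's behaviour; function names are
// illustrative.
#include <cstddef>
#include <cstdint>

inline float SampleToFloat(int16_t aSample)
{
  return float(aSample) / 32768.0f;  // assumed scale; full-range S16 -> float
}

void ConvertS16ChannelToFloat(const int16_t* aIn, float* aOut, size_t aFrames)
{
  for (size_t i = 0; i < aFrames; ++i) {
    aOut[i] = SampleToFloat(aIn[i]);
  }
}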