void AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<const AudioBlock*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->ChannelCount() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  aTmpChunk.AllocateChannels(outputChannelCount);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
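// A minimal standalone sketch (hypothetical, not the Gecko code) of the mixing
// policy ObtainInputBlock relies on: the output channel count is the superset
// of the connected inputs' channel counts (assumed here to be the maximum), and
// every contributing input block is accumulated into the output. For brevity the
// sketch treats missing channels as silence (discrete up-mix); the real
// AccumulateInputChunk applies proper up/down-mixing.
#include <algorithm>
#include <array>
#include <cstddef>
#include <vector>

namespace sketch {

constexpr size_t kBlockSize = 128; // one render quantum, like WEBAUDIO_BLOCK_SIZE

using Channel = std::array<float, kBlockSize>;
using Block = std::vector<Channel>; // one entry per channel

Block MixBlocks(const std::vector<Block>& aInputs)
{
  size_t channels = 1;
  for (const Block& b : aInputs) {
    channels = std::max(channels, b.size()); // channel-count superset
  }
  Block out(channels, Channel{}); // value-initialized to silence
  for (const Block& b : aInputs) {
    for (size_t c = 0; c < b.size(); ++c) {
      for (size_t i = 0; i < kBlockSize; ++i) {
        out[c][i] += b[c][i]; // accumulate each input into the output
      }
    }
  }
  return out;
}

} // namespace sketch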
void DelayBuffer::Write(const AudioBlock& aInputChunk)
{
  // We must have a reference to the buffer if there are channels
  MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.ChannelCount());
#ifdef DEBUG
  MOZ_ASSERT(!mHaveWrittenBlock);
  mHaveWrittenBlock = true;
#endif

  if (!EnsureBuffer()) {
    return;
  }

  if (mCurrentChunk == mLastReadChunk) {
    mLastReadChunk = -1; // invalidate cache
  }
  mChunks[mCurrentChunk] = aInputChunk.AsAudioChunk();
}
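// A simplified sketch (hypothetical type, not the Gecko DelayBuffer) of the
// chunk ring-buffer arithmetic Write() depends on: whole blocks are stored in a
// circular array, the write slot advances by one per rendered block, and a
// delay of N blocks reads from the slot N positions behind the write slot.
#include <cstddef>

struct ChunkRingSketch {
  size_t mNumChunks;        // capacity of the ring, in blocks
  size_t mCurrentChunk = 0; // current write slot

  void AdvanceWriteSlot() {
    mCurrentChunk = (mCurrentChunk + 1) % mNumChunks;
  }

  // Index of the block written aDelayInBlocks render quanta ago.
  size_t ReadSlotForDelay(size_t aDelayInBlocks) const {
    return (mCurrentChunk + mNumChunks - (aDelayInBlocks % mNumChunks)) % mNumChunks;
  }
};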
void PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput,
                                                 AudioBlock* aOutput)
{
  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
  int inputChannels = aInput.ChannelCount();

  // If both the listener and the panner are in the same spot, and no cone gain
  // is specified, this node is a no-op.
  if (mListenerPosition == mPosition &&
      mConeInnerAngle == 360 &&
      mConeOuterAngle == 360) {
    *aOutput = aInput;
    return;
  }

  // The output of this node is always stereo, no matter what the inputs are.
  aOutput->AllocateChannels(2);

  ComputeAzimuthAndElevation(azimuth, elevation);
  coneGain = ComputeConeGain();

  // The following algorithm is described in the spec.
  // Clamp azimuth into the [-180, 180] range, then wrap it into [-90, 90].
  azimuth = min(180.f, max(-180.f, azimuth));

  // Wrap around
  if (azimuth < -90.f) {
    azimuth = -180.f - azimuth;
  } else if (azimuth > 90) {
    azimuth = 180.f - azimuth;
  }

  // Normalize the value in the [0, 1] range.
  if (inputChannels == 1) {
    normalizedAzimuth = (azimuth + 90.f) / 180.f;
  } else {
    if (azimuth <= 0) {
      normalizedAzimuth = (azimuth + 90.f) / 90.f;
    } else {
      normalizedAzimuth = azimuth / 90.f;
    }
  }

  distanceGain = ComputeDistanceGain();

  // Actually compute the left and right gain.
  gainL = cos(0.5 * M_PI * normalizedAzimuth);
  gainR = sin(0.5 * M_PI * normalizedAzimuth);

  // Compute the output.
  ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0);

  aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
}
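// A small self-contained sketch of the equal-power gain formula used above,
// assuming a mono input and an azimuth already wrapped into [-90, 90] degrees.
// Because cos^2(x) + sin^2(x) == 1, the summed power of the two channels stays
// constant wherever the source is panned.
#include <cmath>
#include <cstdio>

int main()
{
  const double kPi = 3.14159265358979323846;
  for (double azimuth = -90.0; azimuth <= 90.0; azimuth += 45.0) {
    double normalized = (azimuth + 90.0) / 180.0; // mono: map azimuth to [0, 1]
    double gainL = std::cos(0.5 * kPi * normalized);
    double gainR = std::sin(0.5 * kPi * normalized);
    std::printf("azimuth %6.1f  L %.3f  R %.3f  L^2+R^2 %.3f\n",
                azimuth, gainL, gainR, gainL * gainL + gainR * gainR);
  }
  return 0;
}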
void ProcessBlock(AudioNodeStream* aStream, GraphTime aFrom,
                  const AudioBlock& aInput, AudioBlock* aOutput,
                  bool* aFinished) override
{
  // This node is not connected to anything. Per spec, we don't fire the
  // onaudioprocess event. We also want to clear out the input and output
  // buffer queue, and output a null buffer.
  if (!mIsConnected) {
    aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
    mSharedBuffers->Reset();
    mInputWriteIndex = 0;
    return;
  }

  // The input buffer is allocated lazily when non-null input is received.
  if (!aInput.IsNull() && !mInputBuffer) {
    mInputBuffer = ThreadSharedFloatArrayBufferList::
      Create(mInputChannelCount, mBufferSize, fallible);
    if (mInputBuffer && mInputWriteIndex) {
      // Zero leading for null chunks that were skipped.
      for (uint32_t i = 0; i < mInputChannelCount; ++i) {
        float* channelData = mInputBuffer->GetDataForWrite(i);
        PodZero(channelData, mInputWriteIndex);
      }
    }
  }

  // First, record our input buffer, if its allocation succeeded.
  uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
  for (uint32_t i = 0; i < inputChannelCount; ++i) {
    float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
    if (aInput.IsNull()) {
      PodZero(writeData, aInput.GetDuration());
    } else {
      MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
      MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount);
      AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                     aInput.mVolume, writeData);
    }
  }
  mInputWriteIndex += aInput.GetDuration();

  // Now, see if we have data to output.
  // Note that we need to do this before sending the buffer to the main
  // thread so that our delay time is updated.
  *aOutput = mSharedBuffers->GetOutputBuffer();

  if (mInputWriteIndex >= mBufferSize) {
    SendBuffersToMainThread(aStream, aFrom);
    mInputWriteIndex -= mBufferSize;
  }
}
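// A hypothetical sketch of the buffering pattern ProcessBlock follows: fixed
// 128-frame render quanta are appended to a script buffer of mBufferSize
// frames, and a full buffer is dispatched (here just reported) every time the
// write index wraps. Names and the dispatch mechanism are illustrative only.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kRenderQuantum = 128;

struct ScriptBufferSketch {
  uint32_t mBufferSize;
  uint32_t mWriteIndex;

  // Returns true when a full buffer is ready to hand to the main thread.
  bool AppendBlock() {
    mWriteIndex += kRenderQuantum;
    if (mWriteIndex >= mBufferSize) {
      mWriteIndex -= mBufferSize;
      return true;
    }
    return false;
  }
};

int main()
{
  ScriptBufferSketch buffering{512, 0};
  for (int block = 1; block <= 8; ++block) {
    std::printf("block %d -> %s\n", block,
                buffering.AppendBlock() ? "dispatch" : "accumulate");
  }
  return 0;
}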
/** Convolution processing handling interpolation between previous and new
    states of the convolution engines.
*/
void processSamples (const AudioBlock<float>& input, AudioBlock<float>& output)
{
    processFifo();

    size_t numChannels = input.getNumChannels();
    size_t numSamples  = jmin (input.getNumSamples(), output.getNumSamples());

    if (mustInterpolate == false)
    {
        for (size_t channel = 0; channel < numChannels; ++channel)
            engines[(int) channel]->processSamples (input.getChannelPointer (channel),
                                                    output.getChannelPointer (channel),
                                                    numSamples);
    }
    else
    {
        auto interpolated = AudioBlock<float> (interpolationBuffer).getSubBlock (0, numSamples);

        for (size_t channel = 0; channel < numChannels; ++channel)
        {
            auto&& buffer = output.getSingleChannelBlock (channel);
            interpolationBuffer.copyFrom ((int) channel, 0,
                                          input.getChannelPointer (channel),
                                          (int) numSamples);

            engines[(int) channel]->processSamples (input.getChannelPointer (channel),
                                                    buffer.getChannelPointer (0),
                                                    numSamples);
            changeVolumes[channel].applyGain (buffer.getChannelPointer (0), (int) numSamples);

            auto* interPtr = interpolationBuffer.getWritePointer ((int) channel);
            engines[(int) channel + 2]->processSamples (interPtr, interPtr, numSamples);
            changeVolumes[channel + 2].applyGain (interPtr, (int) numSamples);

            buffer += interpolated.getSingleChannelBlock (channel);
        }

        if (changeVolumes[0].isSmoothing() == false)
        {
            mustInterpolate = false;

            for (auto channel = 0; channel < 2; ++channel)
                engines[channel]->copyStateFromOtherEngine (*engines[channel + 2]);
        }
    }
}
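// A rough sketch (not the JUCE API) of the crossfade idea implemented above:
// while an impulse-response change is pending, the previous and the new engine
// both render, their outputs are weighted by complementary fade gains, and the
// sum is written back until the fade completes. Names are illustrative only.
#include <algorithm>
#include <cstddef>

static void crossfadeSketch (float* previousOut, const float* freshOut,
                             size_t numSamples, float& fade, float fadeStep)
{
    for (size_t i = 0; i < numSamples; ++i)
    {
        fade = std::min (1.0f, fade + fadeStep);          // advance the fade gain

        previousOut[i] = (1.0f - fade) * previousOut[i]   // old engine fading out
                       + fade * freshOut[i];              // new engine fading in
    }
}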
void AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                                uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    if (!mInputs[0]->PassTrackThrough(tracks->GetID())) {
      continue;
    }

    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }

    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // We know this stream does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // The input stream is not blocked in this interval, so no need to call
        // GraphTimeToStreamTimeWithBlocking.
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          mLastChunks[0].AllocateChannels(inputChannels);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}
void PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput,
                                                 AudioBlock* aOutput,
                                                 StreamTime tick)
{
  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
  int inputChannels = aInput.ChannelCount();

  // Optimize the case where the position and orientation are constant for this
  // processing block: we can just apply a constant gain on the left and right
  // channels.
  if (mPositionX.HasSimpleValue() &&
      mPositionY.HasSimpleValue() &&
      mPositionZ.HasSimpleValue() &&
      mOrientationX.HasSimpleValue() &&
      mOrientationY.HasSimpleValue() &&
      mOrientationZ.HasSimpleValue()) {

    ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY,
                                                          mPositionZ, tick);
    ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX,
                                                             mOrientationY,
                                                             mOrientationZ, tick);
    if (!orientation.IsZero()) {
      orientation.Normalize();
    }

    // If both the listener and the panner are in the same spot, and no cone
    // gain is specified, this node is a no-op.
    if (mListenerPosition == position &&
        mConeInnerAngle == 360 &&
        mConeOuterAngle == 360) {
      *aOutput = aInput;
      return;
    }

    // The output of this node is always stereo, no matter what the inputs are.
    aOutput->AllocateChannels(2);

    ComputeAzimuthAndElevation(position, azimuth, elevation);
    coneGain = ComputeConeGain(position, orientation);

    // The following algorithm is described in the spec.
    // Clamp azimuth into the [-180, 180] range, then wrap it into [-90, 90].
    azimuth = min(180.f, max(-180.f, azimuth));

    // Wrap around
    if (azimuth < -90.f) {
      azimuth = -180.f - azimuth;
    } else if (azimuth > 90) {
      azimuth = 180.f - azimuth;
    }

    // Normalize the value in the [0, 1] range.
    if (inputChannels == 1) {
      normalizedAzimuth = (azimuth + 90.f) / 180.f;
    } else {
      if (azimuth <= 0) {
        normalizedAzimuth = (azimuth + 90.f) / 90.f;
      } else {
        normalizedAzimuth = azimuth / 90.f;
      }
    }

    distanceGain = ComputeDistanceGain(position);

    // Actually compute the left and right gain.
    gainL = cos(0.5 * M_PI * normalizedAzimuth);
    gainR = sin(0.5 * M_PI * normalizedAzimuth);

    // Compute the output.
    ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0);

    aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
  } else {
    float positionX[WEBAUDIO_BLOCK_SIZE];
    float positionY[WEBAUDIO_BLOCK_SIZE];
    float positionZ[WEBAUDIO_BLOCK_SIZE];
    float orientationX[WEBAUDIO_BLOCK_SIZE];
    float orientationY[WEBAUDIO_BLOCK_SIZE];
    float orientationZ[WEBAUDIO_BLOCK_SIZE];

    // The output of this node is always stereo, no matter what the inputs are.
    aOutput->AllocateChannels(2);

    if (!mPositionX.HasSimpleValue()) {
      mPositionX.GetValuesAtTime(tick, positionX, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionX[0] = mPositionX.GetValueAtTime(tick);
    }
    if (!mPositionY.HasSimpleValue()) {
      mPositionY.GetValuesAtTime(tick, positionY, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionY[0] = mPositionY.GetValueAtTime(tick);
    }
    if (!mPositionZ.HasSimpleValue()) {
      mPositionZ.GetValuesAtTime(tick, positionZ, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionZ[0] = mPositionZ.GetValueAtTime(tick);
    }
    if (!mOrientationX.HasSimpleValue()) {
      mOrientationX.GetValuesAtTime(tick, orientationX, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationX[0] = mOrientationX.GetValueAtTime(tick);
    }
    if (!mOrientationY.HasSimpleValue()) {
      mOrientationY.GetValuesAtTime(tick, orientationY, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationY[0] = mOrientationY.GetValueAtTime(tick);
    }
    if (!mOrientationZ.HasSimpleValue()) {
      mOrientationZ.GetValuesAtTime(tick, orientationZ, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationZ[0] = mOrientationZ.GetValueAtTime(tick);
    }

    float computedGain[2*WEBAUDIO_BLOCK_SIZE + 4];
    bool onLeft[WEBAUDIO_BLOCK_SIZE];

    float* alignedComputedGain = ALIGNED16(computedGain);
    ASSERT_ALIGNED16(alignedComputedGain);

    for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
      ThreeDPoint position(mPositionX.HasSimpleValue() ? positionX[0] : positionX[counter],
                           mPositionY.HasSimpleValue() ? positionY[0] : positionY[counter],
                           mPositionZ.HasSimpleValue() ? positionZ[0] : positionZ[counter]);
      ThreeDPoint orientation(mOrientationX.HasSimpleValue() ? orientationX[0] : orientationX[counter],
                              mOrientationY.HasSimpleValue() ? orientationY[0] : orientationY[counter],
                              mOrientationZ.HasSimpleValue() ? orientationZ[0] : orientationZ[counter]);
      if (!orientation.IsZero()) {
        orientation.Normalize();
      }

      ComputeAzimuthAndElevation(position, azimuth, elevation);
      coneGain = ComputeConeGain(position, orientation);

      // The following algorithm is described in the spec.
      // Clamp azimuth into the [-180, 180] range, then wrap it into [-90, 90].
      azimuth = min(180.f, max(-180.f, azimuth));

      // Wrap around
      if (azimuth < -90.f) {
        azimuth = -180.f - azimuth;
      } else if (azimuth > 90) {
        azimuth = 180.f - azimuth;
      }

      // Normalize the value in the [0, 1] range.
      if (inputChannels == 1) {
        normalizedAzimuth = (azimuth + 90.f) / 180.f;
      } else {
        if (azimuth <= 0) {
          normalizedAzimuth = (azimuth + 90.f) / 90.f;
        } else {
          normalizedAzimuth = azimuth / 90.f;
        }
      }

      distanceGain = ComputeDistanceGain(position);

      // Actually compute the left and right gain.
      float gainL = cos(0.5 * M_PI * normalizedAzimuth) *
                    aInput.mVolume * distanceGain * coneGain;
      float gainR = sin(0.5 * M_PI * normalizedAzimuth) *
                    aInput.mVolume * distanceGain * coneGain;

      alignedComputedGain[counter] = gainL;
      alignedComputedGain[WEBAUDIO_BLOCK_SIZE + counter] = gainR;
      onLeft[counter] = azimuth <= 0;
    }

    // Apply the gain to the output buffer
    ApplyStereoPanning(aInput, aOutput, alignedComputedGain,
                       &alignedComputedGain[WEBAUDIO_BLOCK_SIZE], onLeft);
  }
}