/**
 * Accumulate aInput (scaled by aScale) into aOutput, element by element:
 * aOutput[i] += aInput[i] * aScale for aSize frames.
 *
 * Dispatches to a NEON or SSE kernel when the CPU supports it; the SSE path
 * first consumes frames one at a time until both pointers are 16-byte
 * aligned, hands the largest 16-frame-aligned middle section to the SIMD
 * kernel, and leaves any remainder to the scalar tail below.
 */
void AudioBufferAddWithScale(const float* aInput, float aScale, float* aOutput,
                             uint32_t aSize) {
#ifdef BUILD_ARM_NEON
  if (mozilla::supports_neon()) {
    AudioBufferAddWithScale_NEON(aInput, aScale, aOutput, aSize);
    return;
  }
#endif

#ifdef USE_SSE2
  if (mozilla::supports_sse2()) {
    // Scalar prologue: advance until both buffers are 16-byte aligned.
    // The unity-gain case skips the multiply entirely.
    if (aScale == 1.0f) {
      for (; aSize && !(IS_ALIGNED16(aInput) && IS_ALIGNED16(aOutput));
           --aSize) {
        *aOutput++ += *aInput++;
      }
    } else {
      for (; aSize && !(IS_ALIGNED16(aInput) && IS_ALIGNED16(aOutput));
           --aSize) {
        *aOutput++ += *aInput++ * aScale;
      }
    }

    // Largest prefix of the remainder that is a multiple of 16 frames goes
    // to the SIMD kernel.
    const uint32_t simdFrames = aSize & ~uint32_t(0x0F);
    if (simdFrames != 0) {
      AudioBufferAddWithScale_SSE(aInput, aScale, aOutput, simdFrames);
      // Skip past what the kernel handled; the scalar tail finishes the rest.
      aInput += simdFrames;
      aOutput += simdFrames;
      aSize -= simdFrames;
    }
  }
#endif

  // Scalar tail (and the whole job when no SIMD path is available).
  if (aScale == 1.0f) {
    const float* inputEnd = aInput + aSize;
    while (aInput != inputEnd) {
      *aOutput++ += *aInput++;
    }
  } else {
    const float* inputEnd = aInput + aSize;
    while (aInput != inputEnd) {
      *aOutput++ += *aInput++ * aScale;
    }
  }
}
/** * Converts the data in aSegment to a single chunk aBlock. aSegment must have * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the * channels in every chunk of aSegment. aBlock must be float format or null. */ static void ConvertSegmentToAudioBlock(AudioSegment* aSegment, AudioBlock* aBlock, int32_t aFallbackChannelCount) { NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration"); { AudioSegment::ChunkIterator ci(*aSegment); NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!"); if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE && (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) { bool aligned = true; for (size_t i = 0; i < ci->mChannelData.Length(); ++i) { if (!IS_ALIGNED16(ci->mChannelData[i])) { aligned = false; break; } } // Return this chunk directly to avoid copying data. if (aligned) { *aBlock = *ci; return; } } } aBlock->AllocateChannels(aFallbackChannelCount); uint32_t duration = 0; for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) { switch (ci->mBufferFormat) { case AUDIO_FORMAT_S16: { CopyChunkToBlock<int16_t>(*ci, aBlock, duration); break; } case AUDIO_FORMAT_FLOAT32: { CopyChunkToBlock<float>(*ci, aBlock, duration); break; } case AUDIO_FORMAT_SILENCE: { // The actual type of the sample does not matter here, but we still need // to send some audio to the graph. CopyChunkToBlock<float>(*ci, aBlock, duration); break; } } duration += ci->GetDuration(); } }
/**
 * Copy as many frames as possible from the source buffer to aOutput, and
 * advance aOffsetWithinBlock and aCurrentPosition based on how many frames
 * we write. This will never advance aOffsetWithinBlock past
 * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop. It takes data from
 * the buffer at aBufferOffset, and never takes more data than aBufferMax.
 * This function knows when it needs to allocate the output buffer, and also
 * optimizes the case where it can avoid memory allocations.
 *
 * @param aOutput             Block to receive the frames.
 * @param aChannels           Channel count of the source buffer; 0 means the
 *                            source is silent and aOutput is set to null.
 * @param aOffsetWithinBlock  In/out: write position inside aOutput, advanced
 *                            by the number of frames produced.
 * @param aCurrentPosition    In/out: stream position, advanced in step with
 *                            aOffsetWithinBlock.
 * @param aBufferMax          Upper bound on the source-buffer frames we may
 *                            consume (mBufferPosition never passes it on the
 *                            copy paths below).
 */
void CopyFromBuffer(AudioBlock* aOutput, uint32_t aChannels,
                    uint32_t* aOffsetWithinBlock, StreamTime* aCurrentPosition,
                    uint32_t aBufferMax) {
  MOZ_ASSERT(*aCurrentPosition < mStop);
  // Frames we can still produce in this block, capped by both the block
  // boundary and the stop position.
  uint32_t availableInOutput =
      std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                           mStop - *aCurrentPosition);
  if (mResampler) {
    // Resampling path handles its own position bookkeeping.
    CopyFromInputBufferWithResampling(aOutput, aChannels, aOffsetWithinBlock,
                                      availableInOutput, aCurrentPosition,
                                      aBufferMax);
    return;
  }
  if (aChannels == 0) {
    // Silent source: emit a null block but still advance all positions so
    // timing (looping, the ended event) stays correct.
    aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
    // There is no attempt here to limit advance so that mBufferPosition is
    // limited to aBufferMax. The only observable effect of skipping the
    // check would be in the precise timing of the ended event if the loop
    // attribute is reset after playback has looped.
    *aOffsetWithinBlock += availableInOutput;
    *aCurrentPosition += availableInOutput;
    // Rounding at the start and end of the period means that fractional
    // increments essentially accumulate if outRate remains constant. If
    // outRate is varying, then accumulation happens on average but not
    // precisely.
    // NOTE(review): start/end are computed from *aCurrentPosition AFTER it
    // was advanced above, so the rounding window is [pos+avail, pos+2*avail]
    // rather than [pos, pos+avail]. The delta is the same on average;
    // confirm the rounding phase is intended.
    TrackTicks start =
        *aCurrentPosition * mBufferSampleRate / mResamplerOutRate;
    TrackTicks end = (*aCurrentPosition + availableInOutput) *
        mBufferSampleRate / mResamplerOutRate;
    mBufferPosition += end - start;
    return;
  }
  // Frames we will actually copy: limited by both the output space and the
  // remaining source data.
  uint32_t numFrames =
      std::min(aBufferMax - mBufferPosition, availableInOutput);
  // We can borrow (share) the source buffers instead of copying only when
  // the output is a full float block and every channel pointer at the
  // current read position is 16-byte aligned (SIMD consumers require this).
  bool shouldBorrow = false;
  if (numFrames == WEBAUDIO_BLOCK_SIZE &&
      mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
    shouldBorrow = true;
    for (uint32_t i = 0; i < aChannels; ++i) {
      if (!IS_ALIGNED16(mBuffer.ChannelData<float>()[i] + mBufferPosition)) {
        shouldBorrow = false;
        break;
      }
    }
  }
  MOZ_ASSERT(mBufferPosition < aBufferMax);
  if (shouldBorrow) {
    // Zero-copy: point aOutput at the source buffers.
    BorrowFromInputBuffer(aOutput, aChannels);
  } else {
    // Only allocate when starting a fresh block; a non-zero offset means a
    // previous call in this block already allocated the channels.
    if (*aOffsetWithinBlock == 0) {
      aOutput->AllocateChannels(aChannels);
    }
    if (mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
      CopyFromInputBuffer<float>(aOutput, aChannels, *aOffsetWithinBlock,
                                 numFrames);
    } else {
      MOZ_ASSERT(mBuffer.mBufferFormat == AUDIO_FORMAT_S16);
      CopyFromInputBuffer<int16_t>(aOutput, aChannels, *aOffsetWithinBlock,
                                   numFrames);
    }
  }
  // Advance all three positions in lockstep by what we produced.
  *aOffsetWithinBlock += numFrames;
  *aCurrentPosition += numFrames;
  mBufferPosition += numFrames;
}