void TestDeinterleaveAndConvert() { size_t arraySize = 1024; size_t maxChannels = 8; // 7.1 for (uint32_t channels = 1; channels < maxChannels; channels++) { const SrcT* src = GetInterleavedChannelArray<SrcT>(channels, arraySize); DstT** dst = GetPlanarArray<DstT>(channels, arraySize); DeinterleaveAndConvertBuffer(src, arraySize, channels, dst); for (size_t channel = 0; channel < channels; channel++) { for (size_t i = 0; i < arraySize; i++) { ASSERT_TRUE(FuzzyEqual(dst[channel][i], FloatToAudioSample<DstT>(1. / (channel + 1)))); } } DeleteInterleavedChannelArray(src); DeletePlanarArray(dst, channels); } }
// Push one block of captured microphone audio into every registered
// MediaStreamGraph source. aBuffer holds aFrames frames of interleaved
// samples of type T across aChannels channels. No-op unless capture has
// been started (mState == kStarted).
void MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer, size_t aFrames, uint32_t aChannels) {
  // Drop data delivered before Start() / after Stop().
  if (mState != kStarted) {
    return;
  }

  // Debug-only throughput logging, throttled to roughly once per second
  // (mSampleFrequency frames at the capture rate ~= 1 s of audio).
  if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
    mTotalFrames += aFrames;
    if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
      MOZ_LOG(AudioLogModule(), LogLevel::Debug,
              ("%p: Inserting %zu samples into graph, total frames = %" PRIu64,
               (void*)this, aFrames, mTotalFrames));
      mLastLogFrames = mTotalFrames;
    }
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    // Sources can be null after removal; skip holes.
    if (!mSources[i]) {
      continue;
    }

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    // Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1 || aChannels == 2,
               "GraphDriver only supports mono and stereo audio for now");

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    // One shared allocation sized for all channels; the segment takes
    // ownership of it below via buffer.forget().
    RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    AutoTArray<const T*, 8> channels;
    if (aChannels == 1) {
      // Mono: the input is already "planar"; copy it straight into the
      // shared buffer and expose it as the single channel.
      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
      channels.AppendElement(static_cast<T*>(buffer->Data()));
    } else {
      // Multi-channel: carve the shared buffer into aChannels consecutive
      // planes of aFrames samples each, then deinterleave the input into
      // those planes. `channels` (const views) and `write_channels`
      // (mutable) alias the same plane pointers.
      channels.SetLength(aChannels);
      AutoTArray<T*, 8> write_channels;
      write_channels.SetLength(aChannels);
      T * samples = static_cast<T*>(buffer->Data());

      size_t offset = 0;
      for(uint32_t i = 0; i < aChannels; ++i) {
        channels[i] = write_channels[i] = samples + offset;
        offset += aFrames;
      }

      DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
                                   write_channels.Elements());
    }

    MOZ_ASSERT(aChannels == channels.Length());
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                          mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    mSources[i]->AppendToTrack(mTrackID, segment);
  }
}