void
MediaEngineWebRTCAudioSource::Process(int channel,
                                      webrtc::ProcessingTypes type,
                                      sample* audio10ms, int length,
                                      int samplingFreq, bool isStereo)
{
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion,
              LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
void
MediaEngineWebRTCAudioSource::Process(int channel,
                                      webrtc::ProcessingTypes type,
                                      sample* audio10ms, int length,
                                      int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent
  // sample, since it's already irrelevant and we want to avoid confusing the
  // AEC far-end input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      int length = buffer->mSamples;
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          length) == -1) {
        free(buffer); // don't leak the popped chunk on the error path
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;
      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion,
              LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
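// For context only: a minimal sketch of the far-end (playout) interface that
// Process() consumes above. This is an illustrative assumption, not the actual
// Mozilla declarations -- only Size(), Pop(), PlayoutFrequency(),
// PlayoutChannels(), mSamples and mData are taken from the code above; the
// types, return values, and layout shown here are guesses. The key ownership
// rule it illustrates: Pop() hands the caller a malloc'ed chunk (header
// followed by sample data), so Process() must free() every chunk it pops,
// including on early-return paths.

#include <cstdint>

struct FarEndAudioChunk {
  uint32_t mSamples;   // number of frames of far-end audio in this chunk
  int16_t  mData[1];   // variable-length sample data allocated after the header
};

class AudioOutputObserver {
public:
  uint32_t Size();                // queued far-end chunks; only Pop() when > 0
  FarEndAudioChunk* Pop();        // transfers ownership; caller releases with free()
  uint32_t PlayoutFrequency();    // sample rate of the speaker (far-end) stream
  uint32_t PlayoutChannels();     // channel count of the speaker (far-end) stream
};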