nsresult
MediaEngineRemoteVideoSource::Allocate(
    const dom::MediaTrackConstraints& aConstraints,
    const MediaEnginePrefs& aPrefs,
    const nsString& aDeviceId,
    const mozilla::ipc::PrincipalInfo& aPrincipalInfo,
    AllocationHandle** aOutHandle,
    const char** aOutBadConstraint)
{
  LOG((__PRETTY_FUNCTION__));
  AssertIsOnOwningThread();

  // Refuse allocation until the capture backend has been initialized.
  if (!mInitDone) {
    LOG(("Init not done"));
    return NS_ERROR_FAILURE;
  }

  // Delegate the actual constraint resolution / allocation to the base class.
  const nsresult result = Super::Allocate(aConstraints, aPrefs, aDeviceId,
                                          aPrincipalInfo, aOutHandle,
                                          aOutBadConstraint);
  if (NS_FAILED(result)) {
    return result;
  }

  // Debug-only bookkeeping: report whether this was a reallocation of an
  // idle-but-started device or a shared allocation alongside live sources.
  if (mState == kStarted &&
      MOZ_LOG_TEST(GetMediaManagerLog(), mozilla::LogLevel::Debug)) {
    MonitorAutoLock lock(mMonitor);
    const bool noActiveSources = mSources.IsEmpty();
    if (noActiveSources) {
      MOZ_ASSERT(mPrincipalHandles.IsEmpty());
      LOG(("Video device %d reallocated", mCaptureIndex));
    } else {
      LOG(("Video device %d allocated shared", mCaptureIndex));
    }
  }
  return NS_OK;
}
nsresult
MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
                                            const MediaEnginePrefs &aPrefs,
                                            const nsString& aDeviceId)
{
  // Allocate (or share) this microphone. On the first allocation
  // (kReleased) the recording device is selected; subsequent allocations
  // only log and bump the allocation count.
  AssertIsOnOwningThread();
  if (mState == kReleased) {
    // Guard clause: cannot allocate before the audio backend is initialized.
    if (!mInitDone) {
      // Fixed typo in log message ("initalized" -> "initialized").
      LOG(("Audio device is not initialized"));
      return NS_ERROR_FAILURE;
    }
    // SetRecordingDevice returns non-zero on failure.
    if (mAudioInput->SetRecordingDevice(mCapIndex)) {
      return NS_ERROR_FAILURE;
    }
    mState = kAllocated;
    LOG(("Audio device %d allocated", mCapIndex));
  } else if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
    // Already allocated: debug-log whether the device is being reallocated
    // (no live sources) or shared with existing consumers.
    MonitorAutoLock lock(mMonitor);
    if (mSources.IsEmpty()) {
      LOG(("Audio device %d reallocated", mCapIndex));
    } else {
      LOG(("Audio device %d allocated shared", mCapIndex));
    }
  }
  // Count every successful allocation, shared or not.
  ++mNrAllocations;
  return NS_OK;
}
nsresult
MediaEngineWebRTCVideoSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
                                       const MediaEnginePrefs &aPrefs)
{
  LOG((__FUNCTION__));

  // A fresh allocation is only attempted when the device is released and the
  // backend is initialized; otherwise this is a shared/re-allocation and we
  // only emit debug logging.
  const bool freshAllocation = (mState == kReleased && mInitDone);
  if (freshAllocation) {
    // Note: if shared, we don't allow a later opener to affect the resolution.
    // (This may change depending on spec changes for Constraints/settings)
    if (!ChooseCapability(aConstraints, aPrefs)) {
      return NS_ERROR_UNEXPECTED;
    }
    // AllocateCaptureDevice returns non-zero on failure.
    if (mViECapture->AllocateCaptureDevice(GetUUID().get(),
                                           kMaxUniqueIdLength,
                                           mCaptureIndex)) {
      return NS_ERROR_FAILURE;
    }
    mState = kAllocated;
    LOG(("Video device %d allocated", mCaptureIndex));
  } else if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
    MonitorAutoLock lock(mMonitor);
    const bool reallocated = mSources.IsEmpty();
    if (reallocated) {
      LOG(("Video device %d reallocated", mCaptureIndex));
    } else {
      LOG(("Video device %d allocated shared", mCaptureIndex));
    }
  }
  return NS_OK;
}
nsresult
MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
                                            const MediaEnginePrefs &aPrefs,
                                            const nsString& aDeviceId)
{
  // Allocate (or share) this microphone via the VoEHardware interface.
  // First allocation (kReleased) selects the recording device; later
  // allocations only emit debug logging.
  if (mState == kReleased) {
    // Guard clause: cannot allocate before the audio backend is initialized.
    if (!mInitDone) {
      // Fixed typo in log message ("initalized" -> "initialized").
      LOG(("Audio device is not initialized"));
      return NS_ERROR_FAILURE;
    }
    // ScopedCustomReleasePtr releases the VoE interface automatically.
    ScopedCustomReleasePtr<webrtc::VoEHardware>
      ptrVoEHw(webrtc::VoEHardware::GetInterface(mVoiceEngine));
    // SetRecordingDevice returns non-zero on failure.
    if (!ptrVoEHw || ptrVoEHw->SetRecordingDevice(mCapIndex)) {
      return NS_ERROR_FAILURE;
    }
    mState = kAllocated;
    LOG(("Audio device %d allocated", mCapIndex));
  } else if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
    // Already allocated: debug-log whether the device is being reallocated
    // (no live sources) or shared with existing consumers.
    MonitorAutoLock lock(mMonitor);
    if (mSources.IsEmpty()) {
      LOG(("Audio device %d reallocated", mCapIndex));
    } else {
      LOG(("Audio device %d allocated shared", mCapIndex));
    }
  }
  return NS_OK;
}
// Apply (re)configuration to the single shared microphone source: resolve
// constraints/prefs into an effective MediaEnginePrefs, validate and clamp the
// requested channel count against the device maximum, then either allocate the
// engine channel (kReleased) or live-update an already started capture
// (kStarted). Finally pushes AEC/AGC/NS settings into the voice-processing
// engine and records the applied prefs via SetLastPrefs().
// On constraint violation, *aOutBadConstraint is set to "channelCount".
nsresult
MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
    const AllocationHandle* aHandle,
    const NormalizedConstraints& aNetConstraints,
    const MediaEnginePrefs& aPrefs,
    const nsString& aDeviceId,
    const char** aOutBadConstraint)
{
  // Flatten the normalized constraints and overlay them on the incoming prefs:
  // a constraint, when present, wins over the corresponding pref.
  FlattenedConstraints c(aNetConstraints);
  MediaEnginePrefs prefs = aPrefs;
  prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
  prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn);
  prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn);
  uint32_t maxChannels = 1;
  if (mAudioInput->GetMaxAvailableChannels(maxChannels) != 0) {
    return NS_ERROR_FAILURE;
  }
  // Check channelCount violation: the device maximum must fall inside the
  // constraint's [mMin, mMax] range, otherwise report "channelCount" as the
  // failing constraint.
  if (static_cast<int32_t>(maxChannels) < c.mChannelCount.mMin ||
      static_cast<int32_t>(maxChannels) > c.mChannelCount.mMax) {
    *aOutBadConstraint = "channelCount";
    return NS_ERROR_FAILURE;
  }
  // Clamp channelCount to a valid value: non-positive pref means "use the
  // device maximum".
  if (prefs.mChannels <= 0) {
    prefs.mChannels = static_cast<int32_t>(maxChannels);
  }
  prefs.mChannels = c.mChannelCount.Get(std::min(prefs.mChannels,
                                        static_cast<int32_t>(maxChannels)));
  // Clamp channelCount to a valid value (the constraint Get() above may have
  // produced a value outside [1, maxChannels]).
  prefs.mChannels = std::max(1, std::min(prefs.mChannels,
                             static_cast<int32_t>(maxChannels)));
  LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d, channels: %d",
       prefs.mAecOn ? prefs.mAec : -1,
       prefs.mAgcOn ? prefs.mAgc : -1,
       prefs.mNoiseOn ? prefs.mNoise : -1,
       prefs.mPlayoutDelay,
       prefs.mChannels));
  mPlayoutDelay = prefs.mPlayoutDelay;
  switch (mState) {
    case kReleased:
      MOZ_ASSERT(aHandle);
      if (sChannelsOpen == 0) {
        if (!InitEngine()) {
          LOG(("Audio engine is not initalized"));
          return NS_ERROR_FAILURE;
        }
      } else {
        // Until we fix (or wallpaper) support for multiple mic input
        // (Bug 1238038) fail allocation for a second device
        return NS_ERROR_FAILURE;
      }
      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
        return NS_ERROR_FAILURE;
      }
      // Request the clamped channel count before allocating the channel so
      // AllocChannel() sees the desired configuration.
      mAudioInput->SetUserChannelCount(prefs.mChannels);
      if (!AllocChannel()) {
        // Roll back the partially initialized channel on failure.
        FreeChannel();
        LOG(("Audio device is not initalized"));
        return NS_ERROR_FAILURE;
      }
      LOG(("Audio device %d allocated", mCapIndex));
      {
        // Update with the actual applied channelCount in order
        // to store it in settings.
        uint32_t channelCount = 0;
        mAudioInput->GetChannelCount(channelCount);
        MOZ_ASSERT(channelCount > 0);
        prefs.mChannels = channelCount;
      }
      break;
    case kStarted:
      // Live capture: nothing to do if the effective prefs are unchanged.
      if (prefs == mLastPrefs) {
        return NS_OK;
      }
      if (prefs.mChannels != mLastPrefs.mChannels) {
        MOZ_ASSERT(mSources.Length() > 0);
        // NOTE(review): mSources.LastElement() is read here without taking
        // mMonitor, while the debug-logging branch below does lock it —
        // confirm the owning thread is the only mutator of mSources.
        auto& source = mSources.LastElement();
        mAudioInput->SetUserChannelCount(prefs.mChannels);
        // Get validated number of channel
        uint32_t channelCount = 0;
        mAudioInput->GetChannelCount(channelCount);
        MOZ_ASSERT(channelCount > 0 && mLastPrefs.mChannels > 0);
        // Check if new validated channels is the same as previous; a real
        // change requires reopening the audio callback driver.
        if (static_cast<uint32_t>(mLastPrefs.mChannels) != channelCount &&
            !source->OpenNewAudioCallbackDriver(mListener)) {
          return NS_ERROR_FAILURE;
        }
        // Update settings
        prefs.mChannels = channelCount;
      }
      if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
        MonitorAutoLock lock(mMonitor);
        if (mSources.IsEmpty()) {
          LOG(("Audio device %d reallocated", mCapIndex));
        } else {
          LOG(("Audio device %d allocated shared", mCapIndex));
        }
      }
      break;
    default:
      LOG(("Audio device %d in ignored state %d", mCapIndex, mState));
      break;
  }
  if (sChannelsOpen > 0) {
    int error;
    error = mVoEProcessing->SetEcStatus(prefs.mAecOn, (webrtc::EcModes)prefs.mAec);
    if (error) {
      LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
      // NOTE(review): SetEcMetricsStatus(true) is only attempted when
      // SetEcStatus *failed* — confirm this nesting is intentional.
      if (prefs.mAecOn) {
        error = mVoEProcessing->SetEcMetricsStatus(true);
        if (error) {
          LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error));
        }
      }
    }
    error = mVoEProcessing->SetAgcStatus(prefs.mAgcOn, (webrtc::AgcModes)prefs.mAgc);
    if (error) {
      LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
    }
    error = mVoEProcessing->SetNsStatus(prefs.mNoiseOn, (webrtc::NsModes)prefs.mNoise);
    if (error) {
      LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
    }
  }
  // If no processing (AEC/AGC/NS) is requested we can bypass the processing
  // path entirely and capture at the graph rate.
  mSkipProcessing = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
  if (mSkipProcessing) {
    mSampleFrequency = MediaEngine::USE_GRAPH_RATE;
    mAudioOutputObserver = nullptr;
  } else {
    // make sure we route a copy of the mixed audio output of this MSG to the
    // AEC
    mAudioOutputObserver = new AudioOutputObserver();
  }
  SetLastPrefs(prefs);
  return NS_OK;
}
// Audio-capture callback: feeds queued far-end (playout) audio into the voice
// engine for echo cancellation, optionally logs echo statistics, then fans the
// captured 10 ms block out to every registered SourceMediaStream as an
// AudioSegment appended on mThread.
// NOTE(review): invoked from the capture backend's callback — presumably a
// real-time audio thread; confirm before adding blocking work here.
void
MediaEngineWebRTCAudioSource::Process(int channel, webrtc::ProcessingTypes type,
                                      sample* audio10ms, int length,
                                      int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent sample
  // since it's already irrelevant and we want to keep avoid confusing the AEC far-end
  // input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      moz_free(gFarendObserver->Pop()); // only call if size() > 0
    }
  }
  // Drain the far-end queue into the engine so the AEC has current playout
  // audio to cancel against.
  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      // Shadows the parameter 'length' deliberately for this chunk's sample count.
      int length = buffer->mSamples;
      int res = mVoERender->ExternalPlayoutData(buffer->mData,
                                                gFarendObserver->PlayoutFrequency(),
                                                gFarendObserver->PlayoutChannels(),
                                                mPlayoutDelay,
                                                length);
      moz_free(buffer);
      if (res == -1) {
        return;
      }
    }
  }
#ifdef PR_LOGGING
  // Roughly once per second of captured audio, dump AEC metric summaries.
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;
      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif
  // Deliver the captured block to all consumers; mMonitor guards mState and
  // mSources against concurrent Start/Stop.
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;
  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    // Each consumer gets its own copy of the samples in a ref-counted buffer.
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));
    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment->AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment->GetStartTime(insertTime);
    if (mSources[i]) {
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(mSources[i], mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Note: due to evil magic, the nsAutoPtr<AudioSegment>'s ownership transfers to
      // the Runnable (AutoPtr<> = AutoPtr<>)
      RUN_ON_THREAD(mThread, WrapRunnable(mSources[i], &SourceMediaStream::AppendToTrack,
                                          mTrackID, segment, (AudioSegment *) nullptr),
                    NS_DISPATCH_NORMAL);
    }
  }
  return;
}
// Apply (re)configuration to the single shared microphone source: resolve
// constraints/prefs into an effective MediaEnginePrefs, allocate the engine
// channel on first use (kReleased) or log for an already started capture
// (kStarted), then push AEC/AGC/NS settings into the voice-processing engine
// and record the applied prefs via SetLastPrefs().
nsresult
MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
    const AllocationHandle* aHandle,
    const NormalizedConstraints& aNetConstraints,
    const MediaEnginePrefs& aPrefs,
    const nsString& aDeviceId,
    const char** aOutBadConstraint)
{
  // Flatten the normalized constraints and overlay them on the incoming prefs:
  // a constraint, when present, wins over the corresponding pref.
  FlattenedConstraints c(aNetConstraints);
  MediaEnginePrefs prefs = aPrefs;
  prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
  prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn);
  prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn);
  LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d",
       prefs.mAecOn ? prefs.mAec : -1,
       prefs.mAgcOn ? prefs.mAgc : -1,
       prefs.mNoiseOn ? prefs.mNoise : -1,
       prefs.mPlayoutDelay));
  mPlayoutDelay = prefs.mPlayoutDelay;
  switch (mState) {
    case kReleased:
      MOZ_ASSERT(aHandle);
      if (sChannelsOpen == 0) {
        if (!InitEngine()) {
          LOG(("Audio engine is not initalized"));
          return NS_ERROR_FAILURE;
        }
      } else {
        // Until we fix (or wallpaper) support for multiple mic input
        // (Bug 1238038) fail allocation for a second device
        return NS_ERROR_FAILURE;
      }
      if (!AllocChannel()) {
        LOG(("Audio device is not initalized"));
        return NS_ERROR_FAILURE;
      }
      // Roll back the allocated channel if the device cannot be selected.
      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
        FreeChannel();
        return NS_ERROR_FAILURE;
      }
      LOG(("Audio device %d allocated", mCapIndex));
      break;
    case kStarted:
      // Live capture: nothing to do if the effective prefs are unchanged.
      if (prefs == mLastPrefs) {
        return NS_OK;
      }
      if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
        MonitorAutoLock lock(mMonitor);
        if (mSources.IsEmpty()) {
          LOG(("Audio device %d reallocated", mCapIndex));
        } else {
          LOG(("Audio device %d allocated shared", mCapIndex));
        }
      }
      break;
    default:
      LOG(("Audio device %d in ignored state %d", mCapIndex, mState));
      break;
  }
  if (sChannelsOpen > 0) {
    int error;
    error = mVoEProcessing->SetEcStatus(prefs.mAecOn, (webrtc::EcModes)prefs.mAec);
    if (error) {
      LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
      // NOTE(review): SetEcMetricsStatus(true) is only attempted when
      // SetEcStatus *failed* — confirm this nesting is intentional.
      if (prefs.mAecOn) {
        error = mVoEProcessing->SetEcMetricsStatus(true);
        if (error) {
          LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error));
        }
      }
    }
    error = mVoEProcessing->SetAgcStatus(prefs.mAgcOn, (webrtc::AgcModes)prefs.mAgc);
    if (error) {
      LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
    }
    error = mVoEProcessing->SetNsStatus(prefs.mNoiseOn, (webrtc::NsModes)prefs.mNoise);
    if (error) {
      LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
    }
  }
  // If no processing (AEC/AGC/NS) is requested we can bypass the processing
  // path entirely and capture at the graph rate.
  mSkipProcessing = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
  if (mSkipProcessing) {
    mSampleFrequency = MediaEngine::USE_GRAPH_RATE;
  }
  SetLastPrefs(prefs);
  return NS_OK;
}