// Creates and configures a VoiceEngine channel for this microphone source.
// On success, mChannel holds a valid channel id, mState becomes kAllocated
// and the global sChannelsOpen count is incremented.  On any failure the
// partially-created channel is deleted and, if no other channel is open,
// the engine is de-initialized.  Returns true on success.
bool MediaEngineWebRTCMicrophoneSource::AllocChannel()
{
  MOZ_ASSERT(mVoEBase);
  mChannel = mVoEBase->CreateChannel();
  if (mChannel >= 0) {
    // VoE-style APIs return 0 on success, hence the negations below.
    if (!mVoENetwork->RegisterExternalTransport(mChannel, *mNullTransport)) {
      mSampleFrequency = MediaEngine::DEFAULT_SAMPLE_RATE;
      LOG(("%s: sampling rate %u", __FUNCTION__, mSampleFrequency));

      // Check for availability.
      if (!mAudioInput->SetRecordingDevice(mCapIndex)) {
        // Because of the permission mechanism of B2G, we need to skip the status
        // check here.
        bool avail = false;
        mAudioInput->GetRecordingDeviceStatus(avail);
        if (!avail) {
          if (sChannelsOpen == 0) {
            DeInitEngine();
          }
          return false;
        }

        // Set "codec" to PCM, 32kHz on device's channels
        ScopedCustomReleasePtr<webrtc::VoECodec> ptrVoECodec(webrtc::VoECodec::GetInterface(mVoiceEngine));
        if (ptrVoECodec) {
          webrtc::CodecInst codec;
          // NOTE(review): unbounded strcpy — assumes ENCODING always fits in
          // codec.plname; confirm against the CodecInst declaration.
          strcpy(codec.plname, ENCODING);
          codec.channels = CHANNELS;
          uint32_t maxChannels = 0;
          // Prefer the device's actual channel count when it can be queried
          // (0 return presumably means success — TODO confirm).
          if (mAudioInput->GetMaxAvailableChannels(maxChannels) == 0) {
            codec.channels = maxChannels;
          }
          MOZ_ASSERT(mSampleFrequency == 16000 || mSampleFrequency == 32000);
          codec.rate = SAMPLE_RATE(mSampleFrequency);
          codec.plfreq = mSampleFrequency;
          codec.pacsize = SAMPLE_LENGTH(mSampleFrequency);
          codec.pltype = 0; // Default payload type
          if (!ptrVoECodec->SetSendCodec(mChannel, codec)) {
            // Success: record the allocation.  This function owns the
            // sChannelsOpen increment (see FreeChannel for the decrement).
            mState = kAllocated;
            sChannelsOpen++;
            return true;
          }
        }
      }
    }
  }
  // Failure path: tear down whatever was created.  Note this also runs when
  // CreateChannel() itself failed (mChannel == -1); DeleteChannel(-1) is
  // presumably a harmless error return — TODO confirm.
  mVoEBase->DeleteChannel(mChannel);
  mChannel = -1;
  if (sChannelsOpen == 0) {
    DeInitEngine();
  }
  return false;
}
// Releases one allocation handle.  When the last handle goes away the
// channel is freed and the source returns to kReleased.
//
// @param aHandle  the allocation being released (forwarded to Super).
// @return NS_ERROR_FAILURE if called in a state other than kStopped or
//         kAllocated while no handles remain; NS_OK otherwise.
nsresult
MediaEngineWebRTCMicrophoneSource::Deallocate(AllocationHandle* aHandle)
{
  AssertIsOnOwningThread();

  Super::Deallocate(aHandle);

  if (!mRegisteredHandles.Length()) {
    // If empty, no callbacks to deliver data should be occuring
    if (mState != kStopped && mState != kAllocated) {
      return NS_ERROR_FAILURE;
    }

    // FreeChannel() releases the channel, moves mState to kReleased, and
    // decrements sChannelsOpen (running DeInitEngine() when the count hits
    // zero).  Decrementing sChannelsOpen here as well double-counted the
    // close, underflowing the counter / tearing the engine down early, so
    // the extra decrement and redundant mState assignment were removed.
    FreeChannel();
    LOG(("Audio device %d deallocated", mCapIndex));
  } else {
    LOG(("Audio device %d deallocated but still in use", mCapIndex));
  }
  return NS_OK;
}
bool VAppBase::AppDeInit() { if(!m_pAppImpl) { return true; } // Shutdown everything // Note that the application implementation gets deleted when Vision::Callbacks.OnEngineDeInit is triggered m_pAppImpl->DeInit(); DeInitEngine(); Vision::SetApplication(NULL); Vision::Shutdown(); return true; }
void MediaEngineWebRTCMicrophoneSource::Shutdown() { Super::Shutdown(); if (mListener) { // breaks a cycle, since the WebRTCAudioDataListener has a RefPtr to us mListener->Shutdown(); // Don't release the webrtc.org pointers yet until the Listener is (async) shutdown mListener = nullptr; } if (mState == kStarted) { SourceMediaStream *source; bool empty; while (1) { { MonitorAutoLock lock(mMonitor); empty = mSources.IsEmpty(); if (empty) { break; } source = mSources[0]; } Stop(source, kAudioTrack); // XXX change to support multiple tracks } MOZ_ASSERT(mState == kStopped); } while (mRegisteredHandles.Length()) { MOZ_ASSERT(mState == kAllocated || mState == kStopped); Deallocate(nullptr); // XXX Extend concurrent constraints code to mics. } if (mState != kReleased) { FreeChannel(); MOZ_ASSERT(sChannelsOpen > 0); if (--sChannelsOpen == 0) { DeInitEngine(); } } mAudioInput = nullptr; }
// This shuts down the engine when no channel is open. // mState records if a channel is allocated (slightly redundantly to mChannel) void MediaEngineWebRTCMicrophoneSource::FreeChannel() { if (mState != kReleased) { if (mChannel != -1) { MOZ_ASSERT(mVoENetwork && mVoEBase); if (mVoENetwork) { mVoENetwork->DeRegisterExternalTransport(mChannel); } if (mVoEBase) { mVoEBase->DeleteChannel(mChannel); } mChannel = -1; } mState = kReleased; MOZ_ASSERT(sChannelsOpen > 0); if (--sChannelsOpen == 0) { DeInitEngine(); } } }
// Acquires the VoiceEngine sub-interfaces this source needs (base, external
// media, network, audio processing) and the null transport used for the
// external-transport registration.  Returns true when everything was
// obtained; on any failure the partially-acquired interfaces are released
// via DeInitEngine() and false is returned.
bool
MediaEngineWebRTCMicrophoneSource::InitEngine()
{
  MOZ_ASSERT(!mVoEBase);
  mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);

  // NOTE(review): the return value of Init() is ignored here — confirm a
  // failed Init() is caught by one of the GetInterface() calls below.
  mVoEBase->Init();

  mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
  if (mVoERender) {
    mVoENetwork = webrtc::VoENetwork::GetInterface(mVoiceEngine);
    if (mVoENetwork) {
      mVoEProcessing = webrtc::VoEAudioProcessing::GetInterface(mVoiceEngine);
      if (mVoEProcessing) {
        mNullTransport = new NullTransport();
        return true;
      }
    }
  }
  // One of the interfaces was unavailable; roll back everything acquired.
  DeInitEngine();
  return false;
}
// Releases the engine resources held by this implementation when it is
// destroyed (RAII-style cleanup via DeInitEngine()).
~AIEngineImpl() { DeInitEngine(); }
// Applies (possibly updated) constraints and prefs to the single supported
// microphone source: allocates the engine/channel on first use (kReleased)
// and re-applies AEC/AGC/NS processing settings.
//
// @param aHandle            allocation this update is for (asserted non-null
//                           when allocating).
// @param aNetConstraints    normalized constraints to flatten into prefs.
// @param aPrefs             baseline preferences.
// @param aDeviceId          unused here.
// @param aOutBadConstraint  unused here.
// @return NS_OK on success, NS_ERROR_FAILURE when the engine/channel/device
//         cannot be set up or a second device is requested.
nsresult
MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
    const AllocationHandle* aHandle,
    const NormalizedConstraints& aNetConstraints,
    const MediaEnginePrefs& aPrefs,
    const nsString& aDeviceId,
    const char** aOutBadConstraint)
{
  FlattenedConstraints c(aNetConstraints);

  MediaEnginePrefs prefs = aPrefs;
  prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn);
  prefs.mAgcOn = c.mMozAutoGainControl.Get(prefs.mAgcOn);
  prefs.mNoiseOn = c.mMozNoiseSuppression.Get(prefs.mNoiseOn);

  LOG(("Audio config: aec: %d, agc: %d, noise: %d, delay: %d",
       prefs.mAecOn ? prefs.mAec : -1,
       prefs.mAgcOn ? prefs.mAgc : -1,
       prefs.mNoiseOn ? prefs.mNoise : -1,
       prefs.mPlayoutDelay));

  mPlayoutDelay = prefs.mPlayoutDelay;

  switch (mState) {
    case kReleased:
      MOZ_ASSERT(aHandle);
      if (sChannelsOpen == 0) {
        if (!InitEngine()) {
          LOG(("Audio engine is not initalized"));
          return NS_ERROR_FAILURE;
        }
      } else {
        // Until we fix (or wallpaper) support for multiple mic input
        // (Bug 1238038) fail allocation for a second device
        return NS_ERROR_FAILURE;
      }
      if (!AllocChannel()) {
        // AllocChannel() already runs DeInitEngine() on its failure path
        // when no channel remains open, so no extra cleanup is needed here.
        LOG(("Audio device is not initalized"));
        return NS_ERROR_FAILURE;
      }
      if (mAudioInput->SetRecordingDevice(mCapIndex)) {
        // FreeChannel() decrements sChannelsOpen and de-inits the engine at
        // zero itself; the former explicit DeInitEngine() call here was
        // redundant.
        FreeChannel();
        return NS_ERROR_FAILURE;
      }
      // AllocChannel() already set mState = kAllocated and incremented
      // sChannelsOpen on success; doing both again here double-counted the
      // channel, so the duplicate increment/assignment was removed.
      LOG(("Audio device %d allocated", mCapIndex));
      break;

    case kStarted:
      if (prefs == mLastPrefs) {
        return NS_OK;
      }
      if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
        MonitorAutoLock lock(mMonitor);
        if (mSources.IsEmpty()) {
          LOG(("Audio device %d reallocated", mCapIndex));
        } else {
          LOG(("Audio device %d allocated shared", mCapIndex));
        }
      }
      break;

    default:
      LOG(("Audio device %d %s in ignored state %d",
           mCapIndex, (aHandle ? aHandle->mOrigin.get() : ""), mState));
      break;
  }

  if (sChannelsOpen > 0) {
    int error;

    error = mVoEProcessing->SetEcStatus(prefs.mAecOn, (webrtc::EcModes)prefs.mAec);
    if (error) {
      LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
      // NOTE(review): echo metrics are only enabled when SetEcStatus *failed*;
      // this nesting looks suspicious — confirm it is intended.
      if (prefs.mAecOn) {
        error = mVoEProcessing->SetEcMetricsStatus(true);
        if (error) {
          LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error));
        }
      }
    }
    error = mVoEProcessing->SetAgcStatus(prefs.mAgcOn, (webrtc::AgcModes)prefs.mAgc);
    if (error) {
      LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
    }
    error = mVoEProcessing->SetNsStatus(prefs.mNoiseOn, (webrtc::NsModes)prefs.mNoise);
    if (error) {
      LOG(("%s Error setting NoiseSuppression Status: %d ",__FUNCTION__, error));
    }
  }

  // With no processing enabled we can skip the webrtc pipeline entirely and
  // run at the MediaStreamGraph rate.
  mSkipProcessing = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn);
  if (mSkipProcessing) {
    mSampleFrequency = MediaEngine::USE_GRAPH_RATE;
  }
  SetLastPrefs(prefs);
  return NS_OK;
}