/**
 * Enumerate the available video capture devices and append one
 * MediaEngineVideoSource per device to aVSources.
 *
 * Sources are deduplicated across calls through the mVideoSources hashtable,
 * keyed by a UTF-16 copy of the device's unique id: a previously-seen device
 * is re-appended from the cache, a new one is wrapped in a fresh
 * MediaEngineWebRTCVideoSource and cached.
 *
 * On B2G (MOZ_B2G_CAMERA) the camera list comes from ICameraControl; on all
 * other platforms it comes from the WebRTC ViECapture interface, lazily
 * creating and Init()-ing mVideoEngine on first use.
 *
 * @param aVSources out-param; sources are appended, the array is not cleared.
 *                  NOTE(review): caller presumably passes an empty array —
 *                  confirm at call sites.
 */
void
MediaEngineWebRTC::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources)
{
#ifdef MOZ_B2G_CAMERA
  MutexAutoLock lock(mMutex);

  /**
   * We still enumerate every time, in case a new device was plugged in since
   * the last call. TODO: Verify that WebRTC actually does deal with hotplugging
   * new devices (with or without new engine creation) and accordingly adjust.
   * Enumeration is not neccessary if GIPS reports the same set of devices
   * for a given instance of the engine. Likewise, if a device was plugged out,
   * mVideoSources must be updated.
   */
  int num = 0;
  nsresult result;
  result = ICameraControl::GetNumberOfCameras(num);
  if (num <= 0 || result != NS_OK) {
    return;
  }

  for (int i = 0; i < num; i++) {
    nsCString cameraName;
    result = ICameraControl::GetCameraName(i, cameraName);
    if (result != NS_OK) {
      // Skip cameras whose name can't be queried; keep enumerating the rest.
      continue;
    }

    nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
    // The camera name doubles as the stable cache key for this device.
    NS_ConvertUTF8toUTF16 uuid(cameraName);
    if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
      // We've already seen this device, just append.
      aVSources->AppendElement(vSource.get());
    } else {
      vSource = new MediaEngineWebRTCVideoSource(i);
      mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
      aVSources->AppendElement(vSource);
    }
  }

  return;
#else
  ScopedCustomReleasePtr<webrtc::ViEBase> ptrViEBase;
  ScopedCustomReleasePtr<webrtc::ViECapture> ptrViECapture;
  // We spawn threads to handle gUM runnables, so we must protect the member vars
  MutexAutoLock lock(mMutex);

#ifdef MOZ_WIDGET_ANDROID
  jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();

  // get the JVM
  JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM();

  if (webrtc::VideoEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
    LOG(("VieCapture:SetAndroidObjects Failed"));
    return;
  }
#endif

  // Lazily create the video engine; it is reused across calls.
  if (!mVideoEngine) {
    if (!(mVideoEngine = webrtc::VideoEngine::Create())) {
      return;
    }
  }

  // One-time (per process, gated by gWebrtcTraceLoggingOn) opt-in to webrtc
  // trace logging, controlled by the module log level and the
  // WEBRTC_TRACE_FILE environment variable.
  PRLogModuleInfo *logs = GetWebRTCLogInfo();
  if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
    // no need to a critical section or lock here
    gWebrtcTraceLoggingOn = 1;

    const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
    if (!file) {
      file = "WebRTC.log";
    }
    LOG(("%s Logging webrtc to %s level %d", __FUNCTION__, file, logs->level));
    mVideoEngine->SetTraceFilter(logs->level);
    mVideoEngine->SetTraceFile(file);
  }

  ptrViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
  if (!ptrViEBase) {
    return;
  }

  // Init() the engine only once; mVideoEngineInit remembers success.
  if (!mVideoEngineInit) {
    if (ptrViEBase->Init() < 0) {
      return;
    }
    mVideoEngineInit = true;
  }

  ptrViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
  if (!ptrViECapture) {
    return;
  }

  /**
   * We still enumerate every time, in case a new device was plugged in since
   * the last call. TODO: Verify that WebRTC actually does deal with hotplugging
   * new devices (with or without new engine creation) and accordingly adjust.
   * Enumeration is not neccessary if GIPS reports the same set of devices
   * for a given instance of the engine. Likewise, if a device was plugged out,
   * mVideoSources must be updated.
   */
  int num = ptrViECapture->NumberOfCaptureDevices();
  if (num <= 0) {
    return;
  }

  for (int i = 0; i < num; i++) {
    const unsigned int kMaxDeviceNameLength = 128; // XXX FIX!
    const unsigned int kMaxUniqueIdLength = 256;
    char deviceName[kMaxDeviceNameLength];
    char uniqueId[kMaxUniqueIdLength];

    // paranoia
    deviceName[0] = '\0';
    uniqueId[0] = '\0';

    int error = ptrViECapture->GetCaptureDevice(i, deviceName,
                                                sizeof(deviceName), uniqueId,
                                                sizeof(uniqueId));
    if (error) {
      // Log and skip this device; the remaining indices may still work.
      LOG((" VieCapture:GetCaptureDevice: Failed %d",
           ptrViEBase->LastError() ));
      continue;
    }
#ifdef DEBUG
    // Debug builds additionally dump every capability of every device.
    LOG((" Capture Device Index %d, Name %s", i, deviceName));

    webrtc::CaptureCapability cap;
    int numCaps = ptrViECapture->NumberOfCapabilities(uniqueId, kMaxUniqueIdLength);
    LOG(("Number of Capabilities %d", numCaps));
    for (int j = 0; j < numCaps; j++) {
      if (ptrViECapture->GetCaptureCapability(uniqueId, kMaxUniqueIdLength,
                                              j, cap ) != 0 ) {
        break;
      }
      LOG(("type=%d width=%d height=%d maxFPS=%d",
           cap.rawType, cap.width, cap.height, cap.maxFPS ));
    }
#endif

    if (uniqueId[0] == '\0') {
      // In case a device doesn't set uniqueId!
      // Fall back to the (possibly non-unique) device name as the cache key.
      strncpy(uniqueId, deviceName, sizeof(uniqueId));
      uniqueId[sizeof(uniqueId)-1] = '\0'; // strncpy isn't safe
    }

    nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
    NS_ConvertUTF8toUTF16 uuid(uniqueId);
    if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
      // We've already seen this device, just append.
      aVSources->AppendElement(vSource.get());
    } else {
      vSource = new MediaEngineWebRTCVideoSource(mVideoEngine, i);
      mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
      aVSources->AppendElement(vSource);
    }
  }

  // Tab sharing is exposed as an extra pseudo-device when enabled.
  if (mHasTabVideoSource)
    aVSources->AppendElement(new MediaEngineTabVideoSource());

  return;
#endif
}
void MediaEngineWebRTC::EnumerateAudioDevices(nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources) { ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase; ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw; // We spawn threads to handle gUM runnables, so we must protect the member vars MutexAutoLock lock(mMutex); #ifdef MOZ_WIDGET_ANDROID jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef(); // get the JVM JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM(); JNIEnv *env = GetJNIForThread(); if (webrtc::VoiceEngine::SetAndroidObjects(jvm, env, (void*)context) != 0) { LOG(("VoiceEngine:SetAndroidObjects Failed")); return; } #endif if (!mVoiceEngine) { mVoiceEngine = webrtc::VoiceEngine::Create(); if (!mVoiceEngine) { return; } } PRLogModuleInfo *logs = GetWebRTCLogInfo(); if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) { // no need to a critical section or lock here gWebrtcTraceLoggingOn = 1; const char *file = PR_GetEnv("WEBRTC_TRACE_FILE"); if (!file) { file = "WebRTC.log"; } LOG(("Logging webrtc to %s level %d", __FUNCTION__, file, logs->level)); mVoiceEngine->SetTraceFilter(logs->level); mVoiceEngine->SetTraceFile(file); } ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine); if (!ptrVoEBase) { return; } if (!mAudioEngineInit) { if (ptrVoEBase->Init() < 0) { return; } mAudioEngineInit = true; } ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine); if (!ptrVoEHw) { return; } int nDevices = 0; ptrVoEHw->GetNumOfRecordingDevices(nDevices); for (int i = 0; i < nDevices; i++) { // We use constants here because GetRecordingDeviceName takes char[128]. char deviceName[128]; char uniqueId[128]; // paranoia; jingle doesn't bother with this deviceName[0] = '\0'; uniqueId[0] = '\0'; int error = ptrVoEHw->GetRecordingDeviceName(i, deviceName, uniqueId); if (error) { LOG((" VoEHardware:GetRecordingDeviceName: Failed %d", ptrVoEBase->LastError() )); continue; } if (uniqueId[0] == '\0') { // Mac and Linux don't set uniqueId! 
MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check } nsRefPtr<MediaEngineWebRTCAudioSource> aSource; NS_ConvertUTF8toUTF16 uuid(uniqueId); if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) { // We've already seen this device, just append. aASources->AppendElement(aSource.get()); } else { aSource = new MediaEngineWebRTCAudioSource( mVoiceEngine, i, deviceName, uniqueId ); mAudioSources.Put(uuid, aSource); // Hashtable takes ownership. aASources->AppendElement(aSource); } } }
void MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource, nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources) { // We spawn threads to handle gUM runnables, so we must protect the member vars MutexAutoLock lock(mMutex); #ifdef MOZ_B2G_CAMERA if (aMediaSource != MediaSourceType::Camera) { // only supports camera sources return; } /** * We still enumerate every time, in case a new device was plugged in since * the last call. TODO: Verify that WebRTC actually does deal with hotplugging * new devices (with or without new engine creation) and accordingly adjust. * Enumeration is not neccessary if GIPS reports the same set of devices * for a given instance of the engine. Likewise, if a device was plugged out, * mVideoSources must be updated. */ int num = 0; nsresult result; result = ICameraControl::GetNumberOfCameras(num); if (num <= 0 || result != NS_OK) { return; } for (int i = 0; i < num; i++) { nsCString cameraName; result = ICameraControl::GetCameraName(i, cameraName); if (result != NS_OK) { continue; } nsRefPtr<MediaEngineWebRTCVideoSource> vSource; NS_ConvertUTF8toUTF16 uuid(cameraName); if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) { // We've already seen this device, just append. aVSources->AppendElement(vSource.get()); } else { vSource = new MediaEngineWebRTCVideoSource(i, aMediaSource); mVideoSources.Put(uuid, vSource); // Hashtable takes ownership. 
aVSources->AppendElement(vSource); } } return; #else ScopedCustomReleasePtr<webrtc::ViEBase> ptrViEBase; ScopedCustomReleasePtr<webrtc::ViECapture> ptrViECapture; webrtc::Config configSet; webrtc::VideoEngine *videoEngine = nullptr; bool *videoEngineInit = nullptr; #ifdef MOZ_WIDGET_ANDROID // get the JVM JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM(); if (webrtc::VideoEngine::SetAndroidObjects(jvm) != 0) { LOG(("VieCapture:SetAndroidObjects Failed")); return; } #endif switch (aMediaSource) { case MediaSourceType::Window: mWinEngineConfig.Set<webrtc::CaptureDeviceInfo>( new webrtc::CaptureDeviceInfo(webrtc::CaptureDeviceType::Window)); if (!mWinEngine) { if (!(mWinEngine = webrtc::VideoEngine::Create(mWinEngineConfig))) { return; } } videoEngine = mWinEngine; videoEngineInit = &mWinEngineInit; break; case MediaSourceType::Application: mAppEngineConfig.Set<webrtc::CaptureDeviceInfo>( new webrtc::CaptureDeviceInfo(webrtc::CaptureDeviceType::Application)); if (!mAppEngine) { if (!(mAppEngine = webrtc::VideoEngine::Create(mAppEngineConfig))) { return; } } videoEngine = mAppEngine; videoEngineInit = &mAppEngineInit; break; case MediaSourceType::Screen: mScreenEngineConfig.Set<webrtc::CaptureDeviceInfo>( new webrtc::CaptureDeviceInfo(webrtc::CaptureDeviceType::Screen)); if (!mScreenEngine) { if (!(mScreenEngine = webrtc::VideoEngine::Create(mScreenEngineConfig))) { return; } } videoEngine = mScreenEngine; videoEngineInit = &mScreenEngineInit; break; case MediaSourceType::Browser: mBrowserEngineConfig.Set<webrtc::CaptureDeviceInfo>( new webrtc::CaptureDeviceInfo(webrtc::CaptureDeviceType::Browser)); if (!mBrowserEngine) { if (!(mBrowserEngine = webrtc::VideoEngine::Create(mBrowserEngineConfig))) { return; } } videoEngine = mBrowserEngine; videoEngineInit = &mBrowserEngineInit; break; case MediaSourceType::Camera: // fall through default: if (!mVideoEngine) { if (!(mVideoEngine = webrtc::VideoEngine::Create())) { return; } } videoEngine = mVideoEngine; 
videoEngineInit = &mVideoEngineInit; break; } ptrViEBase = webrtc::ViEBase::GetInterface(videoEngine); if (!ptrViEBase) { return; } if (ptrViEBase->Init() < 0) { return; } *videoEngineInit = true; ptrViECapture = webrtc::ViECapture::GetInterface(videoEngine); if (!ptrViECapture) { return; } /** * We still enumerate every time, in case a new device was plugged in since * the last call. TODO: Verify that WebRTC actually does deal with hotplugging * new devices (with or without new engine creation) and accordingly adjust. * Enumeration is not neccessary if GIPS reports the same set of devices * for a given instance of the engine. Likewise, if a device was plugged out, * mVideoSources must be updated. */ int num = ptrViECapture->NumberOfCaptureDevices(); if (num <= 0) { return; } for (int i = 0; i < num; i++) { char deviceName[MediaEngineSource::kMaxDeviceNameLength]; char uniqueId[MediaEngineSource::kMaxUniqueIdLength]; // paranoia deviceName[0] = '\0'; uniqueId[0] = '\0'; int error = ptrViECapture->GetCaptureDevice(i, deviceName, sizeof(deviceName), uniqueId, sizeof(uniqueId)); if (error) { LOG((" VieCapture:GetCaptureDevice: Failed %d", ptrViEBase->LastError() )); continue; } #ifdef DEBUG LOG((" Capture Device Index %d, Name %s", i, deviceName)); webrtc::CaptureCapability cap; int numCaps = ptrViECapture->NumberOfCapabilities(uniqueId, MediaEngineSource::kMaxUniqueIdLength); LOG(("Number of Capabilities %d", numCaps)); for (int j = 0; j < numCaps; j++) { if (ptrViECapture->GetCaptureCapability(uniqueId, MediaEngineSource::kMaxUniqueIdLength, j, cap ) != 0 ) { break; } LOG(("type=%d width=%d height=%d maxFPS=%d", cap.rawType, cap.width, cap.height, cap.maxFPS )); } #endif if (uniqueId[0] == '\0') { // In case a device doesn't set uniqueId! 
strncpy(uniqueId, deviceName, sizeof(uniqueId)); uniqueId[sizeof(uniqueId)-1] = '\0'; // strncpy isn't safe } nsRefPtr<MediaEngineWebRTCVideoSource> vSource; NS_ConvertUTF8toUTF16 uuid(uniqueId); if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) { // We've already seen this device, just refresh and append. vSource->Refresh(i); aVSources->AppendElement(vSource.get()); } else { vSource = new MediaEngineWebRTCVideoSource(videoEngine, i, aMediaSource); mVideoSources.Put(uuid, vSource); // Hashtable takes ownership. aVSources->AppendElement(vSource); } } if (mHasTabVideoSource || MediaSourceType::Browser == aMediaSource) aVSources->AppendElement(new MediaEngineTabVideoSource()); return; #endif }
/**
 * Enumerate audio recording devices and append one MediaEngineAudioSource per
 * device to aASources.
 *
 * For MediaSourceEnum::AudioCapture a single synthetic
 * MediaEngineWebRTCAudioCaptureSource is appended and no hardware is touched.
 * Otherwise the VoiceEngine is lazily created (with the extended-filter /
 * delay-agnostic AEC settings from mConfig) and re-Init()-ed, and devices are
 * listed through mAudioInput (Cubeb-backed when SupportsDuplex(), WebRTC
 * otherwise). Sources are deduplicated across calls through mAudioSources,
 * keyed by a UTF-16 copy of the device's unique id.
 *
 * @param aMediaSource the requested audio source kind.
 * @param aASources    out-param; sources are appended, the array is not cleared.
 */
void
MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
                                         nsTArray<RefPtr<MediaEngineAudioSource> >* aASources)
{
  ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
  // We spawn threads to handle gUM runnables, so we must protect the member vars
  MutexAutoLock lock(mMutex);

  if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
    // AudioCapture is not a hardware device; hand back one synthetic source.
    RefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
      new MediaEngineWebRTCAudioCaptureSource(nullptr);
    aASources->AppendElement(audioCaptureSource);
    return;
  }

#ifdef MOZ_WIDGET_ANDROID
  jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();

  // get the JVM
  JavaVM* jvm;
  JNIEnv* const env = jni::GetEnvForThread();
  MOZ_ALWAYS_TRUE(!env->GetJavaVM(&jvm));

  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
    LOG(("VoiceEngine:SetAndroidObjects Failed"));
    return;
  }
#endif

  if (!mVoiceEngine) {
    // Apply the AEC configuration before creating the engine.
    mConfig.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(mExtendedFilter));
    mConfig.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(mDelayAgnostic));
    mVoiceEngine = webrtc::VoiceEngine::Create(mConfig);
    if (!mVoiceEngine) {
      return;
    }
  }

  ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
  if (!ptrVoEBase) {
    return;
  }

  // Always re-init the voice engine, since if we close the last use we
  // DeInitEngine() and Terminate(), which shuts down Process() - but means
  // we have to Init() again before using it. Init() when already inited is
  // just a no-op, so call always.
  if (ptrVoEBase->Init() < 0) {
    return;
  }

  if (!mAudioInput) {
    if (SupportsDuplex()) {
      // The platform_supports_full_duplex.
      mAudioInput = new mozilla::AudioInputCubeb(mVoiceEngine);
    } else {
      mAudioInput = new mozilla::AudioInputWebRTC(mVoiceEngine);
    }
  }

  int nDevices = 0;
  mAudioInput->GetNumOfRecordingDevices(nDevices);
  int i;
#if defined(MOZ_WIDGET_ANDROID) || defined(MOZ_WIDGET_GONK)
  i = 0; // Bug 1037025 - let the OS handle defaulting for now on android/b2g
#else
  // -1 is "default communications device" depending on OS in webrtc.org code
  i = -1;
#endif
  for (; i < nDevices; i++) {
    // We use constants here because GetRecordingDeviceName takes char[128].
    char deviceName[128];
    char uniqueId[128];
    // paranoia; jingle doesn't bother with this
    deviceName[0] = '\0';
    uniqueId[0] = '\0';

    int error = mAudioInput->GetRecordingDeviceName(i, deviceName, uniqueId);
    if (error) {
      // Log and skip this device; remaining indices may still work.
      LOG((" VoEHardware:GetRecordingDeviceName: Failed %d", error));
      continue;
    }

    if (uniqueId[0] == '\0') {
      // Mac and Linux don't set uniqueId!
      MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia
      strcpy(uniqueId, deviceName); // safe given assert and initialization/error-check
    }

    RefPtr<MediaEngineAudioSource> aSource;
    NS_ConvertUTF8toUTF16 uuid(uniqueId);
    if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
      // We've already seen this device, just append.
      aASources->AppendElement(aSource.get());
    } else {
      // NOTE(review): the raw AudioInput* appears to be owned by the
      // MicrophoneSource once passed in — confirm against its ctor.
      AudioInput* audioinput = mAudioInput;
      if (SupportsDuplex()) {
        // The platform_supports_full_duplex.

        // For cubeb, it has state (the selected ID)
        // XXX just use the uniqueID for cubeb and support it everywhere, and get rid of this
        // XXX Small window where the device list/index could change!
        audioinput = new mozilla::AudioInputCubeb(mVoiceEngine, i);
      }
      aSource = new MediaEngineWebRTCMicrophoneSource(mVoiceEngine, audioinput,
                                                      i, deviceName, uniqueId);
      mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
      aASources->AppendElement(aSource);
    }
  }
}
/**
 * Enumerate audio recording devices via VoEHardware and append one
 * MediaEngineAudioSource per device to aASources.
 *
 * The VoiceEngine is lazily created and Init()-ed once (mAudioEngineInit).
 * Sources are deduplicated across calls through the mAudioSources hashtable,
 * keyed by a UTF-16 copy of the device's unique id; a new device is wrapped
 * in a fresh MediaEngineWebRTCAudioSource (bound to mThread) and cached.
 *
 * @param aMediaSource requested source kind; not consulted in this body —
 *                     presumably only Microphone reaches here, TODO confirm
 *                     at call sites.
 * @param aASources    out-param; sources are appended, the array is not
 *                     cleared.
 */
void
MediaEngineWebRTC::EnumerateAudioDevices(MediaSourceType aMediaSource,
                                         nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources)
{
  ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;
  ScopedCustomReleasePtr<webrtc::VoEHardware> ptrVoEHw;
  // We spawn threads to handle gUM runnables, so we must protect the member vars
  MutexAutoLock lock(mMutex);

#ifdef MOZ_WIDGET_ANDROID
  jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();

  // get the JVM
  JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM();
  JNIEnv *env = GetJNIForThread();

  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, env, (void*)context) != 0) {
    LOG(("VoiceEngine:SetAndroidObjects Failed"));
    return;
  }
#endif

  // Lazily create the voice engine; it is reused across calls.
  if (!mVoiceEngine) {
    mVoiceEngine = webrtc::VoiceEngine::Create();
    if (!mVoiceEngine) {
      return;
    }
  }

  ptrVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
  if (!ptrVoEBase) {
    return;
  }

  // Init() the engine only once; mAudioEngineInit remembers success.
  if (!mAudioEngineInit) {
    if (ptrVoEBase->Init() < 0) {
      return;
    }
    mAudioEngineInit = true;
  }

  ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
  if (!ptrVoEHw)  {
    return;
  }

  int nDevices = 0;
  ptrVoEHw->GetNumOfRecordingDevices(nDevices);
  int i;
#if defined(MOZ_WIDGET_ANDROID) || defined(MOZ_WIDGET_GONK)
  i = 0; // Bug 1037025 - let the OS handle defaulting for now on android/b2g
#else
  // -1 is "default communications device" depending on OS in webrtc.org code
  i = -1;
#endif
  for (; i < nDevices; i++) {
    // We use constants here because GetRecordingDeviceName takes char[128].
    char deviceName[128];
    char uniqueId[128];
    // paranoia; jingle doesn't bother with this
    deviceName[0] = '\0';
    uniqueId[0] = '\0';

    int error = ptrVoEHw->GetRecordingDeviceName(i, deviceName, uniqueId);
    if (error) {
      // Log and skip this device; remaining indices may still work.
      LOG((" VoEHardware:GetRecordingDeviceName: Failed %d",
           ptrVoEBase->LastError() ));
      continue;
    }

    if (uniqueId[0] == '\0') {
      // Mac and Linux don't set uniqueId!
      MOZ_ASSERT(sizeof(deviceName) == sizeof(uniqueId)); // total paranoia
      strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
    }

    nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
    NS_ConvertUTF8toUTF16 uuid(uniqueId);
    if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
      // We've already seen this device, just append.
      aASources->AppendElement(aSource.get());
    } else {
      aSource = new MediaEngineWebRTCAudioSource(
        mThread, mVoiceEngine, i, deviceName, uniqueId
      );
      mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
      aASources->AppendElement(aSource);
    }
  }
}