void
MediaEngineGonkVideoSource::ChooseCapability(
    const VideoTrackConstraintsN& aConstraints,
    const MediaEnginePrefs& aPrefs)
{
  return GuessCapability(aConstraints, aPrefs);
}
void
MediaEngineWebRTCVideoSource::ChooseCapability(
    const VideoTrackConstraintsN &aConstraints,
    const MediaEnginePrefs &aPrefs)
{
  NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);

  int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
  if (num <= 0) {
    // Mac doesn't support capabilities.
    return GuessCapability(aConstraints, aPrefs);
  }

  // The rest is the full algorithm for cameras that can list their capabilities.
  LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
       aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));

  typedef nsTArray<uint8_t> SourceSet;

  SourceSet candidateSet;
  for (int i = 0; i < num; i++) {
    candidateSet.AppendElement(i);
  }

  // Pick among capabilities: First apply required constraints.
  for (uint32_t i = 0; i < candidateSet.Length();) {
    webrtc::CaptureCapability cap;
    mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
                                      candidateSet[i], cap);
    if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
      candidateSet.RemoveElementAt(i);
    } else {
      ++i;
    }
  }

  SourceSet tailSet;

  // Then apply advanced (formerly known as optional) constraints.
  if (aConstraints.mAdvanced.WasPassed()) {
    auto &array = aConstraints.mAdvanced.Value();
    for (uint32_t i = 0; i < array.Length(); i++) {
      SourceSet rejects;
      for (uint32_t j = 0; j < candidateSet.Length();) {
        webrtc::CaptureCapability cap;
        mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
                                          candidateSet[j], cap);
        if (!SatisfyConstraintSet(array[i], cap)) {
          rejects.AppendElement(candidateSet[j]);
          candidateSet.RemoveElementAt(j);
        } else {
          ++j;
        }
      }
      (candidateSet.Length()? tailSet : candidateSet).MoveElementsFrom(rejects);
    }
  }

  if (!candidateSet.Length()) {
    candidateSet.AppendElement(0);
  }

  int prefWidth = aPrefs.GetWidth();
  int prefHeight = aPrefs.GetHeight();

  // Default is closest to available capability but equal to or below;
  // otherwise closest above. Since we handle the num=0 case above and
  // take the first entry always, we can never exit uninitialized.
  webrtc::CaptureCapability cap;
  bool higher = true;
  for (uint32_t i = 0; i < candidateSet.Length(); i++) {
    mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
                                      kMaxUniqueIdLength, candidateSet[i], cap);
    if (higher) {
      if (i == 0 ||
          (mCapability.width > cap.width && mCapability.height > cap.height)) {
        // closer than the current choice
        mCapability = cap; // FIXME: expose expected capture delay?
      }
      if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
        higher = false;
      }
    } else {
      if (cap.width > (uint32_t) prefWidth ||
          cap.height > (uint32_t) prefHeight ||
          cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
        continue;
      }
      if (mCapability.width < cap.width && mCapability.height < cap.height) {
        mCapability = cap; // FIXME: expose expected capture delay?
      }
    }
    // Same resolution, maybe better format or FPS match
    if (mCapability.width == cap.width && mCapability.height == cap.height) {
      // FPS too low
      if (cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
        continue;
      }
      // Better match
      if (cap.maxFPS < mCapability.maxFPS) {
        mCapability = cap;
      } else if (cap.maxFPS == mCapability.maxFPS) {
        // Resolution and FPS the same, check format
        if (cap.rawType == webrtc::RawVideoType::kVideoI420 ||
            cap.rawType == webrtc::RawVideoType::kVideoYUY2 ||
            cap.rawType == webrtc::RawVideoType::kVideoYV12) {
          mCapability = cap;
        }
      }
    }
  }
  LOG(("chose cap %dx%d @%dfps codec %d raw %d",
       mCapability.width, mCapability.height, mCapability.maxFPS,
       mCapability.codecType, mCapability.rawType));
}
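For illustration only, the sketch below isolates the constraint-filtering pattern used above, outside of Gecko: candidates that fail the required constraint set are removed for good, while each advanced constraint set is applied in order but silently skipped if it would leave no candidates. The Cap, ConstraintSet, and Satisfies names are hypothetical stand-ins, not Gecko or WebRTC types.

// Minimal, self-contained sketch of required-then-advanced constraint
// filtering. Assumed/hypothetical types: Cap, ConstraintSet, Satisfies.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct Cap { uint32_t width, height, maxFPS; };
struct ConstraintSet { uint32_t minWidth, minHeight, minFPS; };

static bool Satisfies(const ConstraintSet& c, const Cap& cap) {
  return cap.width >= c.minWidth && cap.height >= c.minHeight &&
         cap.maxFPS >= c.minFPS;
}

std::vector<Cap> FilterCandidates(std::vector<Cap> candidates,
                                  const ConstraintSet& required,
                                  const std::vector<ConstraintSet>& advanced) {
  // Required constraints are mandatory: failing candidates are dropped.
  candidates.erase(
      std::remove_if(candidates.begin(), candidates.end(),
                     [&](const Cap& cap) { return !Satisfies(required, cap); }),
      candidates.end());

  // Advanced constraint sets are best-effort: apply each one in order, but
  // keep the previous candidate list if a set would eliminate everything
  // (the effect of moving the rejects back into candidateSet above).
  for (const ConstraintSet& cs : advanced) {
    std::vector<Cap> survivors;
    for (const Cap& cap : candidates) {
      if (Satisfies(cs, cap)) {
        survivors.push_back(cap);
      }
    }
    if (!survivors.empty()) {
      candidates = std::move(survivors);
    }
  }
  return candidates;
}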