/**
 * Configures the video engine's receive side from a list of negotiated codecs.
 *
 * Flow: stop reception, validate each codec config, apply each codec to the
 * engine (external H.264 decoder takes a fast path; otherwise we match against
 * the engine's built-in codec table), then configure keyframe-request and NACK
 * feedback and restart reception.
 *
 * @param codecConfigList  Negotiated receive codecs; must be non-empty.
 * @return kMediaConduitNoError on success; a specific error otherwise.
 *         Succeeds if at least one codec was applied and reception restarted.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureRecvMediaCodecs(
    const std::vector<VideoCodecConfig* >& codecConfigList)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  bool success = false;
  std::string payloadName;

  // New receive codecs cannot be applied while the engine is receiving.
  condError = StopReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if(codecConfigList.empty())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  // Feedback capabilities accumulated across ALL codecs in the list (see the
  // long XXX note below for why this union is applied conduit-wide).
  webrtc::ViEKeyFrameRequestMethod kf_request = webrtc::kViEKeyFrameRequestNone;
  bool use_nack_basic = false;

  // Try applying the codecs in the list.
  // We treat it as success if at least one codec was applied and reception was
  // started successfully.
  for(std::vector<VideoCodecConfig*>::size_type i=0;i < codecConfigList.size();i++)
  {
    // If the codec param is invalid or duplicate, return error.
    if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError)
    {
      return condError;
    }

    // Check for the keyframe request type: PLI is preferred
    // over FIR, and FIR is preferred over none.
    if (codecConfigList[i]->RtcpFbNackIsSet("pli"))
    {
      kf_request = webrtc::kViEKeyFrameRequestPliRtcp;
    } else if(kf_request == webrtc::kViEKeyFrameRequestNone &&
              codecConfigList[i]->RtcpFbCcmIsSet("fir"))
    {
      kf_request = webrtc::kViEKeyFrameRequestFirRtcp;
    }

    // Check whether NACK is requested (empty parameter == basic NACK).
    if(codecConfigList[i]->RtcpFbNackIsSet(""))
    {
      use_nack_basic = true;
    }

    webrtc::VideoCodec video_codec;
    memset(&video_codec, 0, sizeof(webrtc::VideoCodec));

    if (mExternalRecvCodec &&
        codecConfigList[i]->mType == mExternalRecvCodec->mType) {
      // NOTE(review): informational message logged at Error level — presumably
      // intentional so it shows up in release logs; confirm.
      CSFLogError(logTag, "%s Configuring External H264 Receive Codec", __FUNCTION__);

      // XXX Do we need a separate setting for receive maxbitrate? Is it
      // different for hardware codecs? For now assume symmetry.
      CodecConfigToWebRTCCodec(codecConfigList[i], video_codec);

      // values SetReceiveCodec() cares about are name, type, maxbitrate
      if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1)
      {
        CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__,
                    mPtrViEBase->LastError());
      } else {
        CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__,
                    codecConfigList[i]->mName.c_str());
        // Mirror the applied codec into our local database for later queries.
        if(CopyCodecToDB(codecConfigList[i]))
        {
          success = true;
        } else {
          CSFLogError(logTag,"%s Unable to update Codec Database", __FUNCTION__);
          return kMediaConduitUnknownError;
        }
      }
    } else {
      // Retrieve pre-populated codec structure for our codec from the engine's
      // built-in table, matched by payload name.
      for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
      {
        if(mPtrViECodec->GetCodec(idx, video_codec) == 0)
        {
          payloadName = video_codec.plName;
          if(codecConfigList[i]->mName.compare(payloadName) == 0)
          {
            // Overlay our negotiated parameters on the engine's defaults.
            CodecConfigToWebRTCCodec(codecConfigList[i], video_codec);
            if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1)
            {
              CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__,
                          mPtrViEBase->LastError());
            } else {
              CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__,
                          codecConfigList[i]->mName.c_str());
              if(CopyCodecToDB(codecConfigList[i]))
              {
                success = true;
              } else {
                CSFLogError(logTag,"%s Unable to update Codec Database", __FUNCTION__);
                return kMediaConduitUnknownError;
              }
            }
            break; // we found a match
          }
        }
      } // end for codeclist
    }
  } // end for

  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  // Lazily create the receive-side codec statistics collector.
  if (!mVideoCodecStat) {
    mVideoCodecStat = new VideoCodecStatistics(mChannel, mPtrViECodec, false);
  }

  // XXX Currently, we gather up all of the feedback types that the remote
  // party indicated it supports for all video codecs and configure the entire
  // conduit based on those capabilities. This is technically out of spec,
  // as these values should be configured on a per-codec basis. However,
  // the video engine only provides this API on a per-conduit basis, so that's
  // how we have to do it. The approach of considering the remote capablities
  // for the entire conduit to be a union of all remote codec capabilities
  // (rather than the more conservative approach of using an intersection)
  // is made to provide as many feedback mechanisms as are likely to be
  // processed by the remote party (and should be relatively safe, since the
  // remote party is required to ignore feedback types that it does not
  // understand).
  //
  // Note that our configuration uses this union of remote capabilites as
  // input to the configuration. It is not isomorphic to the configuration.
  // For example, it only makes sense to have one frame request mechanism
  // active at a time; so, if the remote party indicates more than one
  // supported mechanism, we're only configuring the one we most prefer.
  //
  // See http://code.google.com/p/webrtc/issues/detail?id=2331
  if (kf_request != webrtc::kViEKeyFrameRequestNone)
  {
    CSFLogDebug(logTag, "Enabling %s frame requests for video stream\n",
                (kf_request == webrtc::kViEKeyFrameRequestPliRtcp ?
                 "PLI" : "FIR"));
    if(mPtrRTP->SetKeyFrameRequestMethod(mChannel, kf_request) != 0)
    {
      CSFLogError(logTag, "%s KeyFrameRequest Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitKeyFrameRequestError;
    }
  }

  // Record which frame-request mechanism is active for stats/diagnostics.
  switch (kf_request) {
    case webrtc::kViEKeyFrameRequestNone:
      mFrameRequestMethod = FrameRequestNone;
      break;
    case webrtc::kViEKeyFrameRequestPliRtcp:
      mFrameRequestMethod = FrameRequestPli;
      break;
    case webrtc::kViEKeyFrameRequestFirRtcp:
      mFrameRequestMethod = FrameRequestFir;
      break;
    default:
      MOZ_ASSERT(PR_FALSE);
      mFrameRequestMethod = FrameRequestUnknown;
  }

  if(use_nack_basic)
  {
    CSFLogDebug(logTag, "Enabling NACK (recv) for video stream\n");
    if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
    {
      CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitNACKStatusError;
    }
  }
  mUsingNackBasic = use_nack_basic;

  condError = StartReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  // by now we should be successfully started the reception
  // REMB: receive=false, send-side-estimation feedback=true for this channel.
  mPtrRTP->SetRembStatus(mChannel, false, true);
  DumpCodecDB();
  return kMediaConduitNoError;
}
/**
 * Configures the voice engine's send side with the negotiated audio codec,
 * then applies echo-cancellation / capture-delay preferences and starts
 * transmission.
 *
 * Fix: the SetEcStatus failure message previously said "EVStatus", which made
 * the log misleading; it now names the call that actually failed.
 *
 * @param codecConfig  Negotiated send codec; validated before use.
 * @return kMediaConduitNoError on success; kMediaConduitInvalidSendCodec for
 *         codec-level engine errors; other specific errors otherwise.
 */
MediaConduitErrorCode
WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; // webrtc engine errors
  webrtc::CodecInst cinst;

  // Validate codec param.
  if((condError = ValidateCodecConfig(codecConfig, true)) != kMediaConduitNoError)
  {
    return condError;
  }

  // Are we transmitting already? Stop first; the send codec cannot be changed
  // while the engine is sending.
  if(mEngineTransmitting)
  {
    CSFLogDebug(logTag, "%s Engine Already Sending. Attemping to Stop ", __FUNCTION__);
    if(mPtrVoEBase->StopSend(mChannel) == -1)
    {
      CSFLogError(logTag, "%s StopSend() Failed %d ", __FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitUnknownError;
    }
  }

  mEngineTransmitting = false;

  if(!CodecConfigToWebRTCCodec(codecConfig,cinst))
  {
    CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  if(mPtrVoECodec->SetSendCodec(mChannel, cinst) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s SetSendCodec - Invalid Codec %d ",__FUNCTION__, error);

    // Distinguish codec-level failures from other engine errors.
    if(error == VE_CANNOT_SET_SEND_CODEC || error == VE_CODEC_ERROR)
    {
      return kMediaConduitInvalidSendCodec;
    }
    return kMediaConduitUnknownError;
  }

  // TEMPORARY - see bug 694814 comment 2
  // Pull AEC and capture-delay settings from preferences on each (re)config.
  nsresult rv;
  nsCOMPtr<nsIPrefService> prefs =
    do_GetService("@mozilla.org/preferences-service;1", &rv);
  if (NS_SUCCEEDED(rv)) {
    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);

    if (branch) {
      int32_t aec = 0; // 0 == unchanged
      bool aec_on = false;

      // Best-effort reads: defaults above stand if the prefs are absent.
      branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on);
      branch->GetIntPref("media.peerconnection.aec", &aec);

      CSFLogDebug(logTag,"Audio config: aec: %d", aec_on ? aec : -1);
      mEchoOn = aec_on;
      if (static_cast<webrtc::EcModes>(aec) != webrtc::kEcUnchanged)
        mEchoCancel = static_cast<webrtc::EcModes>(aec);

      branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay);
    }
  }

  if (0 != (error = mPtrVoEProcessing->SetEcStatus(mEchoOn, mEchoCancel))) {
    CSFLogError(logTag,"%s Error setting EcStatus: %d ",__FUNCTION__, error);
    return kMediaConduitUnknownError;
  }

  // Let's Send Transport State-machine on the Engine.
  if(mPtrVoEBase->StartSend(mChannel) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s StartSend failed %d", __FUNCTION__, error);
    return kMediaConduitUnknownError;
  }

  // Copy the applied config for future reference.
  delete mCurSendCodecConfig;
  mCurSendCodecConfig = new AudioCodecConfig(codecConfig->mType,
                                             codecConfig->mName,
                                             codecConfig->mFreq,
                                             codecConfig->mPacSize,
                                             codecConfig->mChannels,
                                             codecConfig->mRate);
  mEngineTransmitting = true;
  return kMediaConduitNoError;
}
/**
 * Applies the negotiated audio send codec to the voice engine and (re)starts
 * transmission. If the engine is already sending it is stopped first, since
 * the send codec cannot be swapped mid-stream.
 *
 * @param codecConfig  Negotiated send codec; validated before use.
 * @return kMediaConduitNoError on success; kMediaConduitInvalidSendCodec for
 *         codec-level engine errors; other specific errors otherwise.
 */
MediaConduitErrorCode
WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  webrtc::CodecInst sendCodec;

  // Reject malformed or duplicate configs up front.
  MediaConduitErrorCode validation = ValidateCodecConfig(codecConfig, true);
  if (validation != kMediaConduitNoError) {
    return validation;
  }

  // The engine must be idle before a new send codec can be applied.
  if (mEngineTransmitting) {
    CSFLogDebug(logTag, "%s Engine Already Sending. Attemping to Stop ", __FUNCTION__);
    if (mPtrVoEBase->StopSend(mChannel) == -1) {
      CSFLogError(logTag, "%s StopSend() Failed %d ", __FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitUnknownError;
    }
  }
  mEngineTransmitting = false;

  // Translate our config into the engine's codec descriptor.
  if (!CodecConfigToWebRTCCodec(codecConfig, sendCodec)) {
    CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  if (mPtrVoECodec->SetSendCodec(mChannel, sendCodec) == -1) {
    int engineError = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s SetSendCodec - Invalid Codec %d ",__FUNCTION__,
                engineError);

    // Codec-specific failures map to a dedicated error code.
    bool codecLevelFailure =
      (engineError == VE_CANNOT_SET_SEND_CODEC) || (engineError == VE_CODEC_ERROR);
    return codecLevelFailure ? kMediaConduitInvalidSendCodec
                             : kMediaConduitUnknownError;
  }

  // Kick the engine's send-side transport state machine.
  if (mPtrVoEBase->StartSend(mChannel) == -1) {
    int engineError = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s StartSend failed %d", __FUNCTION__, engineError);
    return kMediaConduitUnknownError;
  }

  // Remember the applied config for later match checks.
  delete mCurSendCodecConfig;
  mCurSendCodecConfig = new AudioCodecConfig(codecConfig->mType,
                                             codecConfig->mName,
                                             codecConfig->mFreq,
                                             codecConfig->mPacSize,
                                             codecConfig->mChannels,
                                             codecConfig->mRate);

  mEngineTransmitting = true;
  return kMediaConduitNoError;
}
/**
 * Note: Setting the send-codec on the Video Engine will restart the encoder,
 * sets up new SSRC and reset RTP_RTCP module with the new codec setting.
 *
 * Configures the video engine's send side: validates the config, stops any
 * in-progress transmission, matches the codec against the engine's built-in
 * table, applies it, enables NACK if negotiated, and restarts sending.
 *
 * Fix: video_codec was previously left uninitialized; if GetCodec() failed
 * for every index, CodecConfigToWebRTCCodec()/SetSendCodec() would have read
 * indeterminate bytes. It is now zeroed at declaration (matching the other
 * ConfigureSendMediaCodec variant in this file).
 *
 * @param codecConfig  Negotiated send codec; validated before use.
 * @return kMediaConduitNoError on success; a specific error otherwise.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureSendMediaCodec(const VideoCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  bool codecFound = false;
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; // webrtc engine errors
  webrtc::VideoCodec video_codec;
  std::string payloadName;

  // Zero-initialize the engine codec struct before handing it to the engine.
  memset(&video_codec, 0, sizeof(video_codec));

  // Validate basic params.
  if((condError = ValidateCodecConfig(codecConfig,true)) != kMediaConduitNoError)
  {
    return condError;
  }

  // Check if we have same codec already applied.
  if(CheckCodecsForMatch(mCurSendCodecConfig, codecConfig))
  {
    CSFLogDebug(logTag, "%s Codec has been applied already ", __FUNCTION__);
    return kMediaConduitCodecInUse;
  }

  // Transmitting already? Stop before changing the send codec.
  if(mEngineTransmitting)
  {
    CSFLogDebug(logTag, "%s Engine Already Sending. Attemping to Stop ", __FUNCTION__);
    if(mPtrViEBase->StopSend(mChannel) == -1)
    {
      CSFLogError(logTag, "%s StopSend() Failed %d ",__FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitUnknownError;
    }
  }

  mEngineTransmitting = false;

  // Hook up CPU-load management if the config provides a load manager.
  if (codecConfig->mLoadManager) {
    mPtrViEBase->RegisterCpuOveruseObserver(mChannel, codecConfig->mLoadManager);
    mPtrViEBase->SetLoadManager(codecConfig->mLoadManager);
  }

  // We should be good here to set the new codec: find the engine's
  // pre-populated entry matching our payload name, then overlay our params.
  for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
  {
    if(0 == mPtrViECodec->GetCodec(idx, video_codec))
    {
      payloadName = video_codec.plName;
      if(codecConfig->mName.compare(payloadName) == 0)
      {
        CodecConfigToWebRTCCodec(codecConfig, video_codec);
        codecFound = true;
        break;
      }
    }
  }//for

  if(codecFound == false)
  {
    CSFLogError(logTag, "%s Codec Mismatch ", __FUNCTION__);
    return kMediaConduitInvalidSendCodec;
  }

  if(mPtrViECodec->SetSendCodec(mChannel, video_codec) == -1)
  {
    error = mPtrViEBase->LastError();
    if(error == kViECodecInvalidCodec)
    {
      CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__);
      return kMediaConduitInvalidSendCodec;
    }
    CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitUnknownError;
  }

  // Forces a resolution (re)negotiation on the next sent frame.
  mSendingWidth = 0;
  mSendingHeight = 0;

  if(codecConfig->RtcpFbIsSet(SDP_RTCP_FB_NACK_BASIC)) {
    CSFLogDebug(logTag, "Enabling NACK (send) for video stream\n");
    if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
    {
      CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitNACKStatusError;
    }
  }

  if(mPtrViEBase->StartSend(mChannel) == -1)
  {
    CSFLogError(logTag, "%s Start Send Error %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitUnknownError;
  }

  // Copy the applied config for future reference.
  delete mCurSendCodecConfig;
  mCurSendCodecConfig = new VideoCodecConfig(*codecConfig);

  // REMB: sender-side estimation enabled, receiver-side disabled.
  mPtrRTP->SetRembStatus(mChannel, true, false);

  // by now we should be successfully started the transmission
  mEngineTransmitting = true;
  return kMediaConduitNoError;
}
/**
 * Configures the video engine's receive side from a list of negotiated codecs
 * (older variant: manages mEngineReceiving directly and calls
 * StopReceive/StartReceive on the engine itself).
 *
 * Fixes: corrected the "Unable to updated Codec Database" log typo to match
 * the wording used elsewhere in this file, and removed a redundant
 * `mEngineReceiving = false;` inside the codec loop (it is already false by
 * that point and nothing sets it true until the end of the function).
 *
 * @param codecConfigList  Negotiated receive codecs; must be non-empty.
 * @return kMediaConduitNoError on success; a specific error otherwise.
 *         Succeeds if at least one codec was applied and reception started.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureRecvMediaCodecs(
    const std::vector<VideoCodecConfig* >& codecConfigList)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; // webrtc engine errors
  bool success = false;
  std::string payloadName;

  // Are we receiving already? If so, stop receiving and playout
  // since we can't apply new recv codec when the engine is playing.
  if(mEngineReceiving)
  {
    CSFLogDebug(logTag, "%s Engine Already Receiving . Attemping to Stop ", __FUNCTION__);
    if(mPtrViEBase->StopReceive(mChannel) == -1)
    {
      error = mPtrViEBase->LastError();
      // kViEBaseUnknownError here means the channel was not actually
      // receiving, which is fine for our purposes — treat as stopped.
      if(error == kViEBaseUnknownError)
      {
        CSFLogDebug(logTag, "%s StopReceive() Success ", __FUNCTION__);
        mEngineReceiving = false;
      } else {
        CSFLogError(logTag, "%s StopReceive() Failed %d ", __FUNCTION__,
                    mPtrViEBase->LastError());
        return kMediaConduitUnknownError;
      }
    }
  }

  mEngineReceiving = false;

  if(codecConfigList.empty())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  // Feedback capabilities accumulated across ALL codecs in the list; see the
  // long XXX note below for why this union is applied conduit-wide.
  webrtc::ViEKeyFrameRequestMethod kf_request = webrtc::kViEKeyFrameRequestNone;
  bool use_nack_basic = false;

  // Try applying the codecs in the list.
  // We treat it as success if at least one codec was applied and reception was
  // started successfully.
  for(std::vector<VideoCodecConfig*>::size_type i=0;i < codecConfigList.size();i++)
  {
    // If the codec param is invalid or duplicate, return error.
    if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError)
    {
      return condError;
    }

    // Check for the keyframe request type: PLI is preferred
    // over FIR, and FIR is preferred over none.
    if (codecConfigList[i]->RtcpFbIsSet(SDP_RTCP_FB_NACK_PLI))
    {
      kf_request = webrtc::kViEKeyFrameRequestPliRtcp;
    } else if(kf_request == webrtc::kViEKeyFrameRequestNone &&
              codecConfigList[i]->RtcpFbIsSet(SDP_RTCP_FB_CCM_FIR))
    {
      kf_request = webrtc::kViEKeyFrameRequestFirRtcp;
    }

    // Check whether NACK is requested.
    if(codecConfigList[i]->RtcpFbIsSet(SDP_RTCP_FB_NACK_BASIC))
    {
      use_nack_basic = true;
    }

    webrtc::VideoCodec video_codec;
    memset(&video_codec, 0, sizeof(webrtc::VideoCodec));

    // Retrieve pre-populated codec structure for our codec, matched by
    // payload name against the engine's built-in table.
    for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
    {
      if(mPtrViECodec->GetCodec(idx, video_codec) == 0)
      {
        payloadName = video_codec.plName;
        if(codecConfigList[i]->mName.compare(payloadName) == 0)
        {
          // Overlay our negotiated parameters on the engine's defaults.
          CodecConfigToWebRTCCodec(codecConfigList[i], video_codec);
          if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1)
          {
            CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__,
                        mPtrViEBase->LastError());
          } else {
            CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__,
                        codecConfigList[i]->mName.c_str());
            // Mirror the applied codec into our local database.
            if(CopyCodecToDB(codecConfigList[i]))
            {
              success = true;
            } else {
              CSFLogError(logTag,"%s Unable to update Codec Database", __FUNCTION__);
              return kMediaConduitUnknownError;
            }
          }
          break; // we found a match
        }
      }
    } // end for codeclist
  } // end for

  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  // XXX Currently, we gather up all of the feedback types that the remote
  // party indicated it supports for all video codecs and configure the entire
  // conduit based on those capabilities. This is technically out of spec,
  // as these values should be configured on a per-codec basis. However,
  // the video engine only provides this API on a per-conduit basis, so that's
  // how we have to do it. The approach of considering the remote capablities
  // for the entire conduit to be a union of all remote codec capabilities
  // (rather than the more conservative approach of using an intersection)
  // is made to provide as many feedback mechanisms as are likely to be
  // processed by the remote party (and should be relatively safe, since the
  // remote party is required to ignore feedback types that it does not
  // understand).
  //
  // Note that our configuration uses this union of remote capabilites as
  // input to the configuration. It is not isomorphic to the configuration.
  // For example, it only makes sense to have one frame request mechanism
  // active at a time; so, if the remote party indicates more than one
  // supported mechanism, we're only configuring the one we most prefer.
  //
  // See http://code.google.com/p/webrtc/issues/detail?id=2331
  if (kf_request != webrtc::kViEKeyFrameRequestNone)
  {
    CSFLogDebug(logTag, "Enabling %s frame requests for video stream\n",
                (kf_request == webrtc::kViEKeyFrameRequestPliRtcp ?
                 "PLI" : "FIR"));
    if(mPtrRTP->SetKeyFrameRequestMethod(mChannel, kf_request) != 0)
    {
      CSFLogError(logTag, "%s KeyFrameRequest Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitKeyFrameRequestError;
    }
  }

  // Record which frame-request mechanism is active for stats/diagnostics.
  switch (kf_request) {
    case webrtc::kViEKeyFrameRequestNone:
      mFrameRequestMethod = FrameRequestNone;
      break;
    case webrtc::kViEKeyFrameRequestPliRtcp:
      mFrameRequestMethod = FrameRequestPli;
      break;
    case webrtc::kViEKeyFrameRequestFirRtcp:
      mFrameRequestMethod = FrameRequestFir;
      break;
    default:
      MOZ_ASSERT(PR_FALSE);
      mFrameRequestMethod = FrameRequestUnknown;
  }

  if(use_nack_basic)
  {
    CSFLogDebug(logTag, "Enabling NACK (recv) for video stream\n");
    if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
    {
      CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitNACKStatusError;
    }
  }
  mUsingNackBasic = use_nack_basic;

  // Start Receive on the video engine.
  if(mPtrViEBase->StartReceive(mChannel) == -1)
  {
    error = mPtrViEBase->LastError();
    CSFLogError(logTag, "%s Start Receive Error %d ", __FUNCTION__, error);
    return kMediaConduitUnknownError;
  }

#ifdef MOZILLA_INTERNAL_API
  if (NS_IsMainThread()) {
    nsresult rv;
    nsCOMPtr<nsIPrefService> prefs =
      do_GetService("@mozilla.org/preferences-service;1", &rv);
    if (NS_SUCCEEDED(rv)) {
      nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);

      if (branch) {
        branch->GetBoolPref("media.video.test_latency", &mVideoLatencyTestEnable);
      }
    }
  }
#endif

  // by now we should be successfully started the reception
  // REMB: receive=false, send-side-estimation feedback=true for this channel.
  mPtrRTP->SetRembStatus(mChannel, false, true);
  mEngineReceiving = true;
  DumpCodecDB();
  return kMediaConduitNoError;
}
/**
 * Configures the voice engine's receive side from a list of negotiated audio
 * codecs: stops reception, registers each codec's receive payload type with
 * the engine, then restarts reception.
 *
 * Fix: corrected the "Unable to updated Codec Database" log typo.
 *
 * Unlike hard failures (validation, DB update), a per-codec engine failure is
 * only logged and skipped — success requires just one codec to apply.
 *
 * @param codecConfigList  Negotiated receive codecs; must be non-empty.
 * @return kMediaConduitNoError on success; a specific error otherwise.
 */
MediaConduitErrorCode
WebrtcAudioConduit::ConfigureRecvMediaCodecs(
    const std::vector<AudioCodecConfig*>& codecConfigList)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; // webrtc engine errors
  bool success = false;

  // Are we receiving already? If so, stop receiving and playout
  // since we can't apply new recv codec when the engine is playing.
  condError = StopReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if(codecConfigList.empty())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  // Try Applying the codecs in the list.
  // We succeed if at least one codec was applied and reception was
  // started successfully.
  for(std::vector<AudioCodecConfig*>::size_type i=0 ;i<codecConfigList.size();i++)
  {
    // If the codec param is invalid or duplicate, return error.
    if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError)
    {
      return condError;
    }

    webrtc::CodecInst cinst;
    if(!CodecConfigToWebRTCCodec(codecConfigList[i],cinst))
    {
      // Conversion failure for one codec is non-fatal; try the next.
      CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__);
      continue;
    }

    if(mPtrVoECodec->SetRecPayloadType(mChannel,cinst) == -1)
    {
      // Engine rejection of one codec is non-fatal; try the next.
      error = mPtrVoEBase->LastError();
      CSFLogError(logTag, "%s SetRecvCodec Failed %d ",__FUNCTION__, error);
      continue;
    } else {
      CSFLogDebug(logTag, "%s Successfully Set RecvCodec %s", __FUNCTION__,
                  codecConfigList[i]->mName.c_str());
      // Copy this to local database.
      if(CopyCodecToDB(codecConfigList[i]))
      {
        success = true;
      } else {
        CSFLogError(logTag,"%s Unable to update Codec Database", __FUNCTION__);
        return kMediaConduitUnknownError;
      }
    }
  } // end for

  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  // If we are here, at least one codec should have been set.
  condError = StartReceiving();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  DumpCodecDB();
  return kMediaConduitNoError;
}
/**
 * Pulls one block of decoded audio samples out of the voice engine's external
 * playout path, with periodic A/V-sync logging and (when enabled) recv-side
 * latency tracing.
 *
 * @param speechData     Caller-owned output buffer; must be non-null and
 *                       large enough for 10 ms at samplingFreqHz —
 *                       presumably; size is not checked here. TODO confirm.
 * @param samplingFreqHz Requested sample rate; must map to a valid 10 ms
 *                       sample count.
 * @param capture_delay  Capture-side delay hint; must be >= 0.
 * @param lengthSamples  [out] Number of samples written.
 * @return kMediaConduitNoError on success; a specific error otherwise.
 */
MediaConduitErrorCode
WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                  int32_t samplingFreqHz,
                                  int32_t capture_delay,
                                  int& lengthSamples)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  unsigned int numSamples = 0;

  // Validate params.
  if(!speechData )
  {
    CSFLogError(logTag,"%s Null Audio Buffer Pointer", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Validate sample length: zero means the frequency is unsupported.
  if((numSamples = GetNum10msSamplesForFrequency(samplingFreqHz)) == 0 )
  {
    CSFLogError(logTag,"%s Invalid Sampling Frequency ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Validate capture time.
  if(capture_delay < 0 )
  {
    CSFLogError(logTag,"%s Invalid Capture Delay ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Conduit should have reception enabled before we ask for decoded
  // samples.
  if(!mEngineReceiving)
  {
    CSFLogError(logTag, "%s Engine not Receiving ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  lengthSamples = 0; // output parameter

  if(mPtrVoEXmedia->ExternalPlayoutGetData( speechData,
                                            samplingFreqHz,
                                            capture_delay,
                                            lengthSamples) == -1)
  {
    int error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s Getting audio data Failed %d", __FUNCTION__, error);
    if(error == VE_RUNTIME_PLAY_ERROR)
    {
      return kMediaConduitPlayoutError;
    }
    return kMediaConduitUnknownError;
  }

  // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
  // Log A/V sync stats roughly once per second of delivered audio.
  mSamples += lengthSamples;
  if (mSamples >= mLastSyncLog + samplingFreqHz) {
    int jitter_buffer_delay_ms;
    int playout_buffer_delay_ms;
    int avsync_offset_ms;
    if (GetAVStats(&jitter_buffer_delay_ms,
                   &playout_buffer_delay_ms,
                   &avsync_offset_ms)) {
#if !defined(MOZILLA_EXTERNAL_LINKAGE)
      // Sign of the offset selects which lagging-direction probe to feed.
      if (avsync_offset_ms < 0) {
        Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_VIDEO_LAGS_AUDIO_MS,
                              -avsync_offset_ms);
      } else {
        Telemetry::Accumulate(Telemetry::WEBRTC_AVSYNC_WHEN_AUDIO_LAGS_VIDEO_MS,
                              avsync_offset_ms);
      }
#endif
      // NOTE(review): logged at Error level, presumably so the periodic sync
      // report is visible in release logs — confirm intent.
      CSFLogError(logTag,
                  "A/V sync: sync delta: %dms, audio jitter delay %dms, playout delay %dms",
                  avsync_offset_ms, jitter_buffer_delay_ms, playout_buffer_delay_ms);
    } else {
      CSFLogError(logTag, "A/V sync: GetAVStats failed");
    }
    mLastSyncLog = mSamples;
  }

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // Recv-side latency tracing: match the playout timestamp against queued
  // network-arrival records and log the end-to-end delta.
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    if (mProcessing.Length() > 0) {
      unsigned int now;
      mPtrVoEVideoSync->GetPlayoutTimestamp(mChannel, now);
      if (static_cast<uint32_t>(now) != mLastTimestamp) {
        mLastTimestamp = static_cast<uint32_t>(now);
        // Find the block that includes this timestamp in the network input
        while (mProcessing.Length() > 0) {
          // FIX! assumes 20ms @ 48000Hz
          // FIX handle wrap-around
          if (mProcessing[0].mRTPTimeStamp + 20*(48000/1000) >= now) {
            TimeDuration t = TimeStamp::Now() - mProcessing[0].mTimeStamp;
            // Wrap-around?
            int64_t delta = t.ToMilliseconds() + (now - mProcessing[0].mRTPTimeStamp)/(48000/1000);
            LogTime(AsyncLatencyLogger::AudioRecvRTP, ((uint64_t) this), delta);
            break;
          }
          // Older-than-playout records are stale; drop them.
          mProcessing.RemoveElementAt(0);
        }
      }
    }
  }
#endif
  CSFLogDebug(logTag,"%s GetAudioFrame:Got samples: length %d ",__FUNCTION__,
              lengthSamples);
  return kMediaConduitNoError;
}
/**
 * Pulls one block of decoded audio samples out of the voice engine's external
 * playout path.
 *
 * @param speechData     Caller-owned output buffer; must be non-null.
 * @param samplingFreqHz Requested sample rate; must map to a valid 10 ms
 *                       sample count.
 * @param capture_delay  Capture-side delay hint; must be >= 0.
 * @param lengthSamples  [out] Number of samples written.
 * @return kMediaConduitNoError on success; a specific error otherwise.
 */
MediaConduitErrorCode
WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                  int32_t samplingFreqHz,
                                  int32_t capture_delay,
                                  int& lengthSamples)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  // Guard: output buffer must exist.
  if(!speechData )
  {
    CSFLogError(logTag,"%s Null Audio Buffer Pointer", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Guard: the frequency must map to a non-zero 10 ms sample count.
  unsigned int samplesPer10ms = GetNum10msSamplesForFrequency(samplingFreqHz);
  if(samplesPer10ms == 0 )
  {
    CSFLogError(logTag,"%s Invalid Sampling Frequency ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Guard: capture delay cannot be negative.
  if(capture_delay < 0 )
  {
    CSFLogError(logTag,"%s Invalid Capture Delay ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Decoded samples are only available while reception is enabled.
  if(!mEngineReceiving)
  {
    CSFLogError(logTag, "%s Engine not Receiving ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  lengthSamples = 0; // output parameter

  int fetchResult = mPtrVoEXmedia->ExternalPlayoutGetData( speechData,
                                                           samplingFreqHz,
                                                           capture_delay,
                                                           lengthSamples);
  if(fetchResult == -1)
  {
    int engineError = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s Getting audio data Failed %d", __FUNCTION__,
                engineError);
    return (engineError == VE_RUNTIME_PLAY_ERROR) ? kMediaConduitPlayoutError
                                                  : kMediaConduitUnknownError;
  }

  CSFLogDebug(logTag,"%s GetAudioFrame:Got samples: length %d ",__FUNCTION__,
              lengthSamples);
  return kMediaConduitNoError;
}
/**
 * Note: Setting the send-codec on the Video Engine will restart the encoder,
 * sets up new SSRC and reset RTP_RTCP module with the new codec setting.
 *
 * Configures the video engine's send side (newer variant with external H.264
 * encoder support): validates the config, short-circuits if it is already
 * applied, stops any in-progress transmission, selects either the external
 * codec path or the engine's built-in codec table, applies the codec, enables
 * NACK if negotiated, and restarts sending.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureSendMediaCodec(const VideoCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s for %s", __FUNCTION__,
              codecConfig ? codecConfig->mName.c_str() : "<null>");
  bool codecFound = false;
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; // webrtc engine errors
  webrtc::VideoCodec video_codec;
  std::string payloadName;

  memset(&video_codec, 0, sizeof(video_codec));

  // Validate basic params.
  if((condError = ValidateCodecConfig(codecConfig,true)) != kMediaConduitNoError)
  {
    return condError;
  }

  // Check if we have same codec already applied — avoids an encoder restart.
  if(CheckCodecsForMatch(mCurSendCodecConfig, codecConfig))
  {
    CSFLogDebug(logTag, "%s Codec has been applied already ", __FUNCTION__);
    return kMediaConduitCodecInUse;
  }

  // Transmitting already? Stop before changing the send codec.
  if(mEngineTransmitting)
  {
    CSFLogDebug(logTag, "%s Engine Already Sending. Attemping to Stop ", __FUNCTION__);
    if(mPtrViEBase->StopSend(mChannel) == -1)
    {
      CSFLogError(logTag, "%s StopSend() Failed %d ",__FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitUnknownError;
    }
    mEngineTransmitting = false;
  }

  // Hook up CPU-load management when a load manager is configured.
  if (mLoadManager) {
    mPtrViEBase->RegisterCpuOveruseObserver(mChannel, mLoadManager);
    mPtrViEBase->SetLoadManager(mLoadManager);
  }

  if (mExternalSendCodec &&
      codecConfig->mType == mExternalSendCodec->mType) {
    // NOTE(review): informational message logged at Error level — presumably
    // intentional so it shows up in release logs; confirm.
    CSFLogError(logTag, "%s Configuring External H264 Send Codec", __FUNCTION__);

    // width/height will be overridden on the first frame
    video_codec.width = 320;
    video_codec.height = 240;
    video_codec.qpMax = 56;
    video_codec.numberOfSimulcastStreams = 1;
    video_codec.mode = webrtc::kRealtimeVideo;

    codecFound = true;
  } else {
    // we should be good here to set the new codec.
    for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
    {
      if(0 == mPtrViECodec->GetCodec(idx, video_codec))
      {
        payloadName = video_codec.plName;
        if(codecConfig->mName.compare(payloadName) == 0)
        {
          // Note: side-effect of this is that video_codec is filled in
          // by GetCodec()
          codecFound = true;
          break;
        }
      }
    }//for
  }

  if(codecFound == false)
  {
    CSFLogError(logTag, "%s Codec Mismatch ", __FUNCTION__);
    return kMediaConduitInvalidSendCodec;
  }
  // Note: only for overriding parameters from GetCodec()!
  CodecConfigToWebRTCCodec(codecConfig, video_codec);

  if(mPtrViECodec->SetSendCodec(mChannel, video_codec) == -1)
  {
    error = mPtrViEBase->LastError();
    if(error == kViECodecInvalidCodec)
    {
      CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__);
      return kMediaConduitInvalidSendCodec;
    }
    CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitUnknownError;
  }

  // Lazily create the send-side codec statistics collector.
  if (!mVideoCodecStat) {
    mVideoCodecStat = new VideoCodecStatistics(mChannel, mPtrViECodec, true);
  }

  // Forces a resolution (re)negotiation on the next sent frame.
  mSendingWidth = 0;
  mSendingHeight = 0;

  if(codecConfig->RtcpFbIsSet(SDP_RTCP_FB_NACK_BASIC)) {
    CSFLogDebug(logTag, "Enabling NACK (send) for video stream\n");
    if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
    {
      CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitNACKStatusError;
    }
  }

  if(mPtrViEBase->StartSend(mChannel) == -1)
  {
    CSFLogError(logTag, "%s Start Send Error %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitUnknownError;
  }

  // Copy the applied config for future reference.
  delete mCurSendCodecConfig;
  mCurSendCodecConfig = new VideoCodecConfig(*codecConfig);

  // REMB: sender-side estimation enabled, receiver-side disabled.
  mPtrRTP->SetRembStatus(mChannel, true, false);

  // by now we should be successfully started the transmission
  mEngineTransmitting = true;
  return kMediaConduitNoError;
}
/*
 * WebRTCAudioConduit Implementation
 */
/**
 * Initializes this audio conduit. When `other` is supplied, this conduit is
 * the second half of a send/recv pair and shares the other's voice engine
 * and channel; otherwise it creates the engine, acquires the VoE interfaces,
 * creates a channel, and wires up external transport/recording/playout.
 *
 * Fixes:
 *  - SetExternalRecordingStatus failure now returns
 *    kMediaConduitExternalRecordingError (was kMediaConduitExternalPlayoutError),
 *    and SetExternalPlayoutStatus failure returns
 *    kMediaConduitExternalPlayoutError (was ...RecordingError) — the two
 *    error codes were swapped.
 *  - Corrected the "VoEBCodec" log typo to "VoECodec".
 *
 * @param other  Optional paired conduit to share engine/channel with.
 * @return kMediaConduitNoError on success; a specific error otherwise.
 */
MediaConduitErrorCode WebrtcAudioConduit::Init(WebrtcAudioConduit *other)
{
  CSFLogDebug(logTag, "%s this=%p other=%p", __FUNCTION__, this, other);

  if (other) {
    MOZ_ASSERT(!other->mOtherDirection);
    other->mOtherDirection = this;
    mOtherDirection = other;

    // only one can call ::Create()/GetVoiceEngine()
    MOZ_ASSERT(other->mVoiceEngine);
    mVoiceEngine = other->mVoiceEngine;
  } else {
    // Per WebRTC APIs below function calls return NULL on failure.
    if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
    {
      CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    // Enable webrtc-internal trace logging once, process-wide, when our log
    // module is active.
    PRLogModuleInfo *logs = GetWebRTCLogInfo();
    if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
      // no need to a critical section or lock here
      gWebrtcTraceLoggingOn = 1;

      const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
      if (!file) {
        file = "WebRTC.log";
      }
      CSFLogDebug(logTag, "%s Logging webrtc to %s level %d", __FUNCTION__,
                  file, logs->level);
      mVoiceEngine->SetTraceFilter(logs->level);
      mVoiceEngine->SetTraceFile(file);
    }
  }

  // Acquire every VoE sub-interface we use; each is refcounted and released
  // in the destructor.
  if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEBase", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoENetwork = VoENetwork::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoENetwork", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoECodec = VoECodec::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoECodec", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if (other) {
    // Paired conduit: reuse the already-created channel.
    mChannel = other->mChannel;
  } else {
    // init the engine with our audio device layer
    if(mPtrVoEBase->Init() == -1)
    {
      CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
    {
      CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
      return kMediaConduitChannelError;
    }

    CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);

    if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
    {
      CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
      return kMediaConduitTransportRegistrationFail;
    }

    if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
    {
      CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitExternalRecordingError;
    }

    if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
    {
      CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitExternalPlayoutError;
    }

    CSFLogDebug(logTag, "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
  }
  return kMediaConduitNoError;
}
/**
 * Destruction defines for our super-classes
 *
 * Tears down the audio conduit: frees the locally-owned codec configs,
 * shuts down engine media (only if this is the first of a paired
 * send/recv couple to be destroyed — see mShutDown), releases each VoE
 * sub-interface, and finally deletes the voice engine if this conduit
 * was the one that created it.
 */
WebrtcAudioConduit::~WebrtcAudioConduit()
{
#ifdef MOZILLA_INTERNAL_API
  // unit tests create their own "main thread"
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
#endif
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

  // We own the heap-allocated codec configs in our receive DB.
  for(std::vector<AudioCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
  {
    delete mRecvCodecList[i];
  }
  delete mCurSendCodecConfig;

  // The first one of a pair to be deleted shuts down media for both
  if(mPtrVoEXmedia)
  {
    if (!mShutDown) {
      mPtrVoEXmedia->SetExternalRecordingStatus(false);
      mPtrVoEXmedia->SetExternalPlayoutStatus(false);
    }
    mPtrVoEXmedia->Release();
  }

  if(mPtrVoEProcessing)
  {
    mPtrVoEProcessing->Release();
  }

  //Deal with the transport
  if(mPtrVoENetwork)
  {
    if (!mShutDown) {
      mPtrVoENetwork->DeRegisterExternalTransport(mChannel);
    }
    mPtrVoENetwork->Release();
  }

  if(mPtrVoECodec)
  {
    mPtrVoECodec->Release();
  }

  if(mPtrVoEBase)
  {
    // Base is released last so the channel outlives the other interfaces'
    // teardown calls above.
    if (!mShutDown) {
      mPtrVoEBase->StopPlayout(mChannel);
      mPtrVoEBase->StopSend(mChannel);
      mPtrVoEBase->StopReceive(mChannel);
      mPtrVoEBase->DeleteChannel(mChannel);
      mPtrVoEBase->Terminate();
    }
    mPtrVoEBase->Release();
  }

  if (mOtherDirection)
  {
    // mOtherDirection owns these now!
    mOtherDirection->mOtherDirection = NULL;
    // let other side we terminated the channel
    mOtherDirection->mShutDown = true;
    mVoiceEngine = nullptr;
  } else {
    // only one opener can call Delete.  Have it be the last to close.
    if(mVoiceEngine)
    {
      webrtc::VoiceEngine::Delete(mVoiceEngine);
    }
  }
}
/**
 * Forwards the enabled-video-codec bitmask to the SIPCC binding layer.
 *
 * @param codecMask  bitmask of enabled video codecs (logged in hex).
 */
void CallControlManagerImpl::setVideoCodecs(int codecMask)
{
  // Trace the mask first, then hand it off to the binding singleton.
  CSFLogDebug(logTag, "setVideoCodecs %X", codecMask);

  VcmSIPCCBinding::setVideoCodecs(codecMask);
}
/**
 * Destructor: tears down the video conduit.
 *
 * Frees codec bookkeeping, then disconnects/releases the capture device,
 * external codec, renderer, transport, codec and base interfaces acquired
 * in Init().  Channel-level teardown is skipped when mShutDown is set
 * (the paired conduit already shut the shared channel down).  The engine
 * itself is deleted only by the conduit that created it (no
 * mOtherDirection).
 */
WebrtcVideoConduit::~WebrtcVideoConduit()
{
#ifdef MOZILLA_INTERNAL_API
  // unit tests create their own "main thread"
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
#endif
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  // Receive-codec DB entries were heap-copied in CopyCodecToDB; free them.
  for(std::vector<VideoCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
  {
    delete mRecvCodecList[i];
  }
  delete mCurSendCodecConfig;

  // The first one of a pair to be deleted shuts down media for both

  //Deal with External Capturer
  if(mPtrViECapture)
  {
    if (!mShutDown) {
      mPtrViECapture->DisconnectCaptureDevice(mCapId);
      mPtrViECapture->ReleaseCaptureDevice(mCapId);
      mPtrExtCapture = nullptr;
      // The capture device is shared; clear the partner's pointer too.
      if (mOtherDirection)
        mOtherDirection->mPtrExtCapture = nullptr;
    }
    mPtrViECapture->Release();
  }

  if (mPtrExtCodec) {
    mPtrExtCodec->Release();
    mPtrExtCodec = NULL;
  }

  //Deal with External Renderer
  if(mPtrViERender)
  {
    if (!mShutDown) {
      if(mRenderer) {
        mPtrViERender->StopRender(mChannel);
      }
      mPtrViERender->RemoveRenderer(mChannel);
    }
    mPtrViERender->Release();
  }

  //Deal with the transport
  if(mPtrViENetwork)
  {
    if (!mShutDown) {
      mPtrViENetwork->DeregisterSendTransport(mChannel);
    }
    mPtrViENetwork->Release();
  }

  if(mPtrViECodec)
  {
    mPtrViECodec->Release();
  }

  if(mPtrViEBase)
  {
    if (!mShutDown) {
      // Stop media, detach from the audio conduit, then drop the channel.
      mPtrViEBase->StopSend(mChannel);
      mPtrViEBase->StopReceive(mChannel);
      SyncTo(nullptr);
      mPtrViEBase->DeleteChannel(mChannel);
    }
    mPtrViEBase->Release();
  }

  if (mPtrRTP)
  {
    mPtrRTP->Release();
  }

  if (mOtherDirection)
  {
    // mOtherDirection owns these now!
    mOtherDirection->mOtherDirection = nullptr;
    // let other side we terminated the channel
    mOtherDirection->mShutDown = true;
    mVideoEngine = nullptr;
  } else {
    // only one opener can call Delete. Have it be the last to close.
    if(mVideoEngine)
    {
      webrtc::VideoEngine::Delete(mVideoEngine);
    }
  }
}
/**
 * Registers a local media stream with this PeerConnection.
 *
 * @param aMediaStream  the DOM media stream to add (must be non-null).
 * @param hints         DOMMediaStream::HINT_CONTENTS_{AUDIO,VIDEO} bits
 *                      describing which track types the stream carries.
 * @param stream_id     out: index of the stream in mLocalSourceStreams.
 * @return NS_OK on success (including the no-op empty-hints case),
 *         NS_ERROR_FAILURE on null stream or a duplicate track type.
 *
 * Only one audio track and one video track are currently allowed across
 * all local streams; a second stream carrying an already-present track
 * type is rejected.
 */
nsresult
PeerConnectionMedia::AddStream(nsIDOMMediaStream* aMediaStream,
                               uint32_t hints,
                               uint32_t *stream_id)
{
  ASSERT_ON_THREAD(mMainThread);

  if (!aMediaStream) {
    CSFLogError(logTag, "%s - aMediaStream is NULL", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }

  DOMMediaStream* stream = static_cast<DOMMediaStream*>(aMediaStream);

  CSFLogDebug(logTag, "%s: MediaStream: %p", __FUNCTION__, aMediaStream);

  // Adding tracks here based on nsDOMMediaStream expectation settings
#ifdef MOZILLA_INTERNAL_API
  // Honor the pref that disables video in PeerConnections.
  if (!Preferences::GetBool("media.peerconnection.video.enabled", true)) {
    hints &= ~(DOMMediaStream::HINT_CONTENTS_VIDEO);
  }
#endif

  if (!(hints & (DOMMediaStream::HINT_CONTENTS_AUDIO |
                 DOMMediaStream::HINT_CONTENTS_VIDEO))) {
    CSFLogDebug(logTag, "Empty Stream !!");
    return NS_OK;
  }

  // Now see if we already have this stream or another stream with
  // tracks of the same type, since we only allow one track of each type.
  // TODO([email protected]): remove this when multiple of each stream
  // is allowed
  nsRefPtr<LocalSourceStreamInfo> localSourceStream = nullptr;

  for (uint32_t u = 0; u < mLocalSourceStreams.Length(); u++) {
    auto& lss = mLocalSourceStreams[u];
    if (((hints & DOMMediaStream::HINT_CONTENTS_AUDIO) && lss->AudioTrackCount()) ||
        ((hints & DOMMediaStream::HINT_CONTENTS_VIDEO) && lss->VideoTrackCount())) {
      CSFLogError(logTag, "Only one stream of any given type allowed");
      return NS_ERROR_FAILURE;
    }
    if (stream == lss->GetMediaStream()) {
      localSourceStream = lss;
      *stream_id = u;
      break;
    }
  }

  // First time we see this stream: record it and hand back its index.
  if (!localSourceStream) {
    localSourceStream = new LocalSourceStreamInfo(stream, this);
    mLocalSourceStreams.AppendElement(localSourceStream);
    *stream_id = mLocalSourceStreams.Length() - 1;
  }

  if (hints & DOMMediaStream::HINT_CONTENTS_AUDIO) {
    localSourceStream->ExpectAudio(TRACK_AUDIO);
  }

  if (hints & DOMMediaStream::HINT_CONTENTS_VIDEO) {
    localSourceStream->ExpectVideo(TRACK_VIDEO);
  }
  return NS_OK;
}
/*
 * WebRTCAudioConduit Implementation
 */
/**
 * Creates the VoiceEngine, acquires every VoE sub-API interface this
 * conduit uses, creates the audio channel, and wires up the external
 * transport plus external recording/playout.
 *
 * @return kMediaConduitNoError on success, otherwise an error code
 *         identifying the first initialization step that failed.
 *
 * Fix: the failure paths for SetExternalRecordingStatus and
 * SetExternalPlayoutStatus previously returned each other's error codes
 * (recording failure -> kMediaConduitExternalPlayoutError and vice
 * versa); they now return the matching code.
 */
MediaConduitErrorCode WebrtcAudioConduit::Init()
{
  CSFLogDebug(logTag, "%s this=%p", __FUNCTION__, this);

#ifdef MOZ_WIDGET_ANDROID
  jobject context = jsjni_GetGlobalContextRef();
  // get the JVM
  JavaVM *jvm = jsjni_GetVM();
  JNIEnv* jenv = jsjni_GetJNIForThread();

  if (webrtc::VoiceEngine::SetAndroidObjects(jvm, jenv, (void*)context) != 0) {
    CSFLogError(logTag, "%s Unable to set Android objects", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }
#endif

  // Per WebRTC APIs below function calls return nullptr on failure
  if(!(mVoiceEngine = webrtc::VoiceEngine::Create()))
  {
    CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  EnableWebRtcLog();

  if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEBase", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoENetwork = VoENetwork::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoENetwork", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoECodec = VoECodec::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEBCodec", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoERTP_RTCP = VoERTP_RTCP::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoERTP_RTCP", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEVideoSync = VoEVideoSync::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to initialize VoEVideoSync", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if (!(mPtrRTP = webrtc::VoERTP_RTCP::GetInterface(mVoiceEngine)))
  {
    CSFLogError(logTag, "%s Unable to get audio RTP/RTCP interface ",
                __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  // init the engine with our audio device layer
  if(mPtrVoEBase->Init() == -1)
  {
    CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( (mChannel = mPtrVoEBase->CreateChannel()) == -1)
  {
    CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
    return kMediaConduitChannelError;
  }

  CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);

  if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1)
  {
    CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
    return kMediaConduitTransportRegistrationFail;
  }

  if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1)
  {
    CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
                mPtrVoEBase->LastError());
    // Was kMediaConduitExternalPlayoutError (swapped with the branch below).
    return kMediaConduitExternalRecordingError;
  }

  if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1)
  {
    CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
                mPtrVoEBase->LastError());
    // Was kMediaConduitExternalRecordingError (swapped with the branch above).
    return kMediaConduitExternalPlayoutError;
  }

  CSFLogDebug(logTag , "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
  return kMediaConduitNoError;
}
/**
 * Deprecated: always returns the empty string.
 *
 * The historical contract ("gets the device name") no longer holds; the
 * body unconditionally logs a deprecation notice and returns
 * strlib_empty().
 *
 * @param handle  device-info handle (unused).
 * @returns the shared empty string from strlib.
 */
cc_string_t CCAPI_DeviceInfo_getDeviceName (cc_deviceinfo_ref_t handle)
{
  CSFLogDebug(logTag, "Call to deprecated function %s, returning empty string", __FUNCTION__);
  return strlib_empty();
}
/**
 * Applies @p codecConfig as the channel's send codec.
 *
 * Stops transmission, validates and converts the config to a webrtc
 * CodecInst, installs it via SetSendCodec, reads the capture-delay pref,
 * restarts transmission, and caches a copy of the applied config in
 * mCurSendCodecConfig.
 *
 * @param codecConfig  the audio codec parameters to apply (not owned).
 * @return kMediaConduitNoError on success; kMediaConduitInvalidSendCodec,
 *         kMediaConduitMalformedArgument, kMediaConduitUnknownError, or a
 *         start/stop error otherwise.
 *
 * Fix: the final failure log now reports the already-captured `error`
 * instead of calling LastError() a second time (whose value may have
 * changed since the failing call).
 */
MediaConduitErrorCode
WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0;//webrtc engine errors
  webrtc::CodecInst cinst;

  //validate codec param
  if((condError = ValidateCodecConfig(codecConfig, true)) != kMediaConduitNoError)
  {
    return condError;
  }

  condError = StopTransmitting();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if(!CodecConfigToWebRTCCodec(codecConfig,cinst))
  {
    CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  if(mPtrVoECodec->SetSendCodec(mChannel, cinst) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag, "%s SetSendCodec - Invalid Codec %d ",__FUNCTION__, error);

    if(error == VE_CANNOT_SET_SEND_CODEC || error == VE_CODEC_ERROR)
    {
      CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__);
      return kMediaConduitInvalidSendCodec;
    }
    // Reuse the captured error code; LastError() may have been reset.
    CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__, error);
    return kMediaConduitUnknownError;
  }

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // TEMPORARY - see bug 694814 comment 2
  nsresult rv;
  nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
  if (NS_SUCCEEDED(rv))
  {
    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);

    if (branch)
    {
      branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay);
    }
  }
#endif

  condError = StartTransmitting();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  //Copy the applied config for future reference.
  delete mCurSendCodecConfig;

  mCurSendCodecConfig = new AudioCodecConfig(codecConfig->mType,
                                             codecConfig->mName,
                                             codecConfig->mFreq,
                                             codecConfig->mPacSize,
                                             codecConfig->mChannels,
                                             codecConfig->mRate);
  return kMediaConduitNoError;
}
/**
 * Peforms intialization of the MANDATORY components of the Video Engine
 *
 * Creates the VideoEngine, sets up optional trace logging, acquires the
 * ViE sub-API interfaces, creates the channel, registers this conduit as
 * the send transport and external capturer/renderer, and configures MTU,
 * RTCP mode, PLI keyframe requests, and NACK.
 *
 * @return kMediaConduitNoError on success, otherwise an error code
 *         identifying the first initialization step that failed.
 */
MediaConduitErrorCode WebrtcVideoConduit::Init()
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);

#ifdef MOZ_WIDGET_ANDROID
  jobject context = jsjni_GetGlobalContextRef();

  // get the JVM
  JavaVM *jvm = jsjni_GetVM();
  JNIEnv* env;
  if (jvm->GetEnv((void**)&env, JNI_VERSION_1_4) != JNI_OK) {
    CSFLogError(logTag, "%s: could not get Java environment", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }
  jvm->AttachCurrentThread(&env, nullptr);

  if (webrtc::VideoEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
    CSFLogError(logTag, "%s: could not set Android objects", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  env->DeleteGlobalRef(context);
#endif

  if( !(mVideoEngine = webrtc::VideoEngine::Create()) )
  {
    CSFLogError(logTag, "%s Unable to create video engine ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  // Route webrtc tracing into a file when the log module is enabled.
  PRLogModuleInfo *logs = GetWebRTCLogInfo();
  if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
    // no need to a critical section or lock here
    gWebrtcTraceLoggingOn = 1;

    const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
    if (!file) {
      file = "WebRTC.log";
    }
    CSFLogDebug(logTag,  "%s Logging webrtc to %s level %d", __FUNCTION__,
                file, logs->level);
    mVideoEngine->SetTraceFilter(logs->level);
    mVideoEngine->SetTraceFile(file);
  }

  if( !(mPtrViEBase = ViEBase::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video base interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViECapture = ViECapture::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video capture interface", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViECodec = ViECodec::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video codec interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViENetwork = ViENetwork::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video network interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViERender = ViERender::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video render interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrRTP = webrtc::ViERTP_RTCP::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video RTCP interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  CSFLogDebug(logTag, "%s Engine Created: Init'ng the interfaces ",__FUNCTION__);

  if(mPtrViEBase->Init() == -1)
  {
    CSFLogError(logTag, " %s Video Engine Init Failed %d ",__FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitSessionNotInited;
  }

  if(mPtrViEBase->CreateChannel(mChannel) == -1)
  {
    CSFLogError(logTag, " %s Channel creation Failed %d ",__FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitChannelError;
  }

  if(mPtrViENetwork->RegisterSendTransport(mChannel, *this) == -1)
  {
    CSFLogError(logTag,  "%s ViENetwork Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitTransportRegistrationFail;
  }

  mPtrExtCapture = 0;

  if(mPtrViECapture->AllocateExternalCaptureDevice(mCapId,
                                                   mPtrExtCapture) == -1)
  {
    CSFLogError(logTag, "%s Unable to Allocate capture module: %d ",
                __FUNCTION__, mPtrViEBase->LastError());
    return kMediaConduitCaptureError;
  }

  if(mPtrViECapture->ConnectCaptureDevice(mCapId,mChannel) == -1)
  {
    CSFLogError(logTag, "%s Unable to Connect capture module: %d ",
                __FUNCTION__,mPtrViEBase->LastError());
    return kMediaConduitCaptureError;
  }

  if(mPtrViERender->AddRenderer(mChannel,
                                webrtc::kVideoI420,
                                (webrtc::ExternalRenderer*) this) == -1)
  {
    CSFLogError(logTag, "%s Failed to added external renderer ", __FUNCTION__);
    return kMediaConduitInvalidRenderer;
  }

  // Set up some parameters, per juberti. Set MTU.
  if(mPtrViENetwork->SetMTU(mChannel, 1200) != 0)
  {
    CSFLogError(logTag,  "%s MTU Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitMTUError;
  }

  // Turn on RTCP and loss feedback reporting.
  if(mPtrRTP->SetRTCPStatus(mChannel, webrtc::kRtcpCompound_RFC4585) != 0)
  {
    CSFLogError(logTag,  "%s RTCPStatus Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitRTCPStatusError;
  }

  // Enable pli as key frame request method.
  if(mPtrRTP->SetKeyFrameRequestMethod(mChannel,
                                       webrtc::kViEKeyFrameRequestPliRtcp) != 0)
  {
    CSFLogError(logTag,  "%s KeyFrameRequest Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitKeyFrameRequestError;
  }

  // Enable lossless transport
  // XXX Note: We may want to disable this or limit it
  if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
  {
    CSFLogError(logTag,  "%s NACKStatus Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitNACKStatusError;
  }

  CSFLogError(logTag, "%s Initialization Done", __FUNCTION__);
  return kMediaConduitNoError;
}
/**
 * Pushes one 10ms block of captured audio into the VoiceEngine for
 * encoding and transmission.
 *
 * @param audio_data      non-null PCM sample buffer.
 * @param lengthSamples   sample count; must be a positive multiple of one
 *                        10ms block (samplingFreqHz / 100).
 * @param samplingFreqHz  must pass IsSamplingFreqSupported().
 * @param capture_delay   caller-reported capture delay (ms); must be >= 0.
 *                        Note: it is replaced below by the pref-derived
 *                        mCaptureDelay before insertion.
 * @return kMediaConduitNoError on success, or a malformed-argument /
 *         not-inited / recording / unknown error code.
 */
MediaConduitErrorCode
WebrtcAudioConduit::SendAudioFrame(const int16_t audio_data[],
                                   int32_t lengthSamples,
                                   int32_t samplingFreqHz,
                                   int32_t capture_delay)
{
  CSFLogDebug(logTag,  "%s ", __FUNCTION__);
  // Following checks need to be performed
  // 1. Non null audio buffer pointer,
  // 2. invalid sampling frequency -  less than 0 or unsupported ones
  // 3. Appropriate Sample Length for 10 ms audio-frame. This represents
  //    block size the VoiceEngine feeds into encoder for passed in audio-frame
  //    Ex: for 16000 sampling rate , valid block-length is 160
  //    Similarly for 32000 sampling rate, valid block length is 320
  //    We do the check by the verify modular operator below to be zero
  if(!audio_data || (lengthSamples <= 0) ||
     (IsSamplingFreqSupported(samplingFreqHz) == false) ||
     ((lengthSamples % (samplingFreqHz / 100) != 0)) )
  {
    CSFLogError(logTag, "%s Invalid Parameters ",__FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  //validate capture time
  if(capture_delay < 0 )
  {
    CSFLogError(logTag,"%s Invalid Capture Delay ", __FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // if transmission is not started .. conduit cannot insert frames
  if(!mEngineTransmitting)
  {
    CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // Record an insertion timestamp for media-latency measurement.
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    struct Processing insert = { TimeStamp::Now(), 0 };
    mProcessing.AppendElement(insert);
  }
#endif

  // NOTE(review): the caller-supplied capture_delay (validated above) is
  // discarded here in favor of the "media.peerconnection.capture_delay"
  // pref value read in ConfigureSendMediaCodec — presumably intentional;
  // confirm before relying on the parameter.
  capture_delay = mCaptureDelay;
  //Insert the samples
  if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data,
                                                lengthSamples,
                                                samplingFreqHz,
                                                capture_delay) == -1)
  {
    int error = mPtrVoEBase->LastError();
    CSFLogError(logTag,  "%s Inserting audio data Failed %d", __FUNCTION__, error);
    if(error == VE_RUNTIME_REC_ERROR)
    {
      return kMediaConduitRecordingError;
    }
    return kMediaConduitUnknownError;
  }
  // we should be good here
  return kMediaConduitNoError;
}
/**
 * Applies the list of negotiated receive codecs to the video channel.
 *
 * Stops reception if running, validates each config, matches it against
 * the engine's pre-populated codec table by payload name, installs it via
 * SetReceiveCodec, records it in the local codec DB, then restarts
 * reception and enables receive-side REMB.
 *
 * @param codecConfigList  negotiated codec configs (not owned); treated as
 *                         success if at least one codec applies.
 * @return kMediaConduitNoError on success; malformed-argument,
 *         invalid-receive-codec, or unknown-error codes otherwise.
 *
 * Fixes: corrected log-string typos ("Attemping" -> "Attempting",
 * "updated" -> "update") and reuse of the captured `error` value in the
 * StopReceive failure log.
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureRecvMediaCodecs(
    const std::vector<VideoCodecConfig* >& codecConfigList)
{
  CSFLogDebug(logTag,  "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; //webrtc engine errors
  bool success = false;
  std::string  payloadName;

  if(mEngineReceiving)
  {
    CSFLogDebug(logTag, "%s Engine Already Receiving . Attempting to Stop ",
                __FUNCTION__);
    if(mPtrViEBase->StopReceive(mChannel) == -1)
    {
      error = mPtrViEBase->LastError();
      // kViEBaseUnknownError here means the channel was not actually
      // receiving; treat it as a successful stop.
      if(error == kViEBaseUnknownError)
      {
        CSFLogDebug(logTag, "%s StopReceive() Success ", __FUNCTION__);
        mEngineReceiving = false;
      } else {
        CSFLogError(logTag, "%s StopReceive() Failed %d ", __FUNCTION__,
                    error);
        return kMediaConduitUnknownError;
      }
    }
  }

  if(codecConfigList.empty())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  //Try Applying the codecs in the list
  // we treat as success if at least one codec was applied and reception was
  // started successfully.
  for(std::vector<VideoCodecConfig*>::size_type i=0;i < codecConfigList.size();i++)
  {
    //if the codec param is invalid or duplicate, return error
    if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError)
    {
      return condError;
    }

    webrtc::VideoCodec video_codec;
    mEngineReceiving = false;
    memset(&video_codec, 0, sizeof(webrtc::VideoCodec));

    //Retrieve pre-populated codec structure for our codec.
    for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
    {
      if(mPtrViECodec->GetCodec(idx, video_codec) == 0)
      {
        payloadName = video_codec.plName;
        if(codecConfigList[i]->mName.compare(payloadName) == 0)
        {
          CodecConfigToWebRTCCodec(codecConfigList[i], video_codec);
          if(mPtrViECodec->SetReceiveCodec(mChannel,video_codec) == -1)
          {
            CSFLogError(logTag, "%s Invalid Receive Codec %d ", __FUNCTION__,
                        mPtrViEBase->LastError());
          } else {
            CSFLogError(logTag, "%s Successfully Set the codec %s", __FUNCTION__,
                        codecConfigList[i]->mName.c_str());
            if(CopyCodecToDB(codecConfigList[i]))
            {
              success = true;
            } else {
              CSFLogError(logTag,"%s Unable to update Codec Database", __FUNCTION__);
              return kMediaConduitUnknownError;
            }
          }
          break; //we found a match
        }
      }
    }//end for codeclist
  }//end for

  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  //Start Receive on the video engine
  if(mPtrViEBase->StartReceive(mChannel) == -1)
  {
    error = mPtrViEBase->LastError();
    CSFLogError(logTag, "%s Start Receive Error %d ", __FUNCTION__, error);

    return kMediaConduitUnknownError;
  }

  // by now we should be successfully started the reception
  // Enable receive-side REMB bandwidth-estimation feedback.
  mPtrRTP->SetRembStatus(mChannel, false, true);
  mEngineReceiving = true;
  DumpCodecDB();
  return kMediaConduitNoError;
}
/**
 * Peforms intialization of the MANDATORY components of the Video Engine
 *
 * When @p other is non-null, this conduit becomes the second direction of
 * a send/receive pair: it shares the partner's VideoEngine, channel and
 * external capture device, and skips channel-level setup.  Otherwise it
 * creates the engine, channel, transport, capturer, renderer, and
 * MTU/RTCP configuration itself.
 *
 * @param other  the already-initialized opposite-direction conduit, or
 *               nullptr to build everything from scratch.
 * @return kMediaConduitNoError on success, otherwise an error code
 *         identifying the first initialization step that failed.
 */
MediaConduitErrorCode WebrtcVideoConduit::Init(WebrtcVideoConduit *other)
{
  CSFLogDebug(logTag,  "%s this=%p other=%p", __FUNCTION__, this, other);

  if (other) {
    MOZ_ASSERT(!other->mOtherDirection);
    other->mOtherDirection = this;
    mOtherDirection = other;

    // only one can call ::Create()/GetVideoEngine()
    MOZ_ASSERT(other->mVideoEngine);
    mVideoEngine = other->mVideoEngine;
  } else {
#ifdef MOZ_WIDGET_ANDROID
    jobject context = jsjni_GetGlobalContextRef();

    // get the JVM
    JavaVM *jvm = jsjni_GetVM();

    if (webrtc::VideoEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
      CSFLogError(logTag,  "%s: could not set Android objects", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }
#endif

    // Per WebRTC APIs below function calls return nullptr on failure
    if( !(mVideoEngine = webrtc::VideoEngine::Create()) )
    {
      CSFLogError(logTag, "%s Unable to create video engine ", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    // Route webrtc tracing into a file when the log module is enabled.
    PRLogModuleInfo *logs = GetWebRTCLogInfo();
    if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
      // no need to a critical section or lock here
      gWebrtcTraceLoggingOn = 1;

      const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
      if (!file) {
        file = "WebRTC.log";
      }
      CSFLogDebug(logTag,  "%s Logging webrtc to %s level %d", __FUNCTION__,
                  file, logs->level);
      mVideoEngine->SetTraceFilter(logs->level);
      mVideoEngine->SetTraceFile(file);
    }
  }

  if( !(mPtrViEBase = ViEBase::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video base interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViECapture = ViECapture::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video capture interface", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViECodec = ViECodec::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video codec interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViENetwork = ViENetwork::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video network interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViERender = ViERender::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video render interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrRTP = webrtc::ViERTP_RTCP::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video RTCP interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if ( !(mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get external codec interface %d ",
                __FUNCTION__, mPtrViEBase->LastError());
    return kMediaConduitSessionNotInited;
  }

  if (other) {
    // Share the partner's channel and capture device instead of creating
    // our own.
    mChannel = other->mChannel;
    mPtrExtCapture = other->mPtrExtCapture;
    mCapId = other->mCapId;
  } else {
    CSFLogDebug(logTag, "%s Engine Created: Init'ng the interfaces ",__FUNCTION__);

    if(mPtrViEBase->Init() == -1)
    {
      CSFLogError(logTag, " %s Video Engine Init Failed %d ",__FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitSessionNotInited;
    }

    if(mPtrViEBase->CreateChannel(mChannel) == -1)
    {
      CSFLogError(logTag, " %s Channel creation Failed %d ",__FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitChannelError;
    }

    if(mPtrViENetwork->RegisterSendTransport(mChannel, *this) == -1)
    {
      CSFLogError(logTag,  "%s ViENetwork Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitTransportRegistrationFail;
    }

    if(mPtrViECapture->AllocateExternalCaptureDevice(mCapId,
                                                     mPtrExtCapture) == -1)
    {
      CSFLogError(logTag, "%s Unable to Allocate capture module: %d ",
                  __FUNCTION__, mPtrViEBase->LastError());
      return kMediaConduitCaptureError;
    }

    if(mPtrViECapture->ConnectCaptureDevice(mCapId,mChannel) == -1)
    {
      CSFLogError(logTag, "%s Unable to Connect capture module: %d ",
                  __FUNCTION__,mPtrViEBase->LastError());
      return kMediaConduitCaptureError;
    }

    if(mPtrViERender->AddRenderer(mChannel,
                                  webrtc::kVideoI420,
                                  (webrtc::ExternalRenderer*) this) == -1)
    {
      CSFLogError(logTag, "%s Failed to added external renderer ", __FUNCTION__);
      return kMediaConduitInvalidRenderer;
    }

    // Set up some parameters, per juberti. Set MTU.
    if(mPtrViENetwork->SetMTU(mChannel, 1200) != 0)
    {
      CSFLogError(logTag,  "%s MTU Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitMTUError;
    }

    // Turn on RTCP and loss feedback reporting.
    if(mPtrRTP->SetRTCPStatus(mChannel, webrtc::kRtcpCompound_RFC4585) != 0)
    {
      CSFLogError(logTag,  "%s RTCPStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitRTCPStatusError;
    }
  }

  CSFLogError(logTag, "%s Initialization Done", __FUNCTION__);
  return kMediaConduitNoError;
}
/**
 * Inserts one captured video frame into the engine for encoding/sending.
 *
 * @param video_frame         non-null raw frame buffer.
 * @param video_frame_length  buffer length in bytes (> 0).
 * @param width, height       frame dimensions in pixels (> 0, even).
 * @param video_type          kVideoI420 or kVideoNV21; anything else is
 *                            rejected.
 * @param capture_time        capture timestamp passed through to the
 *                            engine.
 * @return kMediaConduitNoError on success, or a malformed-argument /
 *         not-inited / capture error code.
 *
 * Fix: corrected the unsupported-type error message ("1420" -> "I420").
 */
MediaConduitErrorCode
WebrtcVideoConduit::SendVideoFrame(unsigned char* video_frame,
                                   unsigned int video_frame_length,
                                   unsigned short width,
                                   unsigned short height,
                                   VideoType video_type,
                                   uint64_t capture_time)
{
  CSFLogDebug(logTag,  "%s ", __FUNCTION__);

  //check for  the parameters sanity
  if(!video_frame || video_frame_length == 0 ||
     width == 0 || height == 0)
  {
    CSFLogError(logTag,  "%s Invalid Parameters ",__FUNCTION__);
    MOZ_ASSERT(PR_FALSE);
    return kMediaConduitMalformedArgument;
  }

  // Map the conduit's VideoType onto the engine's RawVideoType.
  webrtc::RawVideoType type;
  switch (video_type) {
    case kVideoI420:
      type = webrtc::kVideoI420;
      break;
    case kVideoNV21:
      type = webrtc::kVideoNV21;
      break;
    default:
      CSFLogError(logTag,  "%s VideoType Invalid. Only I420 and NV21 Supported",__FUNCTION__);
      MOZ_ASSERT(PR_FALSE);
      return kMediaConduitMalformedArgument;
  }

  //Transmission should be enabled before we insert any frames.
  if(!mEngineTransmitting)
  {
    CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  // enforce even width/height (paranoia)
  MOZ_ASSERT(!(width & 1));
  MOZ_ASSERT(!(height & 1));

  if (!SelectSendResolution(width, height))
  {
    return kMediaConduitCaptureError;
  }

  //insert the frame to video engine in I420 format only
  if(mPtrExtCapture->IncomingFrame(video_frame,
                                   video_frame_length,
                                   width, height,
                                   type,
                                   (unsigned long long)capture_time) == -1)
  {
    CSFLogError(logTag,  "%s IncomingFrame Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitCaptureError;
  }

  CSFLogError(logTag, "%s Inserted A Frame", __FUNCTION__);
  return kMediaConduitNoError;
}
/**
 * Destructor: tears down the video conduit (scoped-pointer variant).
 *
 * Frees codec bookkeeping, then — unless the paired conduit already shut
 * the channel down (mShutDown) — disconnects the capture device, removes
 * the renderer, deregisters the transport, stops media and deletes the
 * channel.  The sub-API pointers here are smart pointers: the sole owner
 * (no mOtherDirection) nulls them all before deleting the engine, since
 * VideoEngine::Delete requires every interface to be released first.
 */
WebrtcVideoConduit::~WebrtcVideoConduit()
{
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
  CSFLogDebug(logTag,  "%s ", __FUNCTION__);

  // Receive-codec DB entries were heap-copied in CopyCodecToDB; free them.
  for(std::vector<VideoCodecConfig*>::size_type i=0;i < mRecvCodecList.size();i++)
  {
    delete mRecvCodecList[i];
  }
  delete mCurSendCodecConfig;

  // The first one of a pair to be deleted shuts down media for both

  //Deal with External Capturer
  if(mPtrViECapture)
  {
    if (!mShutDown) {
      mPtrViECapture->DisconnectCaptureDevice(mCapId);
      mPtrViECapture->ReleaseCaptureDevice(mCapId);
      mPtrExtCapture = nullptr;
      // The capture device is shared; clear the partner's pointer too.
      if (mOtherDirection)
        mOtherDirection->mPtrExtCapture = nullptr;
    }
  }

  //Deal with External Renderer
  if(mPtrViERender)
  {
    if (!mShutDown) {
      if(mRenderer) {
        mPtrViERender->StopRender(mChannel);
      }
      mPtrViERender->RemoveRenderer(mChannel);
    }
  }

  //Deal with the transport
  if(mPtrViENetwork)
  {
    if (!mShutDown) {
      mPtrViENetwork->DeregisterSendTransport(mChannel);
    }
  }

  if(mPtrViEBase)
  {
    if (!mShutDown) {
      // Stop media, detach from the audio conduit, then drop the channel.
      mPtrViEBase->StopSend(mChannel);
      mPtrViEBase->StopReceive(mChannel);
      SyncTo(nullptr);
      mPtrViEBase->DeleteChannel(mChannel);
    }
  }

  if (mOtherDirection)
  {
    // mOtherDirection owns these now!
    mOtherDirection->mOtherDirection = nullptr;
    // let other side we terminated the channel
    mOtherDirection->mShutDown = true;
    mVideoEngine = nullptr;
  } else {
    // We can't delete the VideoEngine until all these are released!
    // And we can't use a Scoped ptr, since the order is arbitrary
    mPtrViEBase = nullptr;
    mPtrViECapture = nullptr;
    mPtrViECodec = nullptr;
    mPtrViENetwork = nullptr;
    mPtrViERender = nullptr;
    mPtrRTP = nullptr;
    mPtrExtCodec = nullptr;

    // only one opener can call Delete.  Have it be the last to close.
    if(mVideoEngine)
    {
      webrtc::VideoEngine::Delete(mVideoEngine);
    }
  }
}
/**
 * Performs initialization of the MANDATORY components of the Video Engine
 *
 * Reads bitrate/latency/load-manager prefs, creates the VideoEngine,
 * acquires the ViE sub-API interfaces, creates the channel, registers
 * this conduit as send transport and external capturer/renderer, and
 * configures MTU and RTCP mode.
 *
 * @return kMediaConduitNoError on success, otherwise an error code
 *         identifying the first initialization step that failed.
 *
 * Fix: ViEExternalCodec::GetInterface was called twice into
 * mPtrExtCodec; each GetInterface call takes a reference that must be
 * paired with Release(), so the duplicate acquisition leaked a reference.
 * Only the first acquisition is kept.
 */
MediaConduitErrorCode WebrtcVideoConduit::Init()
{
  CSFLogDebug(logTag,  "%s this=%p", __FUNCTION__, this);

#ifdef MOZILLA_INTERNAL_API
  // already know we must be on MainThread barring unit test weirdness
  MOZ_ASSERT(NS_IsMainThread());

  // Pull tunables from prefs; negative pref values leave defaults intact.
  nsresult rv;
  nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
  if (!NS_WARN_IF(NS_FAILED(rv)))
  {
    nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);

    if (branch)
    {
      int32_t temp;
      (void) NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.video.test_latency", &mVideoLatencyTestEnable)));
      (void) NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.min_bitrate", &temp)));
      if (temp >= 0) {
        mMinBitrate = temp;
      }
      (void) NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.start_bitrate", &temp)));
      if (temp >= 0) {
        mStartBitrate = temp;
      }
      (void) NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.max_bitrate", &temp)));
      if (temp >= 0) {
        mMaxBitrate = temp;
      }
      bool use_loadmanager = false;
      (void) NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.navigator.load_adapt", &use_loadmanager)));
      if (use_loadmanager) {
        mLoadManager = LoadManagerBuild();
      }
    }
  }
#endif

#ifdef MOZ_WIDGET_ANDROID
  // get the JVM
  JavaVM *jvm = jsjni_GetVM();

  if (webrtc::VideoEngine::SetAndroidObjects(jvm) != 0) {
    CSFLogError(logTag,  "%s: could not set Android objects", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }
#endif

  // Per WebRTC APIs below function calls return nullptr on failure
  mVideoEngine = webrtc::VideoEngine::Create();
  if(!mVideoEngine)
  {
    CSFLogError(logTag, "%s Unable to create video engine ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  EnableWebRtcLog();

  if( !(mPtrViEBase = ViEBase::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video base interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViECapture = ViECapture::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video capture interface", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViECodec = ViECodec::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video codec interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViENetwork = ViENetwork::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video network interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrViERender = ViERender::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video render interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  // Acquired exactly once; a second GetInterface call would leak a
  // reference (each acquisition must be balanced by a Release()).
  mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine);
  if (!mPtrExtCodec) {
    CSFLogError(logTag, "%s Unable to get external codec interface: %d ",
                __FUNCTION__,mPtrViEBase->LastError());
    return kMediaConduitSessionNotInited;
  }

  if( !(mPtrRTP = webrtc::ViERTP_RTCP::GetInterface(mVideoEngine)))
  {
    CSFLogError(logTag, "%s Unable to get video RTCP interface ", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  CSFLogDebug(logTag, "%s Engine Created: Init'ng the interfaces ",__FUNCTION__);

  if(mPtrViEBase->Init() == -1)
  {
    CSFLogError(logTag, " %s Video Engine Init Failed %d ",__FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitSessionNotInited;
  }

  if(mPtrViEBase->CreateChannel(mChannel) == -1)
  {
    CSFLogError(logTag, " %s Channel creation Failed %d ",__FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitChannelError;
  }

  if(mPtrViENetwork->RegisterSendTransport(mChannel, *this) == -1)
  {
    CSFLogError(logTag,  "%s ViENetwork Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitTransportRegistrationFail;
  }

  if(mPtrViECapture->AllocateExternalCaptureDevice(mCapId,
                                                   mPtrExtCapture) == -1)
  {
    CSFLogError(logTag, "%s Unable to Allocate capture module: %d ",
                __FUNCTION__, mPtrViEBase->LastError());
    return kMediaConduitCaptureError;
  }

  if(mPtrViECapture->ConnectCaptureDevice(mCapId,mChannel) == -1)
  {
    CSFLogError(logTag, "%s Unable to Connect capture module: %d ",
                __FUNCTION__,mPtrViEBase->LastError());
    return kMediaConduitCaptureError;
  }

  // Set up some parameters, per juberti. Set MTU.
  if(mPtrViENetwork->SetMTU(mChannel, 1200) != 0)
  {
    CSFLogError(logTag,  "%s MTU Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitMTUError;
  }

  // Turn on RTCP and loss feedback reporting.
  if(mPtrRTP->SetRTCPStatus(mChannel, webrtc::kRtcpCompound_RFC4585) != 0)
  {
    CSFLogError(logTag,  "%s RTCPStatus Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitRTCPStatusError;
  }

  if (mPtrViERender->AddRenderer(mChannel,
                                 webrtc::kVideoI420,
                                 (webrtc::ExternalRenderer*) this) == -1)
  {
    CSFLogError(logTag, "%s Failed to added external renderer ", __FUNCTION__);
    return kMediaConduitInvalidRenderer;
  }

  if (mLoadManager) {
    mPtrViEBase->RegisterCpuOveruseObserver(mChannel, mLoadManager);
    mPtrViEBase->SetLoadManager(mLoadManager);
  }

  CSFLogError(logTag, "%s Initialization Done", __FUNCTION__);
  return kMediaConduitNoError;
}
/*
 * WebRTCAudioConduit Implementation
 */

/**
 * Performs initialization of the MANDATORY components of the Audio Engine.
 *
 * @param other  When non-null, this conduit is the second direction of an
 *               already-initialized pair and shares |other|'s voice engine
 *               and channel instead of creating its own.
 * @return kMediaConduitNoError on success, or a specific error code when an
 *         engine/interface/channel step fails.
 *
 * Fixes in this revision:
 *  - SetExternalRecordingStatus / SetExternalPlayoutStatus failure paths
 *    returned each other's error code (recording failure reported
 *    kMediaConduitExternalPlayoutError and vice versa); now corrected.
 *  - "VoEBCodec" typo in the VoECodec failure message.
 */
MediaConduitErrorCode WebrtcAudioConduit::Init(WebrtcAudioConduit *other)
{
  CSFLogDebug(logTag, "%s this=%p other=%p", __FUNCTION__, this, other);

  if (other) {
    // Pair with the already-created direction and reuse its engine.
    MOZ_ASSERT(!other->mOtherDirection);
    other->mOtherDirection = this;
    mOtherDirection = other;

    // only one can call ::Create()/GetVoiceEngine()
    MOZ_ASSERT(other->mVoiceEngine);
    mVoiceEngine = other->mVoiceEngine;
  } else {
#ifdef MOZ_WIDGET_ANDROID
    jobject context = jsjni_GetGlobalContextRef();

    // get the JVM
    JavaVM *jvm = jsjni_GetVM();
    JNIEnv* env;

    if (jvm->GetEnv((void**)&env, JNI_VERSION_1_4) != JNI_OK) {
      CSFLogError(logTag, "%s: could not get Java environment", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }
    // NOTE(review): this attach only runs after GetEnv already succeeded;
    // presumably a no-op for an attached thread -- confirm intent.
    jvm->AttachCurrentThread(&env, NULL);

    if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
      CSFLogError(logTag, "%s Unable to set Android objects", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    env->DeleteGlobalRef(context);
#endif

    //Per WebRTC APIs below function calls return NULL on failure
    if(!(mVoiceEngine = webrtc::VoiceEngine::Create())) {
      CSFLogError(logTag, "%s Unable to create voice engine", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    // Enable webrtc-internal trace logging once per process when the
    // corresponding log module is active.
    PRLogModuleInfo *logs = GetWebRTCLogInfo();
    if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) {
      // no need to a critical section or lock here
      gWebrtcTraceLoggingOn = 1;

      const char *file = PR_GetEnv("WEBRTC_TRACE_FILE");
      if (!file) {
        file = "WebRTC.log";
      }
      CSFLogDebug(logTag, "%s Logging webrtc to %s level %d", __FUNCTION__,
                  file, logs->level);
      mVoiceEngine->SetTraceFilter(logs->level);
      mVoiceEngine->SetTraceFile(file);
    }
  }

  // Acquire the voice-engine sub-interfaces; each returns NULL on failure.
  if(!(mPtrVoEBase = VoEBase::GetInterface(mVoiceEngine))) {
    CSFLogError(logTag, "%s Unable to initialize VoEBase", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoENetwork = VoENetwork::GetInterface(mVoiceEngine))) {
    CSFLogError(logTag, "%s Unable to initialize VoENetwork", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoECodec = VoECodec::GetInterface(mVoiceEngine))) {
    CSFLogError(logTag, "%s Unable to initialize VoECodec", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEProcessing = VoEAudioProcessing::GetInterface(mVoiceEngine))) {
    CSFLogError(logTag, "%s Unable to initialize VoEProcessing", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine))) {
    CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
    return kMediaConduitSessionNotInited;
  }

  if (other) {
    // Shared-engine case: reuse the channel the other direction created.
    mChannel = other->mChannel;
  } else {
    // init the engine with our audio device layer
    if(mPtrVoEBase->Init() == -1) {
      CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
      return kMediaConduitSessionNotInited;
    }

    if( (mChannel = mPtrVoEBase->CreateChannel()) == -1) {
      CSFLogError(logTag, "%s VoiceEngine Channel creation failed",__FUNCTION__);
      return kMediaConduitChannelError;
    }

    CSFLogDebug(logTag, "%s Channel Created %d ",__FUNCTION__, mChannel);

    if(mPtrVoENetwork->RegisterExternalTransport(mChannel, *this) == -1) {
      CSFLogError(logTag, "%s VoiceEngine, External Transport Failed",__FUNCTION__);
      return kMediaConduitTransportRegistrationFail;
    }

    // Media flows through this conduit rather than real devices; enable
    // external recording/playout. (Error codes below were previously swapped.)
    if(mPtrVoEXmedia->SetExternalRecordingStatus(true) == -1) {
      CSFLogError(logTag, "%s SetExternalRecordingStatus Failed %d",__FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitExternalRecordingError;
    }

    if(mPtrVoEXmedia->SetExternalPlayoutStatus(true) == -1) {
      CSFLogError(logTag, "%s SetExternalPlayoutStatus Failed %d ",__FUNCTION__,
                  mPtrVoEBase->LastError());
      return kMediaConduitExternalPlayoutError;
    }

    CSFLogDebug(logTag , "%s AudioSessionConduit Initialization Done (%p)",__FUNCTION__, this);
  }
  return kMediaConduitNoError;
}
/**
 * Note: Setting the send-codec on the Video Engine will restart the encoder,
 * sets up new SSRC and reset RTP_RTCP module with the new codec setting.
 *
 * Applies |codecConfig| as the send codec for this conduit's channel:
 * validates the config, stops transmission, resolves the codec (external
 * H.264 codec or one of the engine's built-in codecs), pushes it to the
 * engine, enables NACK/REMB feedback as requested, and restarts transmission.
 *
 * @param codecConfig  Desired send codec; must pass ValidateCodecConfig().
 * @return kMediaConduitNoError on success, otherwise a specific error code
 *         (invalid codec, NACK setup failure, transmit start/stop failure).
 */
MediaConduitErrorCode
WebrtcVideoConduit::ConfigureSendMediaCodec(const VideoCodecConfig* codecConfig)
{
  CSFLogDebug(logTag, "%s for %s", __FUNCTION__,
              codecConfig ? codecConfig->mName.c_str() : "<null>");
  bool codecFound = false;
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; //webrtc engine errors
  webrtc::VideoCodec video_codec;
  std::string payloadName;

  memset(&video_codec, 0, sizeof(video_codec));

  //validate basic params
  if((condError = ValidateCodecConfig(codecConfig,true)) != kMediaConduitNoError)
  {
    return condError;
  }

  //Check if we have same codec already applied
  if(CheckCodecsForMatch(mCurSendCodecConfig, codecConfig))
  {
    // NOTE(review): only logged -- the codec is still re-applied below;
    // presumably intentional (re-apply is harmless). Confirm before adding
    // an early return.
    CSFLogDebug(logTag, "%s Codec has been applied already ", __FUNCTION__);
  }

  // The engine cannot swap send codecs while transmitting.
  condError = StopTransmitting();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  if (mExternalSendCodec && codecConfig->mType == mExternalSendCodec->mType) {
    CSFLogError(logTag, "%s Configuring External H264 Send Codec", __FUNCTION__);

    // width/height will be overridden on the first frame
    video_codec.width = 320;
    video_codec.height = 240;
#ifdef MOZ_WEBRTC_OMX
    // OMX hardware encoders require dimensions in multiples of 16.
    if (codecConfig->mType == webrtc::kVideoCodecH264) {
      video_codec.resolution_divisor = 16;
    } else {
      video_codec.resolution_divisor = 1; // We could try using it to handle odd resolutions
    }
#else
    video_codec.resolution_divisor = 1; // We could try using it to handle odd resolutions
#endif
    video_codec.qpMax = 56;
    video_codec.numberOfSimulcastStreams = 1;
    video_codec.mode = webrtc::kRealtimeVideo;

    codecFound = true;
  } else {
    // we should be good here to set the new codec.
    // Scan the engine's built-in codec table for a payload-name match.
    for(int idx=0; idx < mPtrViECodec->NumberOfCodecs(); idx++)
    {
      if(0 == mPtrViECodec->GetCodec(idx, video_codec))
      {
        payloadName = video_codec.plName;
        if(codecConfig->mName.compare(payloadName) == 0)
        {
          // Note: side-effect of this is that video_codec is filled in
          // by GetCodec()
          codecFound = true;
          break;
        }
      }
    }//for
  }

  if(codecFound == false)
  {
    CSFLogError(logTag, "%s Codec Mismatch ", __FUNCTION__);
    return kMediaConduitInvalidSendCodec;
  }

  // Note: only for overriding parameters from GetCodec()!
  CodecConfigToWebRTCCodec(codecConfig, video_codec);

  if(mPtrViECodec->SetSendCodec(mChannel, video_codec) == -1)
  {
    error = mPtrViEBase->LastError();
    if(error == kViECodecInvalidCodec)
    {
      CSFLogError(logTag, "%s Invalid Send Codec", __FUNCTION__);
      return kMediaConduitInvalidSendCodec;
    }
    CSFLogError(logTag, "%s SetSendCodec Failed %d ", __FUNCTION__,
                mPtrViEBase->LastError());
    return kMediaConduitUnknownError;
  }

  // Lazily create the statistics collector for the send side.
  if (!mVideoCodecStat) {
    mVideoCodecStat = new VideoCodecStatistics(mChannel, mPtrViECodec, true);
  }

  // Reset cached dimensions so the next frame triggers reconfiguration.
  mSendingWidth = 0;
  mSendingHeight = 0;
  mSendingFramerate = video_codec.maxFramerate;

  // Enable NACK on the send side if the remote negotiated generic NACK.
  if(codecConfig->RtcpFbNackIsSet(""))
  {
    CSFLogDebug(logTag, "Enabling NACK (send) for video stream\n");
    if (mPtrRTP->SetNACKStatus(mChannel, true) != 0)
    {
      CSFLogError(logTag, "%s NACKStatus Failed %d ", __FUNCTION__,
                  mPtrViEBase->LastError());
      return kMediaConduitNACKStatusError;
    }
  }

  condError = StartTransmitting();
  if (condError != kMediaConduitNoError) {
    return condError;
  }

  //Copy the applied config for future reference.
  delete mCurSendCodecConfig;
  mCurSendCodecConfig = new VideoCodecConfig(*codecConfig);

  // Enable REMB bandwidth-estimation feedback (send side only).
  mPtrRTP->SetRembStatus(mChannel, true, false);

  return kMediaConduitNoError;
}
/**
 * Configures the receive side of the audio conduit from a list of codecs.
 *
 * Stops reception/playout if already active (new recv codecs cannot be
 * applied while the engine is playing), then applies each codec in
 * |codecConfigList| best-effort: a codec that fails conversion or
 * SetRecPayloadType is skipped rather than fatal. Succeeds if at least one
 * codec was applied, after which reception and playout are restarted.
 *
 * @param codecConfigList  Candidate receive codecs; must be non-empty and
 *                         each entry must pass ValidateCodecConfig().
 * @return kMediaConduitNoError on success; otherwise a specific error
 *         (malformed argument, no codec applied, socket/playout failure).
 */
MediaConduitErrorCode
WebrtcAudioConduit::ConfigureRecvMediaCodecs(
    const std::vector<AudioCodecConfig*>& codecConfigList)
{
  CSFLogDebug(logTag, "%s ", __FUNCTION__);
  MediaConduitErrorCode condError = kMediaConduitNoError;
  int error = 0; //webrtc engine errors
  bool success = false;

  // are we receiving already. If so, stop receiving and playout
  // since we can't apply new recv codec when the engine is playing
  if(mEngineReceiving)
  {
    CSFLogDebug(logTag, "%s Engine Already Receiving. Attemping to Stop ", __FUNCTION__);
    // AudioEngine doesn't fail fatal on stop reception. Ref:voe_errors.h.
    // hence we need-not be strict in failing here on error
    mPtrVoEBase->StopReceive(mChannel);
    CSFLogDebug(logTag, "%s Attemping to Stop playout ", __FUNCTION__);
    if(mPtrVoEBase->StopPlayout(mChannel) == -1)
    {
      // Only VE_CANNOT_STOP_PLAYOUT is treated as fatal here.
      if( mPtrVoEBase->LastError() == VE_CANNOT_STOP_PLAYOUT)
      {
        CSFLogDebug(logTag, "%s Stop-Playout Failed %d", __FUNCTION__, mPtrVoEBase->LastError());
        return kMediaConduitPlayoutError;
      }
    }
  }

  mEngineReceiving = false;

  if(!codecConfigList.size())
  {
    CSFLogError(logTag, "%s Zero number of codecs to configure", __FUNCTION__);
    return kMediaConduitMalformedArgument;
  }

  //Try Applying the codecs in the list
  for(std::vector<AudioCodecConfig*>::size_type i=0 ;i<codecConfigList.size();i++)
  {
    //if the codec param is invalid or duplicate, return error
    if((condError = ValidateCodecConfig(codecConfigList[i],false)) != kMediaConduitNoError)
    {
      return condError;
    }

    webrtc::CodecInst cinst;
    if(!CodecConfigToWebRTCCodec(codecConfigList[i],cinst))
    {
      // Conversion failure is non-fatal: try the next codec.
      CSFLogError(logTag,"%s CodecConfig to WebRTC Codec Failed ",__FUNCTION__);
      continue;
    }

    if(mPtrVoECodec->SetRecPayloadType(mChannel,cinst) == -1)
    {
      // Engine rejected this payload type; skip it and continue.
      error = mPtrVoEBase->LastError();
      CSFLogError(logTag,  "%s SetRecvCodec Failed %d ",__FUNCTION__, error);
      continue;
    } else {
      CSFLogDebug(logTag, "%s Successfully Set RecvCodec %s", __FUNCTION__,
                  codecConfigList[i]->mName.c_str());
      //copy this to local database
      if(CopyCodecToDB(codecConfigList[i]))
      {
        success = true;
      } else {
        CSFLogError(logTag,"%s Unable to updated Codec Database", __FUNCTION__);
        return kMediaConduitUnknownError;
      }
    }
  } //end for

  //Success == false indicates none of the codec was applied
  if(!success)
  {
    CSFLogError(logTag, "%s Setting Receive Codec Failed ", __FUNCTION__);
    return kMediaConduitInvalidReceiveCodec;
  }

  //If we are here, atleast one codec should have been set
  if(mPtrVoEBase->StartReceive(mChannel) == -1)
  {
    error = mPtrVoEBase->LastError();
    CSFLogError(logTag ,  "%s StartReceive Failed %d ",__FUNCTION__, error);
    if(error == VE_RECV_SOCKET_ERROR)
    {
      return kMediaConduitSocketError;
    }
    return kMediaConduitUnknownError;
  }

  if(mPtrVoEBase->StartPlayout(mChannel) == -1)
  {
    CSFLogError(logTag, "%s Starting playout Failed", __FUNCTION__);
    return kMediaConduitPlayoutError;
  }

  //we should be good here for setting this.
  mEngineReceiving = true;
  DumpCodecDB();
  return kMediaConduitNoError;
}
/** * Performs initialization of the MANDATORY components of the Video Engine */ MediaConduitErrorCode WebrtcVideoConduit::Init(WebrtcVideoConduit *other) { CSFLogDebug(logTag, "%s this=%p other=%p", __FUNCTION__, this, other); #ifdef MOZILLA_INTERNAL_API // already know we must be on MainThread barring unit test weirdness MOZ_ASSERT(NS_IsMainThread()); nsresult rv; nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv); if (!NS_WARN_IF(NS_FAILED(rv))) { nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs); if (branch) { int32_t temp; NS_WARN_IF(NS_FAILED(branch->GetBoolPref("media.video.test_latency", &mVideoLatencyTestEnable))); NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.min_bitrate", &temp))); if (temp >= 0) { mMinBitrate = temp; } NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.start_bitrate", &temp))); if (temp >= 0) { mStartBitrate = temp; } NS_WARN_IF(NS_FAILED(branch->GetIntPref("media.peerconnection.video.max_bitrate", &temp))); if (temp >= 0) { mMaxBitrate = temp; } } } #endif if (other) { MOZ_ASSERT(!other->mOtherDirection); other->mOtherDirection = this; mOtherDirection = other; // only one can call ::Create()/GetVideoEngine() MOZ_ASSERT(other->mVideoEngine); mVideoEngine = other->mVideoEngine; } else { #ifdef MOZ_WIDGET_ANDROID // get the JVM JavaVM *jvm = jsjni_GetVM(); if (webrtc::VideoEngine::SetAndroidObjects(jvm) != 0) { CSFLogError(logTag, "%s: could not set Android objects", __FUNCTION__); return kMediaConduitSessionNotInited; } #endif // Per WebRTC APIs below function calls return nullptr on failure if( !(mVideoEngine = webrtc::VideoEngine::Create()) ) { CSFLogError(logTag, "%s Unable to create video engine ", __FUNCTION__); return kMediaConduitSessionNotInited; } PRLogModuleInfo *logs = GetWebRTCLogInfo(); if (!gWebrtcTraceLoggingOn && logs && logs->level > 0) { // no need to a critical section or lock here gWebrtcTraceLoggingOn = 1; const char *file = 
PR_GetEnv("WEBRTC_TRACE_FILE"); if (!file) { file = "WebRTC.log"; } CSFLogDebug(logTag, "%s Logging webrtc to %s level %d", __FUNCTION__, file, logs->level); mVideoEngine->SetTraceFilter(logs->level); mVideoEngine->SetTraceFile(file); } } if( !(mPtrViEBase = ViEBase::GetInterface(mVideoEngine))) { CSFLogError(logTag, "%s Unable to get video base interface ", __FUNCTION__); return kMediaConduitSessionNotInited; } if( !(mPtrViECapture = ViECapture::GetInterface(mVideoEngine))) { CSFLogError(logTag, "%s Unable to get video capture interface", __FUNCTION__); return kMediaConduitSessionNotInited; } if( !(mPtrViECodec = ViECodec::GetInterface(mVideoEngine))) { CSFLogError(logTag, "%s Unable to get video codec interface ", __FUNCTION__); return kMediaConduitSessionNotInited; } if( !(mPtrViENetwork = ViENetwork::GetInterface(mVideoEngine))) { CSFLogError(logTag, "%s Unable to get video network interface ", __FUNCTION__); return kMediaConduitSessionNotInited; } if( !(mPtrViERender = ViERender::GetInterface(mVideoEngine))) { CSFLogError(logTag, "%s Unable to get video render interface ", __FUNCTION__); return kMediaConduitSessionNotInited; } if( !(mPtrRTP = webrtc::ViERTP_RTCP::GetInterface(mVideoEngine))) { CSFLogError(logTag, "%s Unable to get video RTCP interface ", __FUNCTION__); return kMediaConduitSessionNotInited; } if ( !(mPtrExtCodec = webrtc::ViEExternalCodec::GetInterface(mVideoEngine))) { CSFLogError(logTag, "%s Unable to get external codec interface %d ", __FUNCTION__, mPtrViEBase->LastError()); return kMediaConduitSessionNotInited; } if (other) { mChannel = other->mChannel; mPtrExtCapture = other->mPtrExtCapture; mCapId = other->mCapId; } else { CSFLogDebug(logTag, "%s Engine Created: Init'ng the interfaces ",__FUNCTION__); if(mPtrViEBase->Init() == -1) { CSFLogError(logTag, " %s Video Engine Init Failed %d ",__FUNCTION__, mPtrViEBase->LastError()); return kMediaConduitSessionNotInited; } if(mPtrViEBase->CreateChannel(mChannel) == -1) { CSFLogError(logTag, " %s 
Channel creation Failed %d ",__FUNCTION__, mPtrViEBase->LastError()); return kMediaConduitChannelError; } if(mPtrViENetwork->RegisterSendTransport(mChannel, *this) == -1) { CSFLogError(logTag, "%s ViENetwork Failed %d ", __FUNCTION__, mPtrViEBase->LastError()); return kMediaConduitTransportRegistrationFail; } if(mPtrViECapture->AllocateExternalCaptureDevice(mCapId, mPtrExtCapture) == -1) { CSFLogError(logTag, "%s Unable to Allocate capture module: %d ", __FUNCTION__, mPtrViEBase->LastError()); return kMediaConduitCaptureError; } if(mPtrViECapture->ConnectCaptureDevice(mCapId,mChannel) == -1) { CSFLogError(logTag, "%s Unable to Connect capture module: %d ", __FUNCTION__,mPtrViEBase->LastError()); return kMediaConduitCaptureError; } if(mPtrViERender->AddRenderer(mChannel, webrtc::kVideoI420, (webrtc::ExternalRenderer*) this) == -1) { CSFLogError(logTag, "%s Failed to added external renderer ", __FUNCTION__); return kMediaConduitInvalidRenderer; } // Set up some parameters, per juberti. Set MTU. if(mPtrViENetwork->SetMTU(mChannel, 1200) != 0) { CSFLogError(logTag, "%s MTU Failed %d ", __FUNCTION__, mPtrViEBase->LastError()); return kMediaConduitMTUError; } // Turn on RTCP and loss feedback reporting. if(mPtrRTP->SetRTCPStatus(mChannel, webrtc::kRtcpCompound_RFC4585) != 0) { CSFLogError(logTag, "%s RTCPStatus Failed %d ", __FUNCTION__, mPtrViEBase->LastError()); return kMediaConduitRTCPStatusError; } } CSFLogError(logTag, "%s Initialization Done", __FUNCTION__); return kMediaConduitNoError; }