nsresult PeerConnectionMedia::Init(const std::vector<NrIceStunServer>& stun_servers)
{
  mMainThread = mParent->GetMainThread();
  mSTSThread = mParent->GetSTSThread();

  // TODO([email protected]): need some way to set not offerer later
  // Looks like a bug in the NrIceCtx API.
  mIceCtx = NrIceCtx::Create("PC:" + mParent->GetHandle(), true);
  if (!mIceCtx) {
    CSFLogError(logTag, "%s: Failed to create Ice Context", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }

  nsresult rv;
  if (NS_FAILED(rv = mIceCtx->SetStunServers(stun_servers))) {
    CSFLogError(logTag, "%s: Failed to set stun servers", __FUNCTION__);
    return rv;
  }
  if (NS_FAILED(rv = mDNSResolver->Init())) {
    CSFLogError(logTag, "%s: Failed to initialize dns resolver", __FUNCTION__);
    return rv;
  }
  if (NS_FAILED(rv = mIceCtx->SetResolver(mDNSResolver->AllocateResolver()))) {
    CSFLogError(logTag, "%s: Failed to get dns resolver", __FUNCTION__);
    return rv;
  }

  mIceCtx->SignalGatheringCompleted.connect(
      this, &PeerConnectionMedia::IceGatheringCompleted);
  mIceCtx->SignalCompleted.connect(this, &PeerConnectionMedia::IceCompleted);
  mIceCtx->SignalFailed.connect(this, &PeerConnectionMedia::IceFailed);

  // Create three streams to start with: one each for audio, video and
  // DataChannel.
  // TODO: this will be re-visited
  RefPtr<NrIceMediaStream> audioStream = mIceCtx->CreateStream("stream1", 2);
  RefPtr<NrIceMediaStream> videoStream = mIceCtx->CreateStream("stream2", 2);
  RefPtr<NrIceMediaStream> dcStream = mIceCtx->CreateStream("stream3", 2);

  if (!audioStream) {
    CSFLogError(logTag, "%s: audio stream is NULL", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  mIceStreams.push_back(audioStream);

  if (!videoStream) {
    CSFLogError(logTag, "%s: video stream is NULL", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  mIceStreams.push_back(videoStream);

  if (!dcStream) {
    CSFLogError(logTag, "%s: datachannel stream is NULL", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  mIceStreams.push_back(dcStream);

  // TODO([email protected]): This is not connected to the PCCimpl.
  // Will need to do that later.
  for (std::size_t i = 0; i < mIceStreams.size(); ++i) {
    mIceStreams[i]->SignalReady.connect(this,
                                        &PeerConnectionMedia::IceStreamReady);
  }

  // TODO([email protected]): When we have a generic error reporting mechanism,
  // figure out how to report that StartGathering failed. Bug 827982.
  RUN_ON_THREAD(mIceCtx->thread(),
                WrapRunnable(mIceCtx, &NrIceCtx::StartGathering),
                NS_DISPATCH_NORMAL);

  return NS_OK;
}
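// A minimal caller sketch for Init() above, assuming the embedding code holds
// a PeerConnectionMedia pointer and builds the server list with
// NrIceStunServer::Create(). The helper name, the "stun.example.com" host,
// and the error handling are illustrative assumptions, not the actual
// PeerConnectionImpl code path.
static nsresult InitMediaSketch(PeerConnectionMedia* pcMedia)
{
  std::vector<NrIceStunServer> stun_servers;
  // Assumed contract: Create() returns a heap-allocated server on success,
  // nullptr on a bad address.
  NrIceStunServer* server = NrIceStunServer::Create("stun.example.com", 3478);
  if (server) {
    stun_servers.push_back(*server);
    delete server;
  }
  return pcMedia->Init(stun_servers);
}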
nsresult
MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  /**
   * To get a Snapshot we do the following:
   * - Set a condition variable (mInSnapshotMode) to true
   * - Attach the external renderer and start the camera
   * - Wait for the condition variable to change to false
   *
   * Starting the camera has the effect of invoking DeliverFrame() when
   * the first frame arrives from the camera. We only need one frame for
   * GetCaptureDeviceSnapshot to work, so we immediately set the condition
   * variable to false and notify this method.
   *
   * This causes the current thread to continue (PR_CondWaitVar will return),
   * at which point we can grab a snapshot, convert it to a file and
   * return from this function after cleaning up the temporary stream object
   * and calling Stop() on the media source.
   */
#ifdef MOZ_B2G_CAMERA
  ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
  *aFile = nullptr;
  if (!mInitDone || mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }
#ifdef MOZ_B2G_CAMERA
  mLastCapture = nullptr;

  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StartImpl,
                                       mCapability));
  mCallbackMonitor.Wait();
  if (mState != kStarted) {
    return NS_ERROR_FAILURE;
  }

  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::SnapshotImpl));
  mCallbackMonitor.Wait();
  if (mLastCapture == nullptr) {
    return NS_ERROR_FAILURE;
  }

  mState = kStopped;
  NS_DispatchToMainThread(WrapRunnable(this,
                                       &MediaEngineWebRTCVideoSource::StopImpl));

  // The camera actually returns an nsDOMMemoryFile; the inheritance tree is:
  // nsIDOMBlob <- nsIDOMFile <- nsDOMFileBase <- nsDOMFile <- nsDOMMemoryFile
  *aFile = mLastCapture.get();
  return NS_OK;
#else
  {
    MonitorAutoLock lock(mMonitor);
    mInSnapshotMode = true;
  }

  // Start the rendering (equivalent to calling Start(), but without a track).
  int error = 0;
  if (!mInitDone || mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }
  error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420,
                                  (webrtc::ExternalRenderer*)this);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }
  error = mViERender->StartRender(mCaptureIndex);
  if (error == -1) {
    return NS_ERROR_FAILURE;
  }
  if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
    return NS_ERROR_FAILURE;
  }

  // Wait for the condition variable; it will be set in DeliverFrame().
  // We use a while loop because even if Wait() returns, it's not
  // guaranteed that the condition variable changed.
  // FIX: we need a way to cancel this and to bail if it appears to not be
  // working. Perhaps a maximum time, though some cameras can take seconds
  // to start. 10 seconds?
  {
    MonitorAutoLock lock(mMonitor);
    while (mInSnapshotMode) {
      lock.Wait();
    }
  }

  // If we get here, DeliverFrame received at least one frame.
  webrtc::ViEFile* vieFile = webrtc::ViEFile::GetInterface(mVideoEngine);
  if (!vieFile) {
    return NS_ERROR_FAILURE;
  }

  // Create a temporary file on the main thread and put the snapshot in it.
  // See Run() in MediaEngineWebRTCVideo.h (sets mSnapshotPath).
  NS_DispatchToMainThread(this, NS_DISPATCH_SYNC);

  if (!mSnapshotPath) {
    return NS_ERROR_FAILURE;
  }

  NS_ConvertUTF16toUTF8 path(*mSnapshotPath);
  if (vieFile->GetCaptureDeviceSnapshot(mCaptureIndex, path.get()) < 0) {
    delete mSnapshotPath;
    mSnapshotPath = nullptr;
    return NS_ERROR_FAILURE;
  }

  // Stop the camera.
  mViERender->StopRender(mCaptureIndex);
  mViERender->RemoveRenderer(mCaptureIndex);

  nsCOMPtr<nsIFile> file;
  nsresult rv = NS_NewLocalFile(*mSnapshotPath, false, getter_AddRefs(file));

  delete mSnapshotPath;
  mSnapshotPath = nullptr;

  NS_ENSURE_SUCCESS(rv, rv);

  NS_ADDREF(*aFile = new nsDOMFileFile(file));
#endif
  return NS_OK;
}
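// For reference, a hedged sketch of the DeliverFrame() handshake the comments
// in Snapshot() describe: the external-renderer callback flips mInSnapshotMode
// under the same monitor and wakes the waiting thread. Member names mirror the
// snippet above; the exact signature and the early-return behavior are
// assumptions.
int
MediaEngineWebRTCVideoSource::DeliverFrame(unsigned char* buffer, int size,
                                           uint32_t time_stamp,
                                           int64_t render_time)
{
  MonitorAutoLock lock(mMonitor);
  if (mInSnapshotMode) {
    // One frame is enough for GetCaptureDeviceSnapshot(), so clear the flag
    // and notify Snapshot() so its while (mInSnapshotMode) loop can exit.
    mInSnapshotMode = false;
    lock.Notify();
    return 0;
  }
  // Normal (non-snapshot) frame delivery would continue here.
  return 0;
}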
nsresult PeerConnectionMedia::Init(const std::vector<NrIceStunServer>& stun_servers,
                                   const std::vector<NrIceTurnServer>& turn_servers)
{
  // TODO([email protected]): need some way to set not offerer later
  // Looks like a bug in the NrIceCtx API.
  mIceCtx = NrIceCtx::Create("PC:" + mParent->GetName(), true);
  if (!mIceCtx) {
    CSFLogError(logTag, "%s: Failed to create Ice Context", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }

  nsresult rv;
  if (NS_FAILED(rv = mIceCtx->SetStunServers(stun_servers))) {
    CSFLogError(logTag, "%s: Failed to set stun servers", __FUNCTION__);
    return rv;
  }

  // Give us a way to globally turn off TURN support.
#ifdef MOZILLA_INTERNAL_API
  bool disabled = Preferences::GetBool("media.peerconnection.turn.disable", false);
#else
  bool disabled = false;
#endif
  if (!disabled) {
    if (NS_FAILED(rv = mIceCtx->SetTurnServers(turn_servers))) {
      CSFLogError(logTag, "%s: Failed to set turn servers", __FUNCTION__);
      return rv;
    }
  } else if (!turn_servers.empty()) {
    CSFLogError(logTag, "%s: Setting turn servers disabled", __FUNCTION__);
  }

  if (NS_FAILED(rv = mDNSResolver->Init())) {
    CSFLogError(logTag, "%s: Failed to initialize dns resolver", __FUNCTION__);
    return rv;
  }
  if (NS_FAILED(rv = mIceCtx->SetResolver(mDNSResolver->AllocateResolver()))) {
    CSFLogError(logTag, "%s: Failed to get dns resolver", __FUNCTION__);
    return rv;
  }

  mIceCtx->SignalGatheringStateChange.connect(
      this, &PeerConnectionMedia::IceGatheringStateChange);
  mIceCtx->SignalConnectionStateChange.connect(
      this, &PeerConnectionMedia::IceConnectionStateChange);

  // Create three streams to start with: one each for audio, video and
  // DataChannel.
  // TODO: this will be re-visited
  RefPtr<NrIceMediaStream> audioStream =
      mIceCtx->CreateStream((mParent->GetName() + ": stream1/audio").c_str(), 2);
  RefPtr<NrIceMediaStream> videoStream =
      mIceCtx->CreateStream((mParent->GetName() + ": stream2/video").c_str(), 2);
  RefPtr<NrIceMediaStream> dcStream =
      mIceCtx->CreateStream((mParent->GetName() + ": stream3/data").c_str(), 2);

  if (!audioStream) {
    CSFLogError(logTag, "%s: audio stream is NULL", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  mIceStreams.push_back(audioStream);

  if (!videoStream) {
    CSFLogError(logTag, "%s: video stream is NULL", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  mIceStreams.push_back(videoStream);

  if (!dcStream) {
    CSFLogError(logTag, "%s: datachannel stream is NULL", __FUNCTION__);
    return NS_ERROR_FAILURE;
  }
  mIceStreams.push_back(dcStream);

  // TODO([email protected]): This is not connected to the PCCimpl.
  // Will need to do that later.
  for (std::size_t i = 0; i < mIceStreams.size(); ++i) {
    mIceStreams[i]->SignalReady.connect(this,
                                        &PeerConnectionMedia::IceStreamReady);
  }

  // TODO([email protected]): When we have a generic error reporting mechanism,
  // figure out how to report that StartGathering failed. Bug 827982.
  RUN_ON_THREAD(mIceCtx->thread(),
                WrapRunnable(mIceCtx, &NrIceCtx::StartGathering),
                NS_DISPATCH_NORMAL);

  return NS_OK;
}
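// A hedged sketch of one of the state-change handlers connected above. The
// parameter list matches the sigslot connect() call (NrIceCtx plus its new
// gathering state), but the body, i.e. forwarding to a PeerConnectionMedia-
// level signal named SignalIceGatheringStateChange, is an assumption about
// how observers such as PeerConnectionImpl would be notified.
void
PeerConnectionMedia::IceGatheringStateChange(NrIceCtx* ctx,
                                             NrIceCtx::GatheringState state)
{
  // Runs on the ICE (STS) thread; observers re-dispatch to the main thread
  // themselves if they need to touch main-thread-only state.
  SignalIceGatheringStateChange(this, state);
}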
void
MediaEngineWebRTCAudioSource::Process(int channel,
                                      webrtc::ProcessingTypes type,
                                      sample* audio10ms, int length,
                                      int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent
  // sample, since it's already irrelevant and we want to avoid confusing the
  // AEC far-end input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      moz_free(gFarendObserver->Pop()); // only call if size() > 0
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk* buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      int numSamples = buffer->mSamples; // renamed to avoid shadowing 'length'
      int res = mVoERender->ExternalPlayoutData(buffer->mData,
                                                gFarendObserver->PlayoutFrequency(),
                                                gFarendObserver->PlayoutChannels(),
                                                mPlayoutDelay,
                                                numSamples);
      moz_free(buffer);
      if (res == -1) {
        return;
      }
    }
  }

#ifdef PR_LOGGING
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;
      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    nsAutoTArray<const sample*, 1> channels;
    channels.AppendElement(dest);
    segment->AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment->GetStartTime(insertTime);

    if (mSources[i]) {
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a
      // given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion,
              LATENCY_STREAM_ID(mSources[i], mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Note: due to evil magic, the nsAutoPtr<AudioSegment>'s ownership
      // transfers to the Runnable (AutoPtr<> = AutoPtr<>)
      RUN_ON_THREAD(mThread,
                    WrapRunnable(mSources[i], &SourceMediaStream::AppendToTrack,
                                 mTrackID, segment, (AudioSegment*) nullptr),
                    NS_DISPATCH_NORMAL);
    }
  }
}
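// Context for the PR_LOGGING throttle above: Process() receives one 10 ms
// block per call (hence the audio10ms parameter name), so `length` is
// samplingFreq / 100 samples and mSamples wraps roughly once per second of
// audio, meaning the echo metrics are logged about once a second. A
// self-contained illustration of that arithmetic (the helper name is
// hypothetical):
static inline int SamplesPer10msBlock(int samplingFreq)
{
  return samplingFreq / 100; // e.g. 160 samples at 16 kHz, 480 at 48 kHz
}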
~TestNrSocketTest()
{
  // Tear down synchronously on the STS thread so socket state is released
  // on the thread that owns it.
  sts_->Dispatch(WrapRunnable(this, &TestNrSocketTest::TearDown_s),
                 NS_DISPATCH_SYNC);
}
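// For contrast, the non-blocking form of the same dispatch, as used with
// RUN_ON_THREAD in the Init() snippets above. NS_DISPATCH_SYNC blocks the
// destructor until TearDown_s() has finished on sts_; NS_DISPATCH_NORMAL
// would return immediately, which would be unsafe here because members could
// be destroyed while TearDown_s() is still running:
//
//   sts_->Dispatch(WrapRunnable(this, &TestNrSocketTest::TearDown_s),
//                  NS_DISPATCH_NORMAL);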