// Opens the capture device identified by |url| and, on success, wires a
// video source -> video track -> local media stream, then hands the stream
// to |peer_connection|. Any failure is logged and the function returns
// without adding a stream.
void PeerConnectionManager::AddStreams(webrtc::PeerConnectionInterface* peer_connection, const std::string & url) {
  cricket::VideoCapturer* capturer = OpenVideoCaptureDevice(url);
  if (!capturer) {
    LOG(LS_ERROR) << "Cannot create capturer " << url;
    return;
  }
  VideoCapturerListener listener(capturer);
  rtc::scoped_refptr<webrtc::VideoSourceInterface> source =
      peer_connection_factory_->CreateVideoSource(capturer, NULL);
  rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track(
      peer_connection_factory_->CreateVideoTrack(kVideoLabel, source));
  rtc::scoped_refptr<webrtc::MediaStreamInterface> stream =
      peer_connection_factory_->CreateLocalMediaStream(kStreamLabel);
  if (!stream.get()) {
    LOG(LS_ERROR) << "Cannot create stream";
    return;
  }
  stream->AddTrack(video_track);
  if (!peer_connection->AddStream(stream)) {
    LOG(LS_ERROR) << "Adding stream to PeerConnection failed";
  }
}
// Initializes this stream/track wrapper for the given media |kind|
// (compared against kAudioKind / kVideoKind) and track |label|.
//
// Two paths:
//  1. If |track| is non-null it is adopted directly and m_source is
//     extracted from it (audio vs. video cast chosen by |kind|).
//  2. Otherwise a fresh source and track are created through |pc_factory|,
//     honoring any constraints previously stored in m_constraints.
//
// Returns true when m_track ends up non-null.
bool Init( const std::string kind, const std::string label,
           talk_base::scoped_refptr<webrtc::PeerConnectionFactoryInterface> pc_factory,
           talk_base::scoped_refptr<webrtc::MediaStreamTrackInterface> track) {
  m_track = track;
  if (m_track != NULL) {
    // Caller supplied a ready-made track: keep it and remember its source.
    if (kind == kAudioKind)
      m_source = ((webrtc::AudioTrackInterface *)m_track.get())->GetSource();
    else
      m_source = ((webrtc::VideoTrackInterface *)m_track.get())->GetSource();
    return true;
  }
  // NOTE(review): returnv_assert appears to be a project macro that returns
  // |false| when |pc_factory| is null -- confirm its definition.
  returnv_assert(pc_factory.get(), false);
  if (kind == kAudioKind) {
    if (!m_source.get()) {
      // Translate any stored audio constraints (AEC / AGC / noise
      // suppression / high-pass filter) into WebRTC MediaConstraints
      // before creating the audio source.
      WebrtcMediaConstraints constraints;
      if (m_constraints.mtype == XRTC_AUDIO && m_constraints.ptr) {
        audio_constraints_t *audio = (audio_constraints_t *)m_constraints.ptr;
        if (audio->aec.valid)
          constraints.AddItem(webrtc::MediaConstraintsInterface::kEchoCancellation, audio->aec.val, audio->aec.optional);
        if (audio->agc.valid)
          constraints.AddItem(webrtc::MediaConstraintsInterface::kAutoGainControl, audio->agc.val, audio->agc.optional);
        if (audio->ns.valid)
          constraints.AddItem(webrtc::MediaConstraintsInterface::kNoiseSuppression, audio->ns.val, audio->ns.optional);
        if (audio->highPassFilter.valid)
          constraints.AddItem(webrtc::MediaConstraintsInterface::kHighpassFilter, audio->highPassFilter.val, audio->highPassFilter.optional);
      }
      m_source = pc_factory->CreateAudioSource(&constraints);
    }
    m_track = pc_factory->CreateAudioTrack(label, (webrtc::AudioSourceInterface *)(m_source.get()));
  } else if (kind == kVideoKind) {
    if (!m_source.get()) {
      // Pick the capture device named in the video constraints, if any.
      std::string vname = "";
      if (m_constraints.mtype == XRTC_VIDEO && m_constraints.ptr) {
        video_constraints_t *video = (video_constraints_t *)m_constraints.ptr;
        if (video && video->device.valid) {
          vname = video->device.val.did;
        }
      }
      // if vname empty, select default device
      LOGI("vname="<<vname);
      cricket::VideoCapturer* capturer = OpenVideoCaptureDevice(vname);
      if (capturer) {
        m_source = pc_factory->CreateVideoSource(capturer, NULL);
      }
    }
    LOGD("create video track by source");
    // Only create the track if a source was obtained (capturer may fail).
    if (m_source) {
      m_track =
          pc_factory->CreateVideoTrack(label, (webrtc::VideoSourceInterface *)(m_source.get()));
    }
  }
  return (m_track != NULL);
}
void RtcStream::SetupLocalStream(bool enableVoice, bool enableVideo) { if ( enableVoice == false && enableVideo == false) return; talk_base::scoped_refptr<webrtc::MediaStreamInterface> stream = factory_->CreateLocalMediaStream("simple_stream"); if ( enableVoice) { #ifdef GOOGLE_ENGINE talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track( factory_->CreateAudioTrack( "simple_voice", factory_->CreateAudioSource(NULL))); #else talk_base::scoped_refptr<webrtc::AudioTrackInterface> audio_track( factory_->CreateAudioTrack( "mixer_voice", NULL)); #endif stream->AddTrack( audio_track); } if ( enableVideo) { #ifdef GOOGLE_ENGINE talk_base::scoped_refptr<webrtc::VideoTrackInterface> video_track( factory_->CreateVideoTrack( "simplertc", factory_->CreateVideoSource(OpenVideoCaptureDevice(), NULL))); stream->AddTrack( video_track); #else talk_base::scoped_refptr<webrtc::VideoTrackInterface> video_track( factory_->CreateVideoTrack( "mixer_video", NULL)); stream->AddTrack( video_track); #endif } connection_->AddStream(stream, NULL); }
void RTCPeer::createStreams(){ rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track( peerConnectionFactory->CreateAudioTrack(kAudioLabel, peerConnectionFactory->CreateAudioSource(NULL))); rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track( peerConnectionFactory->CreateVideoTrack(kVideoLabel,peerConnectionFactory->CreateVideoSource(OpenVideoCaptureDevice(), NULL))); // <---- can start local renderer here if needed rtc::scoped_refptr<webrtc::MediaStreamInterface> stream(peerConnectionFactory->CreateLocalMediaStream(kStreamLabel)); stream->AddTrack(audio_track); stream->AddTrack(video_track); streams["video"] = stream; }
// Wires two Conductor peers to each other (each one's outgoing signaling
// is routed into the other's callback slots) and prepares a shared local
// media stream holding one audio and one video track.
ActivityManager::ActivityManager(QObject *parent) : QObject(parent) {
  _conductorOne = new talk_base::RefCountedObject<Conductor>(this, "ONE");
  _conductorTwo = new talk_base::RefCountedObject<Conductor>(this, "TWO");

  // Signaling direction ONE -> TWO.
  QObject::connect(_conductorOne, SIGNAL(sendBye()),
                   _conductorTwo, SLOT(callbackBye()));
  QObject::connect(_conductorOne, SIGNAL(sendDescription(std::string,std::string)),
                   _conductorTwo, SLOT(callbackDescription(std::string,std::string)));
  QObject::connect(_conductorOne, SIGNAL(sendCandidate(std::string,int,std::string)),
                   _conductorTwo, SLOT(callbackCandidate(std::string,int,std::string)));
  // And the reverse direction TWO -> ONE.
  QObject::connect(_conductorTwo, SIGNAL(sendBye()),
                   _conductorOne, SLOT(callbackBye()));
  QObject::connect(_conductorTwo, SIGNAL(sendDescription(std::string,std::string)),
                   _conductorOne, SLOT(callbackDescription(std::string,std::string)));
  QObject::connect(_conductorTwo, SIGNAL(sendCandidate(std::string,int,std::string)),
                   _conductorOne, SLOT(callbackCandidate(std::string,int,std::string)));

  // Shared local media: audio + captured video on one stream.
  _peer_connection_factory = webrtc::CreatePeerConnectionFactory();
  talk_base::scoped_refptr<webrtc::AudioTrackInterface> localAudio(
      _peer_connection_factory->CreateAudioTrack(
          "Audio", _peer_connection_factory->CreateAudioSource(NULL)));
  talk_base::scoped_refptr<webrtc::VideoTrackInterface> localVideo(
      _peer_connection_factory->CreateVideoTrack(
          "Video",
          _peer_connection_factory->CreateVideoSource(OpenVideoCaptureDevice(), NULL)));
  _globalstream = _peer_connection_factory->CreateLocalMediaStream("Stream");
  _globalstream->AddTrack(localAudio);
  _globalstream->AddTrack(localVideo);
}
// Lazily builds the shared local media stream (a video track fed by the
// capture device plus a default audio track) and attaches it to
// |peer_connection|. The stream is built once and reused on later calls.
//
// Returns false if the capturer, the stream, or either track cannot be
// created/added, or if the PeerConnection rejects the stream.
bool PeerManager::AddStreams(webrtc::PeerConnectionInterface* peer_connection) {
  if (media_stream.get() == NULL) {
    cricket::VideoCapturer *capturer = OpenVideoCaptureDevice();
    if (!capturer) {
      UMBO_WARN("Cannot create capturer");
      return false;
    }
    // Register video capturer listener
    //VideoCapturerListener listener(capturer);

    // Create media stream
    media_stream = peer_connection_factory->CreateLocalMediaStream(STREAM_LABEL);
    if (!media_stream.get()) {
      UMBO_WARN("Fail to create stream");
      // BUG FIX: ownership of |capturer| has not yet been transferred to a
      // video source (that happens in CreateVideoSource below), so it must
      // be freed here to avoid leaking the device handle.
      delete capturer;
      return false;
    }
    // Create video track; CreateVideoSource takes ownership of |capturer|.
    rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track(
        peer_connection_factory->CreateVideoTrack(
            VIDEO_LABEL,
            peer_connection_factory->CreateVideoSource(capturer, NULL)));
    // Create audio track
    rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
        peer_connection_factory->CreateAudioTrack(
            AUDIO_LABEL, peer_connection_factory->CreateAudioSource(NULL)));
    if (!media_stream->AddTrack(video_track)) {
      UMBO_WARN("Fail to add video track");
      return false;
    }
    if (!media_stream->AddTrack(audio_track)) {
      UMBO_WARN("Fail to add audio track");
      return false;
    }
  }
  if (!peer_connection->AddStream(media_stream)) {
    UMBO_WARN("Fail to add media stream to PeerConnection");
    return false;
  }
  return true;
}