void WebRtcConnection::writeSsrc(char* buf, int len, unsigned int ssrc) {
  ELOG_DEBUG("LEN %d", len);
  RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);
  RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(buf);
  // If it is RTCP, we check whether it is a compound packet
  if (chead->isRtcp()) {
    char* movingBuf = buf;
    int rtcpLength = 0;
    int totalLength = 0;
    do {
      movingBuf += rtcpLength;
      RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(movingBuf);
      rtcpLength = (ntohs(chead->length) + 1) * 4;
      totalLength += rtcpLength;
      ELOG_DEBUG("Is RTCP, prev SSRC %u, new %u, len %d ", chead->getSSRC(), ssrc, rtcpLength);
      chead->ssrc = htonl(ssrc);
      if (chead->packettype == RTCP_PS_Feedback_PT) {
        FirHeader* thefir = reinterpret_cast<FirHeader*>(movingBuf);
        if (thefir->fmt == 4) {  // It is a FIR packet; answer it with a PLI
          this->sendPLI();
        }
      }
    } while (totalLength < len);
  } else {
    head->setSSRC(ssrc);
  }
}
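// Hedged aside (not from the source): the (ntohs(chead->length) + 1) * 4
// arithmetic follows RFC 3550, where the RTCP length field counts 32-bit
// words minus one. A minimal sketch of the same compound-packet walk,
// assuming the RtcpHeader layout used above; countRtcpSubPackets is a
// hypothetical helper, not part of this codebase:
static int countRtcpSubPackets(char* buf, int len) {
  int offset = 0;
  int count = 0;
  while (offset < len) {
    RtcpHeader* subHead = reinterpret_cast<RtcpHeader*>(buf + offset);
    offset += (ntohs(subHead->length) + 1) * 4;  // e.g. length == 6 -> 28 bytes
    ++count;
  }
  return count;
}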
bool NiceConnection::setRemoteCandidates(std::vector<CandidateInfo> &candidates) {
  ELOG_DEBUG("Setting remote candidates %lu", (unsigned long) candidates.size());
  for (unsigned int compId = 1; compId <= iceComponents_; compId++) {
    GSList* candList = NULL;
    for (unsigned int it = 0; it < candidates.size(); it++) {
      NiceCandidateType nice_cand_type;
      CandidateInfo cinfo = candidates[it];
      if (cinfo.mediaType != this->mediaType
          || this->transportName->compare(cinfo.transProtocol)
          || cinfo.componentId != compId)
        continue;
      switch (cinfo.hostType) {
        case HOST:
          nice_cand_type = NICE_CANDIDATE_TYPE_HOST;
          break;
        case SRLFX:
          nice_cand_type = NICE_CANDIDATE_TYPE_SERVER_REFLEXIVE;
          break;
        case PRFLX:
          nice_cand_type = NICE_CANDIDATE_TYPE_PEER_REFLEXIVE;
          break;
        case RELAY:
          nice_cand_type = NICE_CANDIDATE_TYPE_RELAYED;
          break;
        default:
          nice_cand_type = NICE_CANDIDATE_TYPE_HOST;
          break;
      }
      NiceCandidate* thecandidate = nice_candidate_new(nice_cand_type);
      NiceAddress* naddr = nice_address_new();
      nice_address_set_from_string(naddr, cinfo.hostAddress.c_str());
      nice_address_set_port(naddr, cinfo.hostPort);
      thecandidate->addr = *naddr;
      nice_address_free(naddr);  // the struct was copied above; avoid leaking it
      // Bounded copy; the original used an unbounded sprintf here
      snprintf(thecandidate->foundation, NICE_CANDIDATE_MAX_FOUNDATION, "%s", cinfo.foundation.c_str());
      thecandidate->username = strdup(cinfo.username.c_str());
      thecandidate->password = strdup(cinfo.password.c_str());
      thecandidate->stream_id = (guint) 1;
      thecandidate->component_id = cinfo.componentId;
      thecandidate->priority = cinfo.priority;
      thecandidate->transport = NICE_CANDIDATE_TRANSPORT_UDP;
      candList = g_slist_append(candList, thecandidate);
      ELOG_DEBUG("New Candidate SET %s %d", cinfo.hostAddress.c_str(), cinfo.hostPort);
    }
    nice_agent_set_remote_candidates(agent_, (guint) 1, compId, candList);
  }
  ELOG_DEBUG("Candidates SET");
  this->updateIceState(NICE_CANDIDATES_RECEIVED);
  return true;
}
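// Hedged aside: cinfo.priority is expected to arrive already computed by the
// remote client following the RFC 5245 formula. A sketch of that computation
// (icePriority is a hypothetical helper, not part of this codebase):
static guint32 icePriority(guint32 typePreference, guint32 localPreference, guint32 componentId) {
  // priority = 2^24 * type preference + 2^8 * local preference + (256 - component id)
  return (typePreference << 24) | (localPreference << 8) | (256 - componentId);
}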
ExternalOutput::~ExternalOutput(){ ELOG_DEBUG("Destructor"); ELOG_DEBUG("Closing Sink"); delete in; in = NULL; if (context_!=NULL){ if (writeheadres_>=0) av_write_trailer(context_); if (avio_close>=0) avio_close(context_->pb); avformat_free_context(context_); context_=NULL; } if (videoCodec_!=NULL){ avcodec_close(videoCodecCtx_); videoCodec_=NULL; } if (audioCodec_!=NULL){ avcodec_close(audioCodecCtx_); audioCodec_ = NULL; } sending_ = false; cond_.notify_one(); thread_.join(); /* boost::unique_lock<boost::mutex> lock(queueMutex_); */ ELOG_DEBUG("ExternalOutput closed Successfully"); }
bool OutputProcessor::initAudioCoder() {
  aCoder = avcodec_find_encoder(static_cast<AVCodecID>(mediaInfo.audioCodec.codec));
  if (!aCoder) {
    ELOG_DEBUG("Audio encoder not found");
    return false;
  }
  aCoderContext = avcodec_alloc_context3(aCoder);
  if (!aCoderContext) {
    ELOG_DEBUG("Memory error allocating audio coder context");
    return false;
  }
  aCoderContext->sample_fmt = AV_SAMPLE_FMT_S16;
  aCoderContext->bit_rate = mediaInfo.audioCodec.bitRate;
  aCoderContext->sample_rate = mediaInfo.audioCodec.sampleRate;
  aCoderContext->channels = 1;
  if (avcodec_open2(aCoderContext, aCoder, NULL) < 0) {
    ELOG_DEBUG("Error opening audio coder");
    return false;
  }
  audioCoder = 1;
  return true;
}
void RRGenerationHandler::notifyUpdate() {
  if (initialized_) {
    return;
  }
  auto pipeline = getContext()->getPipelineShared();
  if (!pipeline) {
    return;
  }
  connection_ = pipeline->getService<WebRtcConnection>().get();
  if (!connection_) {
    return;
  }
  uint32_t video_ssrc = connection_->getVideoSourceSSRC();
  if (video_ssrc != 0) {
    auto video_packets = std::make_shared<RRPackets>();
    video_packets->ssrc = video_ssrc;
    video_packets->type = VIDEO_PACKET;
    rr_info_map_[video_ssrc] = video_packets;
    ELOG_DEBUG("%s, message: Initialized video, ssrc: %u", connection_->toLog(), video_ssrc);
    initialized_ = true;
  }
  uint32_t audio_ssrc = connection_->getAudioSourceSSRC();
  if (audio_ssrc != 0) {
    auto audio_packets = std::make_shared<RRPackets>();
    audio_packets->ssrc = audio_ssrc;
    audio_packets->type = AUDIO_PACKET;
    rr_info_map_[audio_ssrc] = audio_packets;
    initialized_ = true;
    ELOG_DEBUG("%s, message: Initialized audio, ssrc: %u", connection_->toLog(), audio_ssrc);
  }
}
int OutputProcessor::init(const MediaInfo& info, RTPDataReceiver* rtpReceiver) {
  this->mediaInfo = info;
  this->rtpReceiver_ = rtpReceiver;
  encodedBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
  packagedBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
  rtpBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
  if (info.processorType == PACKAGE_ONLY) {
    this->initVideoPackager();
    this->initAudioPackager();
    return 0;
  }
  if (mediaInfo.hasVideo) {
    this->mediaInfo.videoCodec.codec = VIDEO_CODEC_VP8;
    if (vCoder.initEncoder(mediaInfo.videoCodec)) {
      ELOG_DEBUG("Error initializing video encoder");
    }
    this->initVideoPackager();
  }
  if (mediaInfo.hasAudio) {
    ELOG_DEBUG("Init AUDIO processor");
    mediaInfo.audioCodec.codec = AUDIO_CODEC_PCM_U8;
    mediaInfo.audioCodec.sampleRate = 44100;
    mediaInfo.audioCodec.bitRate = 64000;
    encodedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
    packagedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
    this->initAudioCoder();
    this->initAudioPackager();
  }
  return 0;
}
void WebRtcConnection::onTransportData(char* buf, int len, Transport* transport) {
  boost::mutex::scoped_lock lock(writeMutex_);
  if (audioSink_ == NULL && videoSink_ == NULL && fbSink_ == NULL)
    return;
  int length = len;
  rtcpheader* chead = reinterpret_cast<rtcpheader*>(buf);
  if (chead->packettype == RTCP_Receiver_PT
      || chead->packettype == RTCP_PS_Feedback_PT
      || chead->packettype == RTCP_RTP_Feedback_PT) {
    if (fbSink_ != NULL) {
      fbSink_->deliverFeedback(buf, length);
    }
  } else {
    // RTP or RTCP Sender Report
    if (bundle_) {
      // Check incoming SSRC
      rtpheader* head = reinterpret_cast<rtpheader*>(buf);
      rtcpheader* chead = reinterpret_cast<rtcpheader*>(buf);
      unsigned int recvSSRC = ntohl(head->ssrc);
      if (chead->packettype == RTCP_Sender_PT) {  // Sender Report
        ELOG_DEBUG("RTP Sender Report %d length %d ", chead->packettype, ntohs(chead->length));
        recvSSRC = ntohl(chead->ssrc);
      }
      // Deliver data
      if (recvSSRC == this->getVideoSourceSSRC() || recvSSRC == this->getVideoSinkSSRC()) {
        videoSink_->deliverVideoData(buf, length);
      } else if (recvSSRC == this->getAudioSourceSSRC() || recvSSRC == this->getAudioSinkSSRC()) {
        audioSink_->deliverAudioData(buf, length);
      } else {
        ELOG_DEBUG("Unknown SSRC %u, localVideo %u, remoteVideo %u, ignoring",
                   recvSSRC, this->getVideoSourceSSRC(), this->getVideoSinkSSRC());
      }
    } else if (transport->mediaType == AUDIO_TYPE) {
      if (audioSink_ != NULL) {
        rtpheader* head = (rtpheader*) buf;
        // Firefox does not send SSRC in SDP
        if (this->getAudioSourceSSRC() == 0) {
          ELOG_DEBUG("Audio Source SSRC is %u", ntohl(head->ssrc));
          this->setAudioSourceSSRC(ntohl(head->ssrc));
          this->updateState(TRANSPORT_READY, transport);
        }
        head->ssrc = htonl(this->getAudioSinkSSRC());
        audioSink_->deliverAudioData(buf, length);
      }
    } else if (transport->mediaType == VIDEO_TYPE) {
      if (videoSink_ != NULL) {
        rtpheader* head = (rtpheader*) buf;
        // Firefox does not send SSRC in SDP
        if (this->getVideoSourceSSRC() == 0) {
          ELOG_DEBUG("Video Source SSRC is %u", ntohl(head->ssrc));
          this->setVideoSourceSSRC(ntohl(head->ssrc));
          this->updateState(TRANSPORT_READY, transport);
        }
        head->ssrc = htonl(this->getVideoSinkSSRC());
        videoSink_->deliverVideoData(buf, length);
      }
    }
  }
}
bool WebRtcConnection::addRemoteCandidate(const std::string &mid, int mLineIndex, const std::string &sdp) {
  // TODO Check type of transport.
  ELOG_DEBUG("Adding remote Candidate %s, mid %s, sdpMLine %d", sdp.c_str(), mid.c_str(), mLineIndex);
  MediaType theType;
  std::string theMid;
  if ((!mid.compare("video")) || (mLineIndex == remoteSdp_.videoSdpMLine)) {
    theType = VIDEO_TYPE;
    theMid = "video";
  } else {
    theType = AUDIO_TYPE;
    theMid = "audio";
  }
  SdpInfo tempSdp;
  std::string username, password;
  remoteSdp_.getCredentials(username, password, theType);
  tempSdp.setCredentials(username, password, OTHER);
  bool res = false;
  if (tempSdp.initWithSdp(sdp, theMid)) {
    if (theType == VIDEO_TYPE || bundle_) {
      ELOG_DEBUG("Setting VIDEO CANDIDATE");
      res = videoTransport_->setRemoteCandidates(tempSdp.getCandidateInfos(), bundle_);
    } else if (theType == AUDIO_TYPE) {
      ELOG_DEBUG("Setting AUDIO CANDIDATE");
      res = audioTransport_->setRemoteCandidates(tempSdp.getCandidateInfos(), bundle_);
    } else {
      ELOG_ERROR("Cannot add remote candidate with no Media (video or audio)");
    }
  }
  // size_t instead of the original uint8_t, which would wrap past 255 candidates
  for (size_t it = 0; it < tempSdp.getCandidateInfos().size(); it++) {
    remoteSdp_.addCandidate(tempSdp.getCandidateInfos()[it]);
  }
  return res;
}
bool NicerConnection::setRemoteCandidates(const std::vector<CandidateInfo> &candidates, bool is_bundle) {
  std::vector<CandidateInfo> cands(candidates);
  auto remote_candidates_promise = std::make_shared<std::promise<void>>();
  nr_ice_peer_ctx* peer = peer_;
  nr_ice_media_stream* stream = stream_;
  std::shared_ptr<NicerInterface> nicer = nicer_;
  async([cands, is_bundle, nicer, peer, stream, this, remote_candidates_promise] {
    ELOG_DEBUG("%s message: adding remote candidates (%ld)", toLog(), cands.size());
    for (const CandidateInfo &cand : cands) {
      std::string sdp = cand.sdp;
      std::size_t pos = sdp.find(",");
      std::string candidate = sdp.substr(0, pos);
      ELOG_DEBUG("%s message: New remote ICE candidate (%s)", toLog(), candidate.c_str());
      UINT4 r = nicer->IcePeerContextParseTrickleCandidate(peer, stream, const_cast<char *>(candidate.c_str()));
      if (r && r != R_ALREADY) {
        ELOG_WARN("%s message: Couldn't add remote ICE candidate (%s) (%d)", toLog(), candidate.c_str(), r);
      }
    }
    remote_candidates_promise->set_value();
  });
  std::future<void> remote_candidates_future = remote_candidates_promise->get_future();
  std::future_status status = remote_candidates_future.wait_for(std::chrono::seconds(1));
  if (status == std::future_status::timeout) {
    ELOG_WARN("%s message: Could not set remote candidates", toLog());
    return false;
  }
  return true;
}
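// The promise/future idiom above generalizes to "run work on another thread,
// but bound how long the caller blocks". A self-contained sketch with
// std::async (NicerConnection uses its own async() helper instead; needs
// <future> and <chrono>; runWithTimeout is a hypothetical helper):
template <typename F>
bool runWithTimeout(F&& work, std::chrono::seconds timeout) {
  std::future<void> done = std::async(std::launch::async, std::forward<F>(work));
  // Caveat: if this times out, ~future still blocks until the task finishes;
  // the shared_ptr<promise> pattern above avoids that by letting the task
  // outlive the waiter.
  return done.wait_for(timeout) != std::future_status::timeout;
}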
ExternalOutput::~ExternalOutput(){ ELOG_DEBUG("Destructing"); // Stop our thread so we can safely nuke libav stuff and close our // our file. recording_ = false; cond_.notify_one(); thread_.join(); if (audio_stream_ != NULL && video_stream_ != NULL && context_ != NULL){ av_write_trailer(context_); } if (video_stream_ && video_stream_->codec != NULL){ avcodec_close(video_stream_->codec); } if (audio_stream_ && audio_stream_->codec != NULL){ avcodec_close(audio_stream_->codec); } if (context_ != NULL){ avio_close(context_->pb); avformat_free_context(context_); context_ = NULL; } ELOG_DEBUG("Closed Successfully"); }
bool MediaStream::setRemoteSdp(std::shared_ptr<SdpInfo> sdp) {
  ELOG_DEBUG("%s message: setting remote SDP", toLog());
  remote_sdp_ = sdp;
  if (remote_sdp_->videoBandwidth != 0) {
    ELOG_DEBUG("%s message: Setting remote BW, maxVideoBW: %u", toLog(), remote_sdp_->videoBandwidth);
    this->rtcp_processor_->setMaxVideoBW(remote_sdp_->videoBandwidth * 1000);
  }
  if (pipeline_initialized_) {
    pipeline_->notifyUpdate();
    return true;
  }
  bundle_ = remote_sdp_->isBundle;
  setVideoSourceSSRCList(remote_sdp_->video_ssrc_list);
  setAudioSourceSSRC(remote_sdp_->audio_ssrc);
  audio_enabled_ = remote_sdp_->hasAudio;
  video_enabled_ = remote_sdp_->hasVideo;
  rtcp_processor_->addSourceSsrc(getAudioSourceSSRC());
  std::for_each(video_source_ssrc_list_.begin(), video_source_ssrc_list_.end(),
      [this](uint32_t new_ssrc) {
        rtcp_processor_->addSourceSsrc(new_ssrc);
      });
  initializePipeline();
  return true;
}
bool ExternalOutput::initContext() {
  if (context_->oformat->video_codec != AV_CODEC_ID_NONE
      && context_->oformat->audio_codec != AV_CODEC_ID_NONE
      && video_stream_ == NULL && audio_stream_ == NULL) {
    AVCodec* videoCodec = avcodec_find_encoder(context_->oformat->video_codec);
    if (videoCodec == NULL) {  // check before use; the original logged videoCodec->name before this NULL test
      ELOG_ERROR("Could not find video codec");
      return false;
    }
    ELOG_DEBUG("Found Video Codec %s", videoCodec->name);
    video_stream_ = avformat_new_stream(context_, videoCodec);
    video_stream_->id = 0;
    video_stream_->codec->codec_id = context_->oformat->video_codec;
    video_stream_->codec->width = 640;
    video_stream_->codec->height = 480;
    // A decent guess here suffices; if processing the file with ffmpeg,
    // use -vsync 0 to force it not to duplicate frames.
    video_stream_->codec->time_base = (AVRational) { 1, 30 };
    video_stream_->codec->pix_fmt = PIX_FMT_YUV420P;
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      video_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    context_->oformat->flags |= AVFMT_VARIABLE_FPS;
    AVCodec* audioCodec = avcodec_find_encoder(context_->oformat->audio_codec);
    if (audioCodec == NULL) {
      ELOG_ERROR("Could not find audio codec");
      return false;
    }
    ELOG_DEBUG("Found Audio Codec %s", audioCodec->name);
    audio_stream_ = avformat_new_stream(context_, audioCodec);
    audio_stream_->id = 1;
    audio_stream_->codec->codec_id = context_->oformat->audio_codec;
    audio_stream_->codec->sample_rate =
        context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 8000 : 48000;  // TODO is it always 48 khz for opus?
    audio_stream_->codec->time_base = (AVRational) { 1, audio_stream_->codec->sample_rate };
    audio_stream_->codec->channels =
        context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 1 : 2;  // TODO is it always two channels for opus?
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      audio_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    context_->streams[0] = video_stream_;
    context_->streams[1] = audio_stream_;
    if (avio_open(&context_->pb, context_->filename, AVIO_FLAG_WRITE) < 0) {
      ELOG_ERROR("Error opening output file");
      return false;
    }
    if (avformat_write_header(context_, NULL) < 0) {
      ELOG_ERROR("Error writing header");
      return false;
    }
    ELOG_DEBUG("avformat configured");
  }
  return true;
}
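// Related sketch (an assumption, not in the source): packets fed to these
// streams carry RTP timestamps (90 kHz clock for video), which must be
// rescaled to the stream time_base before muxing. rtpToStreamPts is a
// hypothetical helper:
static int64_t rtpToStreamPts(int64_t rtpTimestamp, AVStream* stream) {
  AVRational rtpClock = {1, 90000};  // standard RTP video clock
  return av_rescale_q(rtpTimestamp, rtpClock, stream->time_base);
}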
void DtlsTransport::onHandshakeCompleted(DtlsSocketContext* ctx, std::string clientKey,
                                         std::string serverKey, std::string srtp_profile) {
  boost::mutex::scoped_lock lock(sessionMutex_);
  if (ctx == dtlsRtp_.get()) {
    ELOG_DEBUG("%s - Setting RTP srtp params", transport_name.c_str());
    srtp_.reset(new SrtpChannel());
    if (srtp_->setRtpParams((char*) clientKey.c_str(), (char*) serverKey.c_str())) {
      readyRtp_ = true;
    } else {
      updateTransportState(TRANSPORT_FAILED);
    }
    if (dtlsRtcp_ == NULL) {  // rtcp-mux: the RTP channel carries RTCP too
      readyRtcp_ = true;
    }
  }
  if (ctx == dtlsRtcp_.get()) {
    ELOG_DEBUG("%s - Setting RTCP srtp params", transport_name.c_str());
    srtcp_.reset(new SrtpChannel());
    if (srtcp_->setRtpParams((char*) clientKey.c_str(), (char*) serverKey.c_str())) {
      readyRtcp_ = true;
    } else {
      updateTransportState(TRANSPORT_FAILED);
    }
  }
  ELOG_DEBUG("%s - Ready? %d %d", transport_name.c_str(), readyRtp_, readyRtcp_);
  if (readyRtp_ && readyRtcp_) {
    ELOG_DEBUG("%s - Ready!!!", transport_name.c_str());
    updateTransportState(TRANSPORT_READY);
  }
}
MediaStream::~MediaStream() {
  ELOG_DEBUG("%s message: Destructor called", toLog());
  if (sending_) {
    close();
  }
  ELOG_DEBUG("%s message: Destructor ended", toLog());
}
int VideoDecoder::initDecoder(const VideoCodecInfo& info) {
  ELOG_DEBUG("Init Decoder");
  vDecoder = avcodec_find_decoder(VideoCodecID2ffmpegDecoderID(info.codec));
  if (!vDecoder) {
    ELOG_DEBUG("Error getting video decoder");
    return -1;
  }
  vDecoderContext = avcodec_alloc_context3(vDecoder);
  if (!vDecoderContext) {
    ELOG_DEBUG("Error allocating decoder context");
    return -1;
  }
  vDecoderContext->width = info.width;
  vDecoderContext->height = info.height;
  if (avcodec_open2(vDecoderContext, vDecoder, NULL) < 0) {
    ELOG_DEBUG("Error opening video decoder");
    return -1;
  }
  dPicture = av_frame_alloc();
  if (!dPicture) {
    ELOG_DEBUG("Error allocating video frame");
    return -1;
  }
  return 0;
}
// memory is only valid for duration of callback; must be copied if queueing
// is required
DtlsSocketContext::DtlsSocketContext() {
  started = false;
  mSocket = NULL;
  receiver = NULL;
  DtlsSocketContext::Init();
  ELOG_DEBUG("Creating Dtls factory, Openssl v %s", OPENSSL_VERSION_TEXT);
  mContext = SSL_CTX_new(DTLSv1_method());
  assert(mContext);
  int r = SSL_CTX_use_certificate(mContext, mCert);
  assert(r == 1);
  r = SSL_CTX_use_PrivateKey(mContext, privkey);
  assert(r == 1);
  SSL_CTX_set_cipher_list(mContext, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
  SSL_CTX_set_info_callback(mContext, SSLInfoCallback);
  SSL_CTX_set_verify(mContext, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, SSLVerifyCallback);
  // SSL_CTX_set_session_cache_mode(mContext, SSL_SESS_CACHE_OFF);
  // SSL_CTX_set_options(mContext, SSL_OP_NO_TICKET);
  // Set SRTP profiles; note SSL_CTX_set_tlsext_use_srtp returns 0 on success
  r = SSL_CTX_set_tlsext_use_srtp(mContext, DefaultSrtpProfile);
  assert(r == 0);
  SSL_CTX_set_verify_depth(mContext, 2);
  SSL_CTX_set_read_ahead(mContext, 1);
  ELOG_DEBUG("DtlsSocketContext %p created", this);
}
int AudioEncoder::initEncoder(const AudioCodecInfo& mediaInfo) {
  ELOG_DEBUG("Init audioEncoder begin");
  aCoder_ = avcodec_find_encoder(AudioCodecID2ffmpegDecoderID(mediaInfo.codec));
  if (!aCoder_) {
    ELOG_DEBUG("Audio Codec not found");
    return false;
  }
  aCoderContext_ = avcodec_alloc_context3(aCoder_);
  if (!aCoderContext_) {
    ELOG_DEBUG("Memory error allocating audio coder context");
    return false;
  }
  aCoderContext_->sample_fmt = AV_SAMPLE_FMT_FLT;
  // aCoderContext_->bit_rate = mediaInfo.bitRate;
  aCoderContext_->sample_rate = mediaInfo.sampleRate;  // the original hardcoded 8 here, an invalid rate
  aCoderContext_->channels = 1;
  char errbuff[500];
  int res = avcodec_open2(aCoderContext_, aCoder_, NULL);
  if (res != 0) {
    av_strerror(res, errbuff, sizeof(errbuff));  // pass the array directly, not &errbuff
    ELOG_DEBUG("Error opening audio encoder: %s", errbuff);
    return false;
  }
  ELOG_DEBUG("Init audioEncoder end");
  return true;
}
void NiceConnection::close() {
  if (this->checkIceState() == NICE_FINISHED) {
    return;
  }
  running_ = false;
  ELOG_DEBUG("Closing nice %p", this);
  this->updateIceState(NICE_FINISHED);
  listener_ = NULL;
  boost::system_time const timeout = boost::get_system_time() + boost::posix_time::milliseconds(500);
  ELOG_DEBUG("m_thread join %p", this);
  if (!m_Thread_.timed_join(timeout)) {
    ELOG_DEBUG("Taking too long to close thread, trying to interrupt %p", this);
    m_Thread_.interrupt();
  }
  {  // New scope for lock.
    boost::unique_lock<boost::mutex> lock(agentMutex_);
    if (agent_ != NULL) {
      g_object_unref(agent_);
      agent_ = NULL;
    }
    if (context_ != NULL) {
      g_main_context_unref(context_);
      context_ = NULL;
    }
  }
  this->queueData(1, NULL, -1);
  ELOG_DEBUG("Nice Closed %p", this);
}
int OutputProcessor::encodeAudio(unsigned char* inBuff, int nSamples, AVPacket* pkt) {
  if (audioCoder == 0) {
    ELOG_DEBUG("The audioCoder parameters have not been initialized");
    return -1;
  }
  AVFrame* frame = av_frame_alloc();  /* frame containing input raw audio */
  if (!frame) {
    ELOG_ERROR("could not allocate audio frame");
    return -1;  // return instead of the original exit(1); a failed encode should not kill the process
  }
  uint16_t* samples;
  int ret, got_output, buffer_size;
  frame->nb_samples = aCoderContext->frame_size;
  frame->format = aCoderContext->sample_fmt;
  // frame->channel_layout = aCoderContext->channel_layout;
  /* the codec gives us the frame size in samples;
   * we calculate the size of the samples buffer in bytes */
  ELOG_DEBUG("channels %d, frame_size %d, sample_fmt %d",
             aCoderContext->channels, aCoderContext->frame_size, aCoderContext->sample_fmt);
  buffer_size = av_samples_get_buffer_size(NULL, aCoderContext->channels,
                                           aCoderContext->frame_size, aCoderContext->sample_fmt, 0);
  samples = (uint16_t*) av_malloc(buffer_size);
  if (!samples) {
    ELOG_ERROR("could not allocate %d bytes for samples buffer", buffer_size);
    av_frame_free(&frame);
    return -1;
  }
  /* setup the data pointers in the AVFrame */
  ret = avcodec_fill_audio_frame(frame, aCoderContext->channels, aCoderContext->sample_fmt,
                                 (const uint8_t*) samples, buffer_size, 0);
  if (ret < 0) {
    ELOG_ERROR("could not setup audio frame");
    av_freep(&samples);
    av_frame_free(&frame);
    return ret;
  }
  ret = avcodec_encode_audio2(aCoderContext, pkt, frame, &got_output);
  if (ret < 0) {
    ELOG_ERROR("error encoding audio frame");
  } else if (got_output) {
    ELOG_DEBUG("Got OUTPUT");
  }
  av_freep(&samples);  // the original leaked both the samples buffer and the frame
  av_frame_free(&frame);
  return ret;
}
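// Worked example of the buffer_size computation above (illustrative numbers,
// not from the source): with channels == 1, frame_size == 960 and
// AV_SAMPLE_FMT_S16 (2 bytes per sample), av_samples_get_buffer_size()
// returns 960 * 1 * 2 == 1920 bytes, which here is already aligned.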
void RtpPacketQueue::pushPacket(const char* data, int length) {
  const RTPHeader* header = reinterpret_cast<const RTPHeader*>(data);
  uint16_t nseq = header->getSeqNumber();
  uint32_t ts = header->getTimestamp();
  long long int ltsdiff = (long long int) ts - (long long int) lastTs_;
  int tsdiff = (int) ltsdiff;
  int nseqdiff = nseq - lastNseq_;
  /*
  // nseq sequence cycle test
  if (abs(nseqdiff) > (USHRT_MAX - MAX_DIFF)) {
    NOTIFY("NSeq wrapped around: ns=%d last=%d\n", nseq, lastNseq_);
    if (nseqdiff > 0)
      nseqdiff -= (USHRT_MAX + 1);
    else
      nseqdiff += (USHRT_MAX + 1);
  }
  */
  if (abs(tsdiff) > MAX_DIFF_TS || abs(nseqdiff) > MAX_DIFF) {
    // new flow, process and clean queue
    ELOG_DEBUG("Max diff reached, new Flow? nseqdiff %d, tsdiff %d", nseqdiff, tsdiff);
    ELOG_DEBUG("PT %d", header->getPayloadType());
    lastNseq_ = nseq;
    lastTs_ = ts;
    cleanQueue();
    enqueuePacket(data, length, nseq);
  } else if (nseqdiff > 1) {
    // jump in nseq, enqueue
    ELOG_DEBUG("Jump in nseq");
    enqueuePacket(data, length, nseq);
  } else if (nseqdiff == 1) {
    // next packet, process
    lastNseq_ = nseq;
    lastTs_ = ts;
    enqueuePacket(data, length, nseq);
  } else if (nseqdiff < 0) {
    ELOG_DEBUG("Old Packet Received");
    // old packet, discard? stats?
  } else {  // nseqdiff == 0
    ELOG_DEBUG("Duplicate Packet received");
    // duplicate packet, process (for stats)?
  }
}
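// Hedged sketch: the commented-out wraparound test above can be replaced by
// plain modular arithmetic. Casting the 16-bit difference to int16_t yields
// the signed shortest distance, handling the 65535 -> 0 wrap for free
// (seqNumberDistance is a hypothetical helper, not part of this codebase):
static int seqNumberDistance(uint16_t current, uint16_t last) {
  return static_cast<int16_t>(current - last);  // seqNumberDistance(0, 65535) == 1
}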
DtlsTransport::~DtlsTransport() {
  ELOG_DEBUG("DtlsTransport destructor");
  running_ = false;
  nice_->close();
  ELOG_DEBUG("Join thread getNice");
  getNice_Thread_.join();
  ELOG_DEBUG("DtlsTransport destructor END");
}
Resender::~Resender() {
  ELOG_DEBUG("Resender destructor");
  timer_.cancel();
  if (thread_.get() != NULL) {
    ELOG_DEBUG("Resender destructor, joining thread");
    thread_->join();
    ELOG_DEBUG("Resender thread terminated on destructor");
  }
}
bool WebRtcConnection::setRemoteSdp(const std::string &sdp) {
  ELOG_DEBUG("Set Remote SDP %s", sdp.c_str());
  remoteSdp_.initWithSdp(sdp);
  video_ = (remoteSdp_.videoSsrc != 0);
  audio_ = (remoteSdp_.audioSsrc != 0);
  bundle_ = remoteSdp_.isBundle;
  ELOG_DEBUG("Is bundle? %d", bundle_);
  std::vector<RtpMap> payloadRemote = remoteSdp_.getPayloadInfos();
  localSdp_.getPayloadInfos() = remoteSdp_.getPayloadInfos();
  localSdp_.isBundle = bundle_;
  localSdp_.isRtcpMux = remoteSdp_.isRtcpMux;
  ELOG_DEBUG("Video %d videossrc %u Audio %d audio ssrc %u Bundle %d",
             video_, remoteSdp_.videoSsrc, audio_, remoteSdp_.audioSsrc, bundle_);
  ELOG_DEBUG("Setting SSRC to localSdp %u", this->getVideoSinkSSRC());
  localSdp_.videoSsrc = this->getVideoSinkSSRC();
  localSdp_.audioSsrc = this->getAudioSinkSSRC();
  this->setVideoSourceSSRC(remoteSdp_.videoSsrc);
  this->setAudioSourceSSRC(remoteSdp_.audioSsrc);
  if (remoteSdp_.profile == SAVPF) {
    if (remoteSdp_.isFingerprint) {
      // DTLS-SRTP
      if (remoteSdp_.hasVideo) {
        videoTransport_ = new DtlsTransport(VIDEO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, this,
                                            stunServer_, stunPort_, minPort_, maxPort_);
      }
      if (remoteSdp_.hasAudio) {
        audioTransport_ = new DtlsTransport(AUDIO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, this,
                                            stunServer_, stunPort_, minPort_, maxPort_);
      }
    } else {
      // SDES
      std::vector<CryptoInfo> crypto_remote = remoteSdp_.getCryptoInfos();
      for (unsigned int it = 0; it < crypto_remote.size(); it++) {
        CryptoInfo cryptemp = crypto_remote[it];
        if (cryptemp.mediaType == VIDEO_TYPE
            && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) {
          videoTransport_ = new SdesTransport(VIDEO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, &cryptemp, this,
                                              stunServer_, stunPort_, minPort_, maxPort_);
        } else if (!bundle_ && cryptemp.mediaType == AUDIO_TYPE
                   && !cryptemp.cipherSuite.compare("AES_CM_128_HMAC_SHA1_80")) {
          audioTransport_ = new SdesTransport(AUDIO_TYPE, "", bundle_, remoteSdp_.isRtcpMux, &cryptemp, this,
                                              stunServer_, stunPort_, minPort_, maxPort_);
        }
      }
    }
  }
  return true;
}
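// For reference, the SDES branch above matches SDP crypto attributes of this
// shape (RFC 4568), where the inline parameter carries the base64-encoded
// master key and salt (placeholder shown, not a real key):
// a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:<base64 key||salt>|2^20|1:32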
void DtlsSocketContext::handshakeCompleted() {
  char fprint[100];
  SRTP_PROTECTION_PROFILE* srtp_profile;
  if (mSocket->getRemoteFingerprint(fprint)) {
    ELOG_TRACE("Remote fingerprint == %s", fprint);
    bool check = mSocket->checkFingerprint(fprint, strlen(fprint));
    ELOG_DEBUG("Fingerprint check == %d", check);
    SrtpSessionKeys* keys = mSocket->getSrtpSessionKeys();
    unsigned char* cKey = (unsigned char*) malloc(keys->clientMasterKeyLen + keys->clientMasterSaltLen);
    unsigned char* sKey = (unsigned char*) malloc(keys->serverMasterKeyLen + keys->serverMasterSaltLen);
    memcpy(cKey, keys->clientMasterKey, keys->clientMasterKeyLen);
    memcpy(cKey + keys->clientMasterKeyLen, keys->clientMasterSalt, keys->clientMasterSaltLen);
    memcpy(sKey, keys->serverMasterKey, keys->serverMasterKeyLen);
    memcpy(sKey + keys->serverMasterKeyLen, keys->serverMasterSalt, keys->serverMasterSaltLen);
    // g_base64_encode must be free'd with g_free. Also, std::string's assignment operator does *not*
    // take ownership of the passed-in ptr; under the hood it copies up to the first null character.
    gchar* temp = g_base64_encode((const guchar*) cKey, keys->clientMasterKeyLen + keys->clientMasterSaltLen);
    std::string clientKey = temp;
    g_free(temp);
    temp = nullptr;
    temp = g_base64_encode((const guchar*) sKey, keys->serverMasterKeyLen + keys->serverMasterSaltLen);
    std::string serverKey = temp;
    g_free(temp);
    temp = nullptr;
    ELOG_DEBUG("ClientKey: %s", clientKey.c_str());
    ELOG_DEBUG("ServerKey: %s", serverKey.c_str());
    free(cKey);
    free(sKey);
    delete keys;
    srtp_profile = mSocket->getSrtpProfile();
    if (srtp_profile) {
      ELOG_DEBUG("SRTP Extension negotiated profile=%s", srtp_profile->name);
    }
    // Guard srtp_profile too: the original dereferenced it unconditionally here
    if (receiver != nullptr && srtp_profile != nullptr) {
      receiver->onHandshakeCompleted(this, clientKey, serverKey, srtp_profile->name);
    }
  } else {
    ELOG_DEBUG("Peer did not authenticate");
  }
}
void MediaStream::read(std::shared_ptr<DataPacket> packet) {
  char* buf = packet->data;
  int len = packet->length;
  // PROCESS RTCP
  RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);
  RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(buf);
  uint32_t recvSSRC = 0;
  if (!chead->isRtcp()) {
    recvSSRC = head->getSSRC();
  } else if (chead->packettype == RTCP_Sender_PT) {  // Sender Report
    recvSSRC = chead->getSSRC();
  }
  // DELIVER FEEDBACK (RR, FEEDBACK PACKETS)
  if (chead->isFeedback()) {
    if (fb_sink_ != nullptr && should_send_feedback_) {
      fb_sink_->deliverFeedback(std::move(packet));
    }
  } else {
    // RTP or RTCP Sender Report
    if (bundle_) {
      // Check incoming SSRC and deliver data accordingly
      if (isVideoSourceSSRC(recvSSRC)) {
        parseIncomingPayloadType(buf, len, VIDEO_PACKET);
        video_sink_->deliverVideoData(std::move(packet));
      } else if (isAudioSourceSSRC(recvSSRC)) {
        parseIncomingPayloadType(buf, len, AUDIO_PACKET);
        audio_sink_->deliverAudioData(std::move(packet));
      } else {
        ELOG_DEBUG("%s read video unknownSSRC: %u, localVideoSSRC: %u, localAudioSSRC: %u",
                   toLog(), recvSSRC, this->getVideoSourceSSRC(), this->getAudioSourceSSRC());
      }
    } else {
      if (packet->type == AUDIO_PACKET && audio_sink_ != nullptr) {
        parseIncomingPayloadType(buf, len, AUDIO_PACKET);
        // Firefox does not send SSRC in SDP
        if (getAudioSourceSSRC() == 0) {
          ELOG_DEBUG("%s discoveredAudioSourceSSRC:%u", toLog(), recvSSRC);
          this->setAudioSourceSSRC(recvSSRC);
        }
        audio_sink_->deliverAudioData(std::move(packet));
      } else if (packet->type == VIDEO_PACKET && video_sink_ != nullptr) {
        parseIncomingPayloadType(buf, len, VIDEO_PACKET);
        // Firefox does not send SSRC in SDP
        if (getVideoSourceSSRC() == 0) {
          ELOG_DEBUG("%s discoveredVideoSourceSSRC:%u", toLog(), recvSSRC);
          this->setVideoSourceSSRC(recvSSRC);
        }
        // change ssrc for RTP packets, don't touch here if RTCP
        video_sink_->deliverVideoData(std::move(packet));
      }
    }  // if not bundle
  }  // if not Feedback
}
/**
 * Initialize the audio resampler based on the input and output codec settings.
 * If the input and output sample formats differ, a conversion is required;
 * libswresample takes care of this, but requires initialization.
 */
int AudioDecoder::init_resampler(AVCodecContext* input_codec_context,
                                 AVCodecContext* output_codec_context) {
  int error;
  /**
   * Create a resampler context for the conversion and set the conversion
   * parameters. Default channel layouts based on the number of channels
   * are assumed for simplicity (they are sometimes not detected properly
   * by the demuxer and/or decoder).
   */
  resample_context = swr_alloc_set_opts(NULL,
      av_get_default_channel_layout(output_codec_context->channels),
      output_codec_context->sample_fmt,
      output_codec_context->sample_rate,
      av_get_default_channel_layout(input_codec_context->channels),
      input_codec_context->sample_fmt,
      input_codec_context->sample_rate,
      0, NULL);
  if (!resample_context) {
    ELOG_WARN("Could not allocate resample context");
    return AVERROR(ENOMEM);
  }
  /**
   * Perform a sanity check so that the number of converted samples is
   * not greater than the number of samples to be converted.
   * If the sample rates differ, this case has to be handled differently.
   */
  ELOG_DEBUG("audio input sample_rate = %d, out %d",
             input_codec_context->sample_rate, output_codec_context->sample_rate);
  /** Open the resampler with the specified parameters. */
  if ((error = swr_init(resample_context)) < 0) {
    ELOG_WARN("Could not open resample context");
    swr_free(&resample_context);
    return error;
  }
  // The original called swr_init() a second time here; once is enough.
  ELOG_DEBUG("swr_init done");
  return 0;
}
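// Once swr_init() succeeds, conversion is a single call per decoded frame.
// A minimal usage sketch (convertFrameSamples is a hypothetical helper, and
// `out` is assumed to have been sized with av_samples_alloc):
static int convertFrameSamples(SwrContext* ctx, AVFrame* in, uint8_t** out, int outCapacity) {
  // Returns the number of samples written to out, or a negative AVERROR code.
  return swr_convert(ctx, out, outCapacity,
                     (const uint8_t**) in->extended_data, in->nb_samples);
}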
ExternalInput::~ExternalInput() {
  ELOG_DEBUG("Destructor ExternalInput %s", url_.c_str());
  ELOG_DEBUG("Closing ExternalInput");
  running_ = false;
  thread_.join();
  if (needTranscoding_)
    encodeThread_.join();
  av_free_packet(&avpacket_);
  if (context_ != NULL)
    avformat_free_context(context_);
  ELOG_DEBUG("ExternalInput closed");
}
void NiceConnection::init() {
  ELOG_DEBUG("Gathering candidates %p", this);
  nice_agent_gather_candidates(agent_, 1);
  // Attach to the component to receive the data
  while (running_) {
    if (this->checkIceState() >= NICE_FINISHED || !running_)
      break;
    g_main_context_iteration(context_, true);
  }
  ELOG_DEBUG("LibNice thread finished %p", this);
}
int AudioDecoder::decodeAudio(AVPacket& input_packet, AVPacket& outPacket) {
  ELOG_DEBUG("decoding input packet, size %d", input_packet.size);
  AVFrame* input_frame;
  init_frame(&input_frame);
  int data_present;
  int error = avcodec_decode_audio4(input_codec_context, input_frame, &data_present, &input_packet);
  if (error < 0) {
    ELOG_DEBUG("decoding error %s", get_error_text(error));
    av_frame_free(&input_frame);  // the original leaked the frame on every exit path
    return error;
  }
  if (data_present <= 0) {
    ELOG_DEBUG("data not present");
    av_frame_free(&input_frame);
    return 0;
  }
  /** Initialize the temporary storage for the converted input samples. */
  uint8_t** converted_input_samples = NULL;
  if (init_converted_samples(&converted_input_samples, output_codec_context, input_frame->nb_samples)) {
    ELOG_DEBUG("init_converted_samples fails");
    av_frame_free(&input_frame);
    return 0;
  }
  /**
   * Convert the input samples to the desired output sample format.
   * This requires temporary storage provided by converted_input_samples.
   */
  if (convert_samples((const uint8_t**) input_frame->extended_data,
                      converted_input_samples, input_frame->nb_samples, resample_context)) {
    ELOG_WARN("convert_samples failed!!");
    // Cleanup below assumes init_converted_samples follows the ffmpeg
    // transcode_aac allocation pattern (av_samples_alloc into a calloc'd array).
    av_freep(&converted_input_samples[0]);
    free(converted_input_samples);
    av_frame_free(&input_frame);
    return 0;
  }
  /** Add converted input samples to the FIFO buffer for later processing. */
  if (add_samples_to_fifo(fifo, converted_input_samples, input_frame->nb_samples)) {
    ELOG_WARN("add_samples to fifo failed !!");
  }
  av_freep(&converted_input_samples[0]);
  free(converted_input_samples);
  av_frame_free(&input_frame);
  outPacket.pts = input_packet.pts;
  // meanwhile, encode and package
  return load_encode(outPacket);
}
void ExternalInput::receiveLoop() {
  av_read_play(context_);  // play RTSP
  int gotDecodedFrame = 0;
  int length;
  startTime_ = av_gettime();
  ELOG_DEBUG("Start playing external input %s", url_.c_str());
  while (av_read_frame(context_, &avpacket_) >= 0 && running_) {
    AVPacket orig_pkt = avpacket_;
    if (needTranscoding_) {
      if (avpacket_.stream_index == video_stream_index_) {  // packet is video
        inCodec_.decodeVideo(avpacket_.data, avpacket_.size, decodedBuffer_.get(), bufflen_, &gotDecodedFrame);
        RawDataPacket packetR;
        if (gotDecodedFrame) {
          packetR.data = decodedBuffer_.get();
          packetR.length = bufflen_;
          packetR.type = VIDEO;
          queueMutex_.lock();
          packetQueue_.push(packetR);
          queueMutex_.unlock();
          gotDecodedFrame = 0;
        }
      }
    } else {
      if (avpacket_.stream_index == video_stream_index_) {  // packet is video
        // av_rescale(input, new_scale, old_scale)
        int64_t pts = av_rescale(lastPts_, 1000000, (long int) video_time_base_);  // NOLINT
        int64_t now = av_gettime() - startTime_;
        if (pts > now) {
          av_usleep(pts - now);
        }
        lastPts_ = avpacket_.pts;
        op_->packageVideo(avpacket_.data, avpacket_.size, decodedBuffer_.get(), avpacket_.pts);
      } else if (avpacket_.stream_index == audio_stream_index_) {  // packet is audio
        int64_t pts = av_rescale(lastAudioPts_, 1000000, (long int) audio_time_base_);  // NOLINT
        int64_t now = av_gettime() - startTime_;
        if (pts > now) {
          av_usleep(pts - now);
        }
        lastAudioPts_ = avpacket_.pts;
        length = op_->packageAudio(avpacket_.data, avpacket_.size, decodedBuffer_.get(), avpacket_.pts);
        if (length > 0) {
          audioSink_->deliverAudioData(reinterpret_cast<char*>(decodedBuffer_.get()), length);
        }
      }
    }
    av_free_packet(&orig_pkt);
  }
  ELOG_DEBUG("Ended stream to play %s", url_.c_str());
  running_ = false;
  av_read_pause(context_);
}
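// Worked example of the pacing math above: av_rescale(pts, 1000000, base)
// converts stream ticks to microseconds. With a 90 kHz video clock
// (video_time_base_ == 90000, an illustrative value), a pts of 45000 sleeps
// until the 500 ms mark. ticksToMicroseconds is a hypothetical helper:
static int64_t ticksToMicroseconds(int64_t pts, int64_t ticksPerSecond) {
  return av_rescale(pts, 1000000, ticksPerSecond);  // av_rescale(45000, 1000000, 90000) == 500000
}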