void MediaStream::onTransportData(std::shared_ptr<DataPacket> incoming_packet, Transport *transport) { if ((audio_sink_ == nullptr && video_sink_ == nullptr && fb_sink_ == nullptr)) { return; } std::shared_ptr<DataPacket> packet = std::make_shared<DataPacket>(*incoming_packet); if (transport->mediaType == AUDIO_TYPE) { packet->type = AUDIO_PACKET; } else if (transport->mediaType == VIDEO_TYPE) { packet->type = VIDEO_PACKET; } auto stream_ptr = shared_from_this(); worker_->task([stream_ptr, packet]{ if (!stream_ptr->pipeline_initialized_) { ELOG_DEBUG("%s message: Pipeline not initialized yet.", stream_ptr->toLog()); return; } char* buf = packet->data; RtpHeader *head = reinterpret_cast<RtpHeader*> (buf); RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf); if (!chead->isRtcp()) { uint32_t recvSSRC = head->getSSRC(); if (stream_ptr->isVideoSourceSSRC(recvSSRC)) { packet->type = VIDEO_PACKET; } else if (stream_ptr->isAudioSourceSSRC(recvSSRC)) { packet->type = AUDIO_PACKET; } } stream_ptr->pipeline_->read(std::move(packet)); }); }
void SdesTransport::onNiceData(unsigned int component_id, char* data, int len, NiceConnection* nice) { //boost::mutex::scoped_lock lock(readMutex_); int length = len; SrtpChannel *srtp = srtp_; if (this->getTransportState() == TRANSPORT_READY) { memcpy(unprotectBuf_, data, len); if (component_id == 2) { srtp = srtcp_; } RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (unprotectBuf_); if (chead->isRtcp()){ if(srtp->unprotectRtcp(unprotectBuf_, &length)<0) return; } else { if(srtp->unprotectRtp(unprotectBuf_, &length)<0) return; } if (length <= 0) return; getTransportListener()->onTransportData(unprotectBuf_, length, this); } }
// Rewrites the SSRC field(s) of an outgoing packet in place.
// For RTP, sets the single SSRC in the fixed header. For RTCP the buffer may
// be a compound packet, so every chained RTCP sub-packet has its sender SSRC
// replaced; a payload-specific feedback packet with fmt == 4 (a FIR)
// additionally triggers a PLI towards the publisher.
void WebRtcConnection::writeSsrc(char* buf, int len, unsigned int ssrc) {
  ELOG_DEBUG("LEN %d", len);
  RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
  RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
  // If it is RTCP, walk it as a (possibly compound) packet.
  if (chead->isRtcp()) {
    char* movingBuf = buf;
    int rtcpLength = 0;
    int totalLength = 0;
    do {
      // Advance to the next sub-packet (rtcpLength is 0 on the first pass).
      movingBuf += rtcpLength;
      RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(movingBuf);
      // The RTCP length field counts 32-bit words minus one: bytes = (words+1)*4.
      rtcpLength = (ntohs(chead->length) + 1) * 4;
      totalLength += rtcpLength;
      ELOG_DEBUG("Is RTCP, prev SSRC %u, new %u, len %d ", chead->getSSRC(), ssrc, rtcpLength);
      chead->ssrc = htonl(ssrc);
      if (chead->packettype == RTCP_PS_Feedback_PT) {
        FirHeader *thefir = reinterpret_cast<FirHeader*>(movingBuf);
        if (thefir->fmt == 4) {  // It is a FIR packet: ask the publisher for a keyframe
          this->sendPLI();
        }
      }
    } while (totalLength < len);
  } else {
    head->setSSRC(ssrc);
  }
}
void MediaStream::onTransportData(std::shared_ptr<DataPacket> packet, Transport *transport) { if ((audio_sink_ == nullptr && video_sink_ == nullptr && fb_sink_ == nullptr)) { return; } if (transport->mediaType == AUDIO_TYPE) { packet->type = AUDIO_PACKET; } else if (transport->mediaType == VIDEO_TYPE) { packet->type = VIDEO_PACKET; } char* buf = packet->data; RtpHeader *head = reinterpret_cast<RtpHeader*> (buf); RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf); if (!chead->isRtcp()) { uint32_t recvSSRC = head->getSSRC(); if (isVideoSourceSSRC(recvSSRC)) { packet->type = VIDEO_PACKET; } else if (isAudioSourceSSRC(recvSSRC)) { packet->type = AUDIO_PACKET; } } if (!pipeline_initialized_) { ELOG_DEBUG("%s message: Pipeline not initialized yet.", toLog()); return; } pipeline_->read(std::move(packet)); }
// Observes every inbound packet when enabled: RTP feeds the receiver-report
// statistics, RTCP sender reports update the SR bookkeeping. The packet
// always continues down the pipeline untouched.
void RRGenerationHandler::read(Context *ctx, std::shared_ptr<dataPacket> packet) {
  RtcpHeader *header = reinterpret_cast<RtcpHeader*>(packet->data);
  if (enabled_) {
    if (!header->isRtcp()) {
      handleRtpPacket(packet);
    } else if (header->packettype == RTCP_Sender_PT) {
      handleSR(packet);
    }
  }
  ctx->fireRead(packet);
}
// Counts outgoing video RTP packets for the bandwidth estimator and parses
// sender reports carrying our own video sink SSRC to feed the SR analysis.
void SenderBandwidthEstimationHandler::write(Context *ctx, std::shared_ptr<dataPacket> packet) {
  RtcpHeader *header = reinterpret_cast<RtcpHeader*>(packet->data);
  const bool is_rtp = !header->isRtcp();
  if (is_rtp && packet->type == VIDEO_PACKET) {
    period_packets_sent_++;
  } else if (header->getPacketType() == RTCP_Sender_PT &&
             header->getSSRC() == connection_->getVideoSinkSSRC()) {
    analyzeSr(header);
  }
  ctx->fireWrite(packet);
}
void WebRtcConnection::writeSsrc(char* buf, int len, unsigned int ssrc) { RtpHeader *head = reinterpret_cast<RtpHeader*> (buf); RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf); //if it is RTCP we check it it is a compound packet if (chead->isRtcp()) { processRtcpHeaders(buf,len,ssrc); } else { head->ssrc=htonl(ssrc); } }
void MediaStream::read(std::shared_ptr<DataPacket> packet) { char* buf = packet->data; int len = packet->length; // PROCESS RTCP RtpHeader *head = reinterpret_cast<RtpHeader*> (buf); RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf); uint32_t recvSSRC = 0; if (!chead->isRtcp()) { recvSSRC = head->getSSRC(); } else if (chead->packettype == RTCP_Sender_PT) { // Sender Report recvSSRC = chead->getSSRC(); } // DELIVER FEEDBACK (RR, FEEDBACK PACKETS) if (chead->isFeedback()) { if (fb_sink_ != nullptr && should_send_feedback_) { fb_sink_->deliverFeedback(std::move(packet)); } } else { // RTP or RTCP Sender Report if (bundle_) { // Check incoming SSRC // Deliver data if (isVideoSourceSSRC(recvSSRC)) { parseIncomingPayloadType(buf, len, VIDEO_PACKET); video_sink_->deliverVideoData(std::move(packet)); } else if (isAudioSourceSSRC(recvSSRC)) { parseIncomingPayloadType(buf, len, AUDIO_PACKET); audio_sink_->deliverAudioData(std::move(packet)); } else { ELOG_DEBUG("%s read video unknownSSRC: %u, localVideoSSRC: %u, localAudioSSRC: %u", toLog(), recvSSRC, this->getVideoSourceSSRC(), this->getAudioSourceSSRC()); } } else { if (packet->type == AUDIO_PACKET && audio_sink_ != nullptr) { parseIncomingPayloadType(buf, len, AUDIO_PACKET); // Firefox does not send SSRC in SDP if (getAudioSourceSSRC() == 0) { ELOG_DEBUG("%s discoveredAudioSourceSSRC:%u", toLog(), recvSSRC); this->setAudioSourceSSRC(recvSSRC); } audio_sink_->deliverAudioData(std::move(packet)); } else if (packet->type == VIDEO_PACKET && video_sink_ != nullptr) { parseIncomingPayloadType(buf, len, VIDEO_PACKET); // Firefox does not send SSRC in SDP if (getVideoSourceSSRC() == 0) { ELOG_DEBUG("%s discoveredVideoSourceSSRC:%u", toLog(), recvSSRC); this->setVideoSourceSSRC(recvSSRC); } // change ssrc for RTP packets, don't touch here if RTCP video_sink_->deliverVideoData(std::move(packet)); } } // if not bundle } // if not Feedback }
// Tracks outgoing packets when the handler is initialized and enabled: RTP
// updates the send bookkeeping via handleRtpPacket(), sender reports are
// adjusted via handleSR(). The packet is always forwarded down the pipeline.
void SRPacketHandler::write(Context *ctx, std::shared_ptr<dataPacket> packet) {
  if (initialized_ && enabled_) {
    RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(packet->data);
    // Cleanup: the outer condition already guarantees enabled_, so the inner
    // re-checks of enabled_ were redundant and have been removed.
    if (!chead->isRtcp()) {
      handleRtpPacket(packet);
    } else if (chead->packettype == RTCP_Sender_PT) {
      handleSR(packet);
    }
  }
  ctx->fireWrite(packet);
}
// Receives a packet from the ICE layer. DTLS handshake traffic is routed to
// the DTLS state machines (cancelling any pending retransmission first); once
// the transport is READY, SRTP/SRTCP payloads are unprotected into
// unprotect_packet_ and forwarded to the transport listener.
void DtlsTransport::onNiceData(packetPtr packet) {
  int len = packet->length;
  char *data = packet->data;
  unsigned int component_id = packet->comp;
  SrtpChannel *srtp = srtp_.get();
  if (DtlsTransport::isDtlsPacket(data, len)) {
    ELOG_DEBUG("%s message: Received DTLS message, transportName: %s, componentId: %u",
               toLog(), transport_name.c_str(), component_id);
    if (component_id == 1) {
      if (rtp_resender_.get() != NULL) {
        rtp_resender_->cancel();
      }
      dtlsRtp->read(reinterpret_cast<unsigned char*>(data), len);
    } else {
      if (rtcp_resender_.get() != NULL) {
        rtcp_resender_->cancel();
      }
      dtlsRtcp->read(reinterpret_cast<unsigned char*>(data), len);
    }
    return;
  } else if (this->getTransportState() == TRANSPORT_READY) {
    unprotect_packet_->length = len;
    unprotect_packet_->received_time_ms = packet->received_time_ms;
    memcpy(unprotect_packet_->data, data, len);
    if (dtlsRtcp != NULL && component_id == 2) {
      // Non-muxed RTCP arrives on component 2 and uses the SRTCP context.
      srtp = srtcp_.get();
    }
    if (srtp != NULL) {
      RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(unprotect_packet_->data);
      if (chead->isRtcp()) {
        if (srtp->unprotectRtcp(unprotect_packet_->data, &unprotect_packet_->length) < 0) {
          return;
        }
      } else {
        if (srtp->unprotectRtp(unprotect_packet_->data, &unprotect_packet_->length) < 0) {
          return;
        }
      }
    } else {
      return;
    }
    // Bug fix: test the unprotected length (updated in place by SrtpChannel),
    // not the stale local copy of the wire length, which the old code checked
    // and which could never be <= 0 after the memcpy above.
    if (unprotect_packet_->length <= 0) {
      return;
    }
    getTransportListener()->onTransportData(unprotect_packet_, this);
  }
}
// Feeds inbound sender reports to the RTCP processor; for media packets it
// refreshes the publisher bandwidth estimate from the computed stats. The
// processor also gets a chance to emit pending feedback on every packet.
void RtcpProcessorHandler::read(Context *ctx, std::shared_ptr<dataPacket> packet) {
  RtcpHeader *header = reinterpret_cast<RtcpHeader*>(packet->data);
  const bool is_rtcp = header->isRtcp();
  if (!is_rtcp) {
    if (stats_->getNode()["total"].hasChild("bitrateCalculated")) {
      processor_->setPublisherBW(stats_->getNode()["total"]["bitrateCalculated"].value());
    }
  } else if (header->packettype == RTCP_Sender_PT) {  // Sender Report
    processor_->analyzeSr(header);
  }
  processor_->checkRtcpFb();
  ctx->fireRead(packet);
}
// Annotates inbound RTP packets with the codec name and clock rate resolved
// from the remote SDP by external payload type. RTCP passes through as-is.
void PacketCodecParser::read(Context *ctx, std::shared_ptr<DataPacket> packet) {
  RtcpHeader *header = reinterpret_cast<RtcpHeader*>(packet->data);
  const bool is_rtp = !header->isRtcp();
  if (is_rtp && enabled_) {
    RtpHeader *rtp_header = reinterpret_cast<RtpHeader*>(packet->data);
    RtpMap *mapping = stream_->getRemoteSdpInfo()->getCodecByExternalPayloadType(
        rtp_header->getPayloadType());
    if (mapping != nullptr) {
      packet->codec = mapping->encoding_name;
      packet->clock_rate = mapping->clock_rate;
      ELOG_DEBUG("Reading codec: %s, clock: %u", packet->codec.c_str(), packet->clock_rate);
    }
  }
  ctx->fireRead(std::move(packet));
}
// Rewrites the payload type of an outgoing RTP packet from the internal
// (negotiated) value to the external value expected by the remote peer.
// RTCP packets are left untouched.
void MediaStream::changeDeliverPayloadType(DataPacket *dp, packetType type) {
  RtpHeader* h = reinterpret_cast<RtpHeader*>(dp->data);
  RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(dp->data);
  if (chead->isRtcp()) {
    return;
  }
  int internalPT = h->getPayloadType();
  int externalPT = internalPT;
  if (type == AUDIO_PACKET) {
    externalPT = remote_sdp_->getAudioExternalPT(internalPT);
  } else if (type == VIDEO_PACKET) {
    // Consistency fix: look up by internalPT, as the audio branch does. The
    // old code passed externalPT, which only worked because at this point it
    // still equaled internalPT — a latent bug if the initialization changed.
    externalPT = remote_sdp_->getVideoExternalPT(internalPT);
  }
  if (internalPT != externalPT) {
    h->setPayloadType(externalPT);
  }
}
// Queues an incoming media packet for the recording writer thread.
// Drops RTCP; on the first packet, records the reception timestamp and — if
// no audio SSRC was negotiated — forces a PCM mu-law audio codec on the
// output context. Also requests a full intra frame (FIR) at most once per
// FIR_INTERVAL_MS, and records per-stream start offsets relative to the
// first packet seen.
void ExternalOutput::queueData(char* buffer, int length, packetType type){
  if (!recording_) {
    return;
  }
  RtcpHeader *head = reinterpret_cast<RtcpHeader*>(buffer);
  if (head->isRtcp()){
    // Only media is recorded; control packets are ignored.
    return;
  }
  if (firstDataReceived_ == -1) {
    // First packet ever: remember the wall-clock start in milliseconds.
    timeval time;
    gettimeofday(&time, NULL);
    firstDataReceived_ = (time.tv_sec * 1000) + (time.tv_usec / 1000);
    if (this->getAudioSinkSSRC() == 0){
      ELOG_DEBUG("No audio detected");
      context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
    }
  }
  timeval time;
  gettimeofday(&time, NULL);
  unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
  // Periodically ask the publisher for a keyframe so the recording can seek.
  if (millis -lastFullIntraFrameRequest_ >FIR_INTERVAL_MS){
    this->sendFirPacket();
    lastFullIntraFrameRequest_ = millis;
  }
  if (type == VIDEO_PACKET){
    if(this->videoOffsetMsec_ == -1) {
      // First video packet: offset of the video stream from recording start.
      videoOffsetMsec_ = ((time.tv_sec * 1000) + (time.tv_usec / 1000)) - firstDataReceived_;
      ELOG_DEBUG("File %s, video offset msec: %llu", context_->filename, videoOffsetMsec_);
    }
    videoQueue_.pushPacket(buffer, length);
  }else{
    if(this->audioOffsetMsec_ == -1) {
      // First audio packet: offset of the audio stream from recording start.
      audioOffsetMsec_ = ((time.tv_sec * 1000) + (time.tv_usec / 1000)) - firstDataReceived_;
      ELOG_DEBUG("File %s, audio offset msec: %llu", context_->filename, audioOffsetMsec_);
    }
    audioQueue_.pushPacket(buffer, length);
  }
  if( audioQueue_.hasData() || videoQueue_.hasData()) {
    // One or both of our queues has enough data to write stuff out. Notify our writer.
    cond_.notify_one();
  }
}
// Feeds inbound packets to the per-SSRC RR and NACK generators and, when one
// of them asks for it, synthesizes a receiver report (optionally carrying a
// NACK block) and fires it back down the write path. The original packet is
// always forwarded up the read path.
void RtcpFeedbackGenerationHandler::read(Context *ctx, std::shared_ptr<DataPacket> packet) {
  // Pass packets to RR and NACK Generator
  RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(packet->data);
  if (!initialized_) {
    ctx->fireRead(std::move(packet));
    return;
  }
  if (chead->getPacketType() == RTCP_Sender_PT) {
    // Sender reports update the RR generator for that SSRC.
    uint32_t ssrc = chead->getSSRC();
    auto generator_it = generators_map_.find(ssrc);
    if (generator_it != generators_map_.end()) {
      generator_it->second->rr_generator->handleSr(packet);
    } else {
      ELOG_DEBUG("message: no RrGenerator found, ssrc: %u", ssrc);
    }
    ctx->fireRead(std::move(packet));
    return;
  }
  bool should_send_rr = false;
  bool should_send_nack = false;
  if (!chead->isRtcp()) {
    RtpHeader *head = reinterpret_cast<RtpHeader*>(packet->data);
    uint32_t ssrc = head->getSSRC();
    auto generator_it = generators_map_.find(ssrc);
    if (generator_it != generators_map_.end()) {
      should_send_rr = generator_it->second->rr_generator->handleRtpPacket(packet);
      if (nacks_enabled_) {
        should_send_nack = generator_it->second->nack_generator->handleRtpPacket(packet);
      }
    } else {
      ELOG_DEBUG("message: no Generator found, ssrc: %u", ssrc);
    }
    // NOTE: generator_it is only dereferenced here when one of the flags is
    // set, which can only happen if the lookup above succeeded.
    if (should_send_rr || should_send_nack) {
      ELOG_DEBUG("message: Should send Rtcp, ssrc %u", ssrc);
      std::shared_ptr<DataPacket> rtcp_packet = generator_it->second->rr_generator->generateReceiverReport();
      if (nacks_enabled_ && generator_it->second->nack_generator != nullptr) {
        generator_it->second->nack_generator->addNackPacketToRr(rtcp_packet);
      }
      ctx->fireWrite(std::move(rtcp_packet));
    }
  }
  ctx->fireRead(std::move(packet));
}
// Handles a packet from libnice on a DTLS transport. DTLS handshake messages
// are fed to the DTLS state machine for the matching component (cancelling
// any pending retransmission first). Once the transport is READY, media is
// copied into unprotectBuf_, unprotected (SRTP for RTP, SRTCP for RTCP), and
// forwarded to the transport listener.
//
// component_id: ICE component (1 = RTP, 2 = RTCP when not muxed).
// data/len:     raw packet as received from the wire.
// nice:         originating NiceConnection (unused here).
void DtlsTransport::onNiceData(unsigned int component_id, char* data, int len, NiceConnection* nice) {
  int length = len;
  SrtpChannel* srtp = srtp_.get();
  if (DtlsTransport::isDtlsPacket(data, len)) {
    ELOG_DEBUG("%s - Received DTLS message from %u", transport_name.c_str(), component_id);
    if (component_id == 1) {
      // Handshake progress on the RTP component: stop resending our flight.
      if (rtpResender_.get() != NULL) {
        rtpResender_->cancel();
      }
      dtlsRtp_->read(reinterpret_cast<unsigned char*>(data), len);
    } else {
      if (rtcpResender_.get() != NULL) {
        rtcpResender_->cancel();
      }
      dtlsRtcp_->read(reinterpret_cast<unsigned char*>(data), len);
    }
    return;
  } else if (this->getTransportState() == TRANSPORT_READY) {
    memcpy(unprotectBuf_, data, len);
    if (dtlsRtcp_ != NULL && component_id == 2) {
      // Non-muxed RTCP arrives on component 2 and uses the SRTCP context.
      srtp = srtcp_.get();
    }
    if (srtp != NULL) {
      RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(unprotectBuf_);
      if (chead->isRtcp()) {
        if (srtp->unprotectRtcp(unprotectBuf_, &length) < 0) {
          return;
        }
      } else {
        if (srtp->unprotectRtp(unprotectBuf_, &length) < 0) {
          return;
        }
      }
    } else {
      // No SRTP channel yet: drop the packet.
      return;
    }
    // unprotect*() updates length in place; drop degenerate results.
    if (length <= 0) {
      return;
    }
    getTransportListener()->onTransportData(unprotectBuf_, length, this);
  }
}
// Observes outgoing video RTP to drive padding generation: cancels the
// pending scheduled task, records the stream start time on the first packet,
// and — after forwarding — hands packets that advance the sequence number to
// onVideoPacket().
void RtpPaddingGeneratorHandler::write(Context *ctx, std::shared_ptr<DataPacket> packet) {
  RtcpHeader *header = reinterpret_cast<RtcpHeader*>(packet->data);
  const bool is_video_rtp = packet->type == VIDEO_PACKET && !header->isRtcp();
  bool advances_sequence = false;
  if (is_video_rtp) {
    stream_->getWorker()->unschedule(scheduled_task_);
    advances_sequence = isHigherSequenceNumber(packet);
    if (!first_packet_received_) {
      started_at_ = clock_->now();
    }
    first_packet_received_ = true;
  }
  ctx->fireWrite(packet);
  if (advances_sequence) {
    onVideoPacket(std::move(packet));
  }
}
// parses incoming payload type, replaces occurence in buf void MediaStream::parseIncomingPayloadType(char *buf, int len, packetType type) { RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(buf); RtpHeader* h = reinterpret_cast<RtpHeader*>(buf); if (!chead->isRtcp()) { int externalPT = h->getPayloadType(); int internalPT = externalPT; if (type == AUDIO_PACKET) { internalPT = remote_sdp_->getAudioInternalPT(externalPT); } else if (type == VIDEO_PACKET) { internalPT = remote_sdp_->getVideoInternalPT(externalPT); } if (externalPT != internalPT) { h->setPayloadType(internalPT); } else { // ELOG_WARN("onTransportData did not find mapping for %i", externalPT); } } }
// parses incoming payload type, replaces occurence in buf void WebRtcConnection::parseIncomingPayloadType(char *buf, int len, packetType type) { RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(buf); RtpHeader* h = reinterpret_cast<RtpHeader*>(buf); if (!chead->isRtcp()) { int externalPT = h->getPayloadType(); int internalPT = externalPT; if (type == AUDIO_PACKET) { internalPT = remoteSdp_.getAudioInternalPT(externalPT); } else if (type == VIDEO_PACKET) { internalPT = remoteSdp_.getVideoInternalPT(externalPT); } if (externalPT != internalPT) { h->setPayloadType(internalPT); // ELOG_ERROR("onTransportData mapping %i to %i", externalPT, internalPT); } else { // ELOG_ERROR("onTransportData did not find mapping for %i", externalPT); } } }
// Protects and sends an outgoing packet over ICE once the transport is READY.
// The payload is copied into protectBuf_, encrypted in place (SRTP for RTP,
// SRTCP for RTCP), and written on the appropriate ICE component: RTCP goes on
// component 2 unless rtcp-mux is negotiated. Serialized by writeMutex_.
void DtlsTransport::write(char* data, int len) {
  boost::mutex::scoped_lock lock(writeMutex_);
  if (nice_==NULL)
    return;
  int length = len;
  SrtpChannel *srtp = srtp_.get();
  if (this->getTransportState() == TRANSPORT_READY) {
    memcpy(protectBuf_, data, len);
    int comp = 1;
    RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (protectBuf_);
    if (chead->isRtcp()) {
      if (!rtcp_mux_) {
        // Without rtcp-mux, RTCP travels on its own ICE component.
        comp = 2;
      }
      if (dtlsRtcp != NULL) {
        srtp = srtcp_.get();
      }
      if (srtp && nice_->checkIceState() == NICE_READY) {
        if(srtp->protectRtcp(protectBuf_, &length)<0) {
          return;
        }
      }
    }
    else{
      comp = 1;
      if (srtp && nice_->checkIceState() == NICE_READY) {
        if(srtp->protectRtp(protectBuf_, &length)<0) {
          return;
        }
      }
    }
    // Discard packets too short to be a valid protected RTP/RTCP frame.
    if (length <= 10) {
      return;
    }
    if (nice_->checkIceState() == NICE_READY) {
      this->writeOnNice(comp, protectBuf_, length);
    }
  }
}
// Strips padding from inbound video RTP and renumbers the packet through the
// per-SSRC sequence-number translator. Packets whose padding removal fails or
// whose translation is not valid are dropped; everything else continues up
// the pipeline.
void RtpPaddingRemovalHandler::read(Context *ctx, std::shared_ptr<DataPacket> packet) {
  RtcpHeader *rtcp_header = reinterpret_cast<RtcpHeader*>(packet->data);
  RtpHeader *rtp_header = reinterpret_cast<RtpHeader*>(packet->data);
  const bool is_video_rtp = !rtcp_header->isRtcp() && enabled_ && packet->type == VIDEO_PACKET;
  if (!is_video_rtp) {
    ctx->fireRead(std::move(packet));
    return;
  }
  const uint32_t ssrc = rtp_header->getSSRC();
  std::shared_ptr<SequenceNumberTranslator> translator = getTranslatorForSsrc(ssrc, true);
  if (!removePaddingBytes(packet, translator)) {
    // removePaddingBytes() rejected the packet: swallow it.
    return;
  }
  const uint16_t sequence_number = rtp_header->getSeqNumber();
  SequenceNumber sequence_number_info = translator->get(sequence_number, false);
  if (sequence_number_info.type != SequenceNumberType::Valid) {
    ELOG_DEBUG("Invalid translation %u, ssrc: %u", sequence_number, ssrc);
    return;
  }
  ELOG_DEBUG("Changing seq_number from %u to %u, ssrc %u", sequence_number,
             sequence_number_info.output, ssrc);
  rtp_header->setSeqNumber(sequence_number_info.output);
  ctx->fireRead(std::move(packet));
}
void WebRtcConnection::onTransportData(char* buf, int len, Transport *transport) { if (audioSink_ == NULL && videoSink_ == NULL && fbSink_==NULL){ return; } // PROCESS RTCP RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(buf); if (chead->isRtcp()) { thisStats_.processRtcpPacket(buf, len); if (chead->packettype == RTCP_Sender_PT) { //Sender Report rtcpProcessor_->analyzeSr(chead); } } // DELIVER FEEDBACK (RR, FEEDBACK PACKETS) if (chead->isFeedback()){ if (fbSink_ != NULL && shouldSendFeedback_) { fbSink_->deliverFeedback(buf,len); } } else { // RTP or RTCP Sender Report if (bundle_) { // Check incoming SSRC RtpHeader *head = reinterpret_cast<RtpHeader*> (buf); RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf); uint32_t recvSSRC; if (chead->packettype == RTCP_Sender_PT) { //Sender Report recvSSRC = chead->getSSRC(); }else{ recvSSRC = head->getSSRC(); } // Deliver data if (recvSSRC==this->getVideoSourceSSRC()) { parseIncomingPayloadType(buf, len, VIDEO_PACKET); videoSink_->deliverVideoData(buf, len); } else if (recvSSRC==this->getAudioSourceSSRC()) { parseIncomingPayloadType(buf, len, AUDIO_PACKET); audioSink_->deliverAudioData(buf, len); } else { ELOG_ERROR("Unknown SSRC %u, localVideo %u, remoteVideo %u, ignoring", recvSSRC, this->getVideoSourceSSRC(), this->getVideoSinkSSRC()); } } else if (transport->mediaType == AUDIO_TYPE) { if (audioSink_ != NULL) { parseIncomingPayloadType(buf, len, AUDIO_PACKET); RtpHeader *head = reinterpret_cast<RtpHeader*> (buf); RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf); // Firefox does not send SSRC in SDP if (this->getAudioSourceSSRC() == 0) { unsigned int recvSSRC; this->setAudioSourceSSRC(head->getSSRC()); if (chead->packettype == RTCP_Sender_PT) { // Sender Report recvSSRC = chead->getSSRC(); } else { recvSSRC = head->getSSRC(); } ELOG_DEBUG("Audio Source SSRC is %u", recvSSRC); this->setAudioSourceSSRC(recvSSRC); } audioSink_->deliverAudioData(buf, len); } } else if (transport->mediaType == VIDEO_TYPE) { if 
(videoSink_ != NULL) { parseIncomingPayloadType(buf, len, VIDEO_PACKET); RtpHeader *head = reinterpret_cast<RtpHeader*> (buf); RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf); // Firefox does not send SSRC in SDP if (this->getVideoSourceSSRC() == 0) { unsigned int recvSSRC; if (chead->packettype == RTCP_Sender_PT) { //Sender Report recvSSRC = chead->getSSRC(); } else { recvSSRC = head->getSSRC(); } ELOG_DEBUG("Video Source SSRC is %u", recvSSRC); this->setVideoSourceSSRC(recvSSRC); } // change ssrc for RTP packets, don't touch here if RTCP videoSink_->deliverVideoData(buf, len); } } } // check if we need to send FB || RR messages rtcpProcessor_->checkRtcpFb(); }
// Queues an incoming media packet for the recording writer thread.
// Drops RTCP; on the first packet, records the reception timestamp and — if
// no audio SSRC was negotiated — forces a PCM mu-law audio codec on the
// output context. Requests a full intra frame (FIR) at most once per
// FIR_INTERVAL_MS. RED-encapsulated video is routed through the FEC receiver,
// which emits recovered frames via a separate callback; plain packets go
// straight onto the audio/video queues.
void ExternalOutput::queueData(char* buffer, int length, packetType type){
  if (!recording_) {
    return;
  }
  RtcpHeader *head = reinterpret_cast<RtcpHeader*>(buffer);
  if (head->isRtcp()){
    // Only media is recorded; control packets are ignored.
    return;
  }
  if (firstDataReceived_ == -1) {
    // First packet ever: remember the wall-clock start in milliseconds.
    timeval time;
    gettimeofday(&time, NULL);
    firstDataReceived_ = (time.tv_sec * 1000) + (time.tv_usec / 1000);
    if (this->getAudioSinkSSRC() == 0){
      ELOG_DEBUG("No audio detected");
      context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
    }
  }
  timeval time;
  gettimeofday(&time, NULL);
  unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
  // Periodically ask the publisher for a keyframe so the recording can seek.
  if (millis -lastFullIntraFrameRequest_ >FIR_INTERVAL_MS){
    this->sendFirPacket();
    lastFullIntraFrameRequest_ = millis;
  }
  if (type == VIDEO_PACKET){
    if(this->videoOffsetMsec_ == -1) {
      // First video packet: offset of the video stream from recording start.
      videoOffsetMsec_ = ((time.tv_sec * 1000) + (time.tv_usec / 1000)) - firstDataReceived_;
      ELOG_DEBUG("File %s, video offset msec: %llu", context_->filename, videoOffsetMsec_);
    }
    // If this is a red header, let's push it to our fec_receiver_, which will spit out frames in one of our other callbacks.
    // Otherwise, just stick it straight into the video queue.
    RtpHeader* h = reinterpret_cast<RtpHeader*>(buffer);
    if (h->getPayloadType() == RED_90000_PT) {
      // The only things AddReceivedRedPacket uses are headerLength and sequenceNumber. Unfortunately the amount of crap
      // we would have to pull in from the WebRtc project to fully construct a webrtc::RTPHeader object is obscene. So
      // let's just do this hacky fix.
      webrtc::RTPHeader hackyHeader;
      hackyHeader.headerLength = h->getHeaderLength();
      hackyHeader.sequenceNumber = h->getSeqNumber();
      fec_receiver_.AddReceivedRedPacket(hackyHeader, (const uint8_t*)buffer, length, ULP_90000_PT);
      fec_receiver_.ProcessReceivedFec();
    } else {
      videoQueue_.pushPacket(buffer, length);
    }
  }else{
    if(this->audioOffsetMsec_ == -1) {
      // First audio packet: offset of the audio stream from recording start.
      audioOffsetMsec_ = ((time.tv_sec * 1000) + (time.tv_usec / 1000)) - firstDataReceived_;
      ELOG_DEBUG("File %s, audio offset msec: %llu", context_->filename, audioOffsetMsec_);
    }
    audioQueue_.pushPacket(buffer, length);
  }
  if( audioQueue_.hasData() || videoQueue_.hasData()) {
    // One or both of our queues has enough data to write stuff out. Notify our writer.
    cond_.notify_one();
  }
}
// Queues an incoming media packet for the recording writer thread.
// Drops RTCP; on the first packet, records the start time and — if no audio
// SSRC was negotiated — defaults the audio map/codec to PCM mu-law. Sends a
// single deferred FIR once the video source SSRC is known. Sets each queue's
// timebase from the first packet's payload type, routes RED-encapsulated
// video through the FEC receiver, and pushes everything else onto the
// audio/video queues.
void ExternalOutput::queueData(char* buffer, int length, packetType type) {
  if (!recording_) {
    return;
  }
  RtcpHeader *head = reinterpret_cast<RtcpHeader*>(buffer);
  if (head->isRtcp()) {
    // Only media is recorded; control packets are ignored.
    return;
  }
  if (first_data_received_ == time_point()) {
    // First packet ever: remember the recording start instant.
    first_data_received_ = clock::now();
    if (getAudioSinkSSRC() == 0) {
      ELOG_DEBUG("No audio detected");
      audio_map_ = RtpMap{0, "PCMU", 8000, AUDIO_TYPE, 1};
      audio_codec_ = AV_CODEC_ID_PCM_MULAW;
    }
  }
  // A FIR can only be addressed once we know the publisher's video SSRC.
  if (need_to_send_fir_ && video_source_ssrc_) {
    sendFirPacket();
    need_to_send_fir_ = false;
  }
  if (type == VIDEO_PACKET) {
    RtpHeader* h = reinterpret_cast<RtpHeader*>(buffer);
    uint8_t payloadtype = h->getPayloadType();
    if (video_offset_ms_ == -1) {
      // First video packet: record the stream offset and pick the timebase
      // matching this payload type's clock rate.
      video_offset_ms_ = ClockUtils::durationToMs(clock::now() - first_data_received_);
      ELOG_DEBUG("File %s, video offset msec: %llu", context_->filename, video_offset_ms_);
      video_queue_.setTimebase(video_maps_[payloadtype].clock_rate);
    }
    // If this is a red header, let's push it to our fec_receiver_, which will spit out frames in one
    // of our other callbacks.
    // Otherwise, just stick it straight into the video queue.
    if (payloadtype == RED_90000_PT) {
      // The only things AddReceivedRedPacket uses are headerLength and sequenceNumber.
      // Unfortunately the amount of crap
      // we would have to pull in from the WebRtc project to fully construct
      // a webrtc::RTPHeader object is obscene. So
      // let's just do this hacky fix.
      webrtc::RTPHeader hacky_header;
      hacky_header.headerLength = h->getHeaderLength();
      hacky_header.sequenceNumber = h->getSeqNumber();
      // AddReceivedRedPacket returns 0 if there's data to process
      if (0 == fec_receiver_->AddReceivedRedPacket(hacky_header, (const uint8_t*)buffer,
                                                   length, ULP_90000_PT)) {
        fec_receiver_->ProcessReceivedFec();
      }
    } else {
      video_queue_.pushPacket(buffer, length);
    }
  } else {
    if (audio_offset_ms_ == -1) {
      // First audio packet: record the stream offset.
      audio_offset_ms_ = ClockUtils::durationToMs(clock::now() - first_data_received_);
      ELOG_DEBUG("File %s, audio offset msec: %llu", context_->filename, audio_offset_ms_);
      // Let's also take a moment to set our audio queue timebase.
      RtpHeader* h = reinterpret_cast<RtpHeader*>(buffer);
      if (h->getPayloadType() == PCMU_8000_PT) {
        audio_queue_.setTimebase(8000);
      } else if (h->getPayloadType() == OPUS_48000_PT) {
        audio_queue_.setTimebase(48000);
      }
    }
    audio_queue_.pushPacket(buffer, length);
  }
  if (audio_queue_.hasData() || video_queue_.hasData()) {
    // One or both of our queues has enough data to write stuff out. Notify our writer.
    cond_.notify_one();
  }
}