Example no. 1
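MediaStream::setTransportInfo records the transport's client host type on the stats node of every audio and video SSRC that is in use.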
void MediaStream::setTransportInfo(std::string audio_info, std::string video_info) {
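  // Tag only SSRCs that are actually in use: sink SSRCs keep a well-known
  // default until negotiated, while source SSRCs default to 0.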
  if (video_enabled_) {
    uint32_t video_sink_ssrc = getVideoSinkSSRC();
    uint32_t video_source_ssrc = getVideoSourceSSRC();

    if (video_sink_ssrc != kDefaultVideoSinkSSRC) {
      stats_->getNode()[video_sink_ssrc].insertStat("clientHostType", StringStat{video_info});
    }
    if (video_source_ssrc != 0) {
      stats_->getNode()[video_source_ssrc].insertStat("clientHostType", StringStat{video_info});
    }
  }

  if (audio_enabled_) {
    uint32_t audio_sink_ssrc = getAudioSinkSSRC();
    uint32_t audio_source_ssrc = getAudioSourceSSRC();

    if (audio_sink_ssrc != kDefaultAudioSinkSSRC) {
      stats_->getNode()[audio_sink_ssrc].insertStat("clientHostType", StringStat{audio_info});
    }
    if (audio_source_ssrc != 0) {
      stats_->getNode()[audio_source_ssrc].insertStat("clientHostType", StringStat{audio_info});
    }
  }
}
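The stats_ calls above index a per-SSRC tree of named stats. As a rough mental model only (the StatsTree, StatNode, and StringStat shapes below are assumptions for illustration, not the project's real types), the access pattern behaves like this:

#include <cstdint>
#include <map>
#include <string>

// Assumed stand-ins for illustration: StringStat wraps a string value and a
// StatNode is a bag of named stats.
struct StringStat {
  std::string value;
};

struct StatNode {
  void insertStat(const std::string& name, StringStat stat) {
    stats_[name] = std::move(stat.value);
  }
  std::map<std::string, std::string> stats_;
};

// getNode() exposes a map keyed by SSRC, so getNode()[ssrc] lazily creates
// the per-stream node the first time that SSRC is tagged.
struct StatsTree {
  std::map<uint32_t, StatNode>& getNode() { return nodes_; }
  std::map<uint32_t, StatNode> nodes_;
};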
Example no. 2
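ExternalOutput::queueData routes incoming RTP into the recorder's audio and video packet queues, initializing codec maps, timestamp offsets, and queue timebases when the first packets arrive.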
void ExternalOutput::queueData(char* buffer, int length, packetType type) {
  if (!recording_) {
    return;
  }

  RtcpHeader *head = reinterpret_cast<RtcpHeader*>(buffer);
  if (head->isRtcp()) {
    return;
  }

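  // First packet overall: record the arrival time; if no audio SSRC was
  // negotiated, default the audio map to PCMU so the recording stays valid.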
  if (first_data_received_ == time_point()) {
    first_data_received_ = clock::now();
    if (getAudioSinkSSRC() == 0) {
      ELOG_DEBUG("No audio detected");
      audio_map_ = RtpMap{0, "PCMU", 8000, AUDIO_TYPE, 1};
      audio_codec_ = AV_CODEC_ID_PCM_MULAW;
    }
  }
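  // Once the publisher's video SSRC is known, request a keyframe (FIR) so the
  // recording starts with a decodable frame.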
  if (need_to_send_fir_ && video_source_ssrc_) {
    sendFirPacket();
    need_to_send_fir_ = false;
  }

  if (type == VIDEO_PACKET) {
    RtpHeader* h = reinterpret_cast<RtpHeader*>(buffer);
    uint8_t payloadtype = h->getPayloadType();
    if (video_offset_ms_ == -1) {
      video_offset_ms_ = ClockUtils::durationToMs(clock::now() - first_data_received_);
      ELOG_DEBUG("File %s, video offset msec: %llu", context_->filename, video_offset_ms_);
      video_queue_.setTimebase(video_maps_[payloadtype].clock_rate);
    }

    // If this is a RED packet, push it to fec_receiver_, which will hand
    // recovered frames back through one of our other callbacks.
    // Otherwise, put it straight into the video queue.
    if (payloadtype == RED_90000_PT) {
      // AddReceivedRedPacket only reads headerLength and sequenceNumber, and
      // fully constructing a webrtc::RTPHeader would drag in an obscene amount
      // of the WebRtc project, so just populate those two fields.
      webrtc::RTPHeader hacky_header;
      hacky_header.headerLength = h->getHeaderLength();
      hacky_header.sequenceNumber = h->getSeqNumber();

      // AddReceivedRedPacket returns 0 if there's data to process
      if (0 == fec_receiver_->AddReceivedRedPacket(hacky_header, reinterpret_cast<const uint8_t*>(buffer),
                                                   length, ULP_90000_PT)) {
        fec_receiver_->ProcessReceivedFec();
      }
    } else {
      video_queue_.pushPacket(buffer, length);
    }
  } else {
    if (audio_offset_ms_ == -1) {
      audio_offset_ms_ = ClockUtils::durationToMs(clock::now() - first_data_received_);
      ELOG_DEBUG("File %s, audio offset msec: %llu", context_->filename, audio_offset_ms_);

      // Let's also take a moment to set our audio queue timebase.
      RtpHeader* h = reinterpret_cast<RtpHeader*>(buffer);
      if (h->getPayloadType() == PCMU_8000_PT) {
        audio_queue_.setTimebase(8000);
      } else if (h->getPayloadType() == OPUS_48000_PT) {
        audio_queue_.setTimebase(48000);
      }
    }
    audio_queue_.pushPacket(buffer, length);
  }

  if (audio_queue_.hasData() || video_queue_.hasData()) {
    // At least one of our queues has enough data to write out. Notify our writer.
    cond_.notify_one();
  }
}
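The setTimebase calls above matter because RTP timestamps tick at the codec's clock rate (8 kHz for PCMU, 48 kHz for Opus, 90 kHz for the video payloads), not in wall-clock units. A minimal sketch of the conversion such a queue relies on, using a hypothetical helper name that is not part of the codebase above:

#include <cstdint>

// Hypothetical helper: convert an RTP timestamp delta into milliseconds,
// given the stream's clock rate in Hz.
static int64_t rtpDeltaToMs(uint32_t ts_delta, uint32_t clock_rate_hz) {
  return static_cast<int64_t>(ts_delta) * 1000 / clock_rate_hz;
}

// Example: one 20 ms Opus frame advances the timestamp by 960 ticks at 48 kHz,
// so rtpDeltaToMs(960, 48000) == 20.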