Example #1
bool ExternalOutput::initContext() {
    if (context_->oformat->video_codec != AV_CODEC_ID_NONE &&
            context_->oformat->audio_codec != AV_CODEC_ID_NONE &&
            video_stream_ == NULL &&
            audio_stream_ == NULL) {
        AVCodec* videoCodec = avcodec_find_encoder(context_->oformat->video_codec);
        if (videoCodec == NULL) {
            ELOG_ERROR("Could not find video codec");
            return false;
        }
        ELOG_DEBUG("Found Video Codec %s", videoCodec->name);
        video_stream_ = avformat_new_stream (context_, videoCodec);
        video_stream_->id = 0;
        video_stream_->codec->codec_id = context_->oformat->video_codec;
        video_stream_->codec->width = 640;
        video_stream_->codec->height = 480;
        // A decent guess here suffices; if processing the file with ffmpeg,
        // use -vsync 0 to force it not to duplicate frames.
        video_stream_->codec->time_base = (AVRational) {1, 30};
        video_stream_->codec->pix_fmt = PIX_FMT_YUV420P;
        if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
            video_stream_->codec->flags|=CODEC_FLAG_GLOBAL_HEADER;
        }
        context_->oformat->flags |= AVFMT_VARIABLE_FPS;

        AVCodec* audioCodec = avcodec_find_encoder(context_->oformat->audio_codec);
        if (audioCodec==NULL) {
            ELOG_ERROR("Could not find audio codec");
            return false;
        }
        ELOG_DEBUG("Found Audio Codec %s", audioCodec->name);
        audio_stream_ = avformat_new_stream (context_, audioCodec);
        audio_stream_->id = 1;
        audio_stream_->codec->codec_id = context_->oformat->audio_codec;
        audio_stream_->codec->sample_rate = context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 8000 : 48000; // TODO is it always 48 khz for opus?
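        // Note: per RFC 7587 the RTP clock rate for Opus is always 48 kHz, so 48000 is correct here.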
        audio_stream_->codec->time_base = (AVRational) {
            1, audio_stream_->codec->sample_rate
        };
        audio_stream_->codec->channels = context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 1 : 2;   // TODO is it always two channels for opus?
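        // Note: per RFC 7587 the rtpmap channel count for Opus is always 2 (the encoded stream may still be mono).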
        if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
            audio_stream_->codec->flags|=CODEC_FLAG_GLOBAL_HEADER;
        }

        context_->streams[0] = video_stream_;
        context_->streams[1] = audio_stream_;
        if (avio_open(&context_->pb, context_->filename, AVIO_FLAG_WRITE) < 0) {
            ELOG_ERROR("Error opening output file");
            return false;
        }

        if (avformat_write_header(context_, NULL) < 0) {
            ELOG_ERROR("Error writing header");
            return false;
        }
        ELOG_DEBUG("avformat configured");
    }

    return true;
}
Example #2
// We'll allow our audioQueue to be significantly larger than our video queue.
// This is safe because A) audio packets are a lot smaller, so it isn't a big deal to hold on to a lot of them, and
// B) audio typically arrives in 20 msec packets, while video frames are anywhere from 33 to 100 msec apart (30 to 10 fps).
// Allowing the audio queue to hold more will help prevent loss of data when the video framerate is low.
ExternalOutput::ExternalOutput(const std::string& outputUrl) : fec_receiver_(this), audioQueue_(600, 60), videoQueue_(120, 60), inited_(false), video_stream_(NULL), audio_stream_(NULL),
    firstVideoTimestamp_(-1), firstAudioTimestamp_(-1), firstDataReceived_(-1), videoOffsetMsec_(-1), audioOffsetMsec_(-1), vp8SearchState_(lookingForStart)
{
    ELOG_DEBUG("Creating output to %s", outputUrl.c_str());

    // TODO these should really only be called once per application run
    av_register_all();
    avcodec_register_all();


    context_ = avformat_alloc_context();
    if (context_==NULL){
        ELOG_ERROR("Error allocating memory for IO context");
    } else {

        outputUrl.copy(context_->filename, sizeof(context_->filename),0);

        context_->oformat = av_guess_format(NULL,  context_->filename, NULL);
        if (!context_->oformat){
            ELOG_ERROR("Error guessing format %s", context_->filename);
        } else {
            context_->oformat->video_codec = AV_CODEC_ID_VP8;
            context_->oformat->audio_codec = AV_CODEC_ID_NONE; // We'll figure this out once we start receiving data; it's either PCM or OPUS
        }
    }

    unpackagedBufferpart_ = unpackagedBuffer_;
    lastFullIntraFrameRequest_ = 0;
    sinkfbSource_ = this;
    fbSink_ = NULL;
    unpackagedSize_ = 0;
}
Example #3
  int OutputProcessor::encodeAudio(unsigned char* inBuff, int nSamples,
      AVPacket* pkt) {

    if (audioCoder == 0) {
      ELOG_DEBUG("No se han inicializado los parámetros del audioCoder");
      return -1;
    }

    AVFrame *frame;
    /* frame containing input raw audio */
    frame = avcodec_alloc_frame();
    if (!frame) {
      ELOG_ERROR("could not allocate audio frame");
      exit(1);
    }
    uint16_t* samples;
    int ret, got_output, buffer_size;
    //float t, tincr;

    frame->nb_samples = aCoderContext->frame_size;
    frame->format = aCoderContext->sample_fmt;
    //	frame->channel_layout = aCoderContext->channel_layout;

    /* the codec gives us the frame size, in samples,
     * we calculate the size of the samples buffer in bytes */
    ELOG_DEBUG("channels %d, frame_size %d, sample_fmt %d",
        aCoderContext->channels, aCoderContext->frame_size,
        aCoderContext->sample_fmt);
    buffer_size = av_samples_get_buffer_size(NULL, aCoderContext->channels,
        aCoderContext->frame_size, aCoderContext->sample_fmt, 0);
    samples = (uint16_t*) av_malloc(buffer_size);
    if (!samples) {
      ELOG_ERROR("could not allocate %d bytes for samples buffer",
          buffer_size);
      exit(1);
    }
    /* setup the data pointers in the AVFrame */
    ret = avcodec_fill_audio_frame(frame, aCoderContext->channels,
        aCoderContext->sample_fmt, (const uint8_t*) samples, buffer_size,
        0);
    if (ret < 0) {
      ELOG_ERROR("could not setup audio frame");
      exit(1);
    }

    ret = avcodec_encode_audio2(aCoderContext, pkt, frame, &got_output);
    if (ret < 0) {
      ELOG_ERROR("error encoding audio frame");
      exit(1);
    }
    if (got_output) {
      //fwrite(pkt.data, 1, pkt.size, f);
      ELOG_DEBUG("Got OUTPUT");
    }

    av_free(samples);
    av_free(frame);
    return ret;

  }
Example #4
  bool ExternalOutput::initContext() {
    ELOG_DEBUG("Init Context");
    if (oformat_->video_codec != AV_CODEC_ID_NONE && videoCodec_ == NULL) {
      videoCodec_ = avcodec_find_encoder(oformat_->video_codec);
      if (videoCodec_ == NULL) {
        ELOG_ERROR("Could not find codec");
        return false;
      }
      ELOG_DEBUG("Found Codec %s", videoCodec_->name);
      ELOG_DEBUG("Initing context with fps: %d", (int)prevEstimatedFps_);
      video_st = avformat_new_stream (context_, videoCodec_);
      video_st->id = 0;
      videoCodecCtx_ = video_st->codec;
      videoCodecCtx_->codec_id = oformat_->video_codec;
      videoCodecCtx_->width = 640;
      videoCodecCtx_->height = 480;
      videoCodecCtx_->time_base = (AVRational){1,(int)prevEstimatedFps_};
      videoCodecCtx_->pix_fmt = PIX_FMT_YUV420P;
      if (oformat_->flags & AVFMT_GLOBALHEADER){
        videoCodecCtx_->flags|=CODEC_FLAG_GLOBAL_HEADER;
      }
      oformat_->flags |= AVFMT_VARIABLE_FPS;
      ELOG_DEBUG("Init audio context");

      audioCodec_ = avcodec_find_encoder(oformat_->audio_codec);
      if (audioCodec_ == NULL) {
        ELOG_ERROR("Could not find audio codec");
        return false;
      }
      ELOG_DEBUG("Found Audio Codec %s", audioCodec_->name);
      audio_st = avformat_new_stream (context_, audioCodec_);
      audio_st->id = 1;
      audioCodecCtx_ = audio_st->codec;
      audioCodecCtx_->codec_id = oformat_->audio_codec;
      audioCodecCtx_->sample_rate = 8000;
      audioCodecCtx_->channels = 1;
      //      audioCodecCtx_->sample_fmt = AV_SAMPLE_FMT_S8;
      if (oformat_->flags & AVFMT_GLOBALHEADER){
        audioCodecCtx_->flags|=CODEC_FLAG_GLOBAL_HEADER;
      }

      context_->streams[0] = video_st;
      context_->streams[1] = audio_st;
      aviores_ = avio_open(&context_->pb, url.c_str(), AVIO_FLAG_WRITE);
      if (aviores_<0){
        ELOG_ERROR("Error opening output file");
        return false;
      }
      writeheadres_ = avformat_write_header(context_, NULL);
      if (writeheadres_<0){
        ELOG_ERROR("Error writing header");
        return false;
      }
      ELOG_DEBUG("AVFORMAT CONFIGURED");
    }
    return true;
  }
Example #5
bool ExternalOutput::initContext() {
  if (video_codec_ != AV_CODEC_ID_NONE &&
            audio_codec_ != AV_CODEC_ID_NONE &&
            video_stream_ == nullptr &&
            audio_stream_ == nullptr) {
    AVCodec* video_codec = avcodec_find_encoder(video_codec_);
    if (video_codec == nullptr) {
      ELOG_ERROR("Could not find video codec");
      return false;
    }
    need_to_send_fir_ = true;
    video_queue_.setTimebase(video_map_.clock_rate);
    video_stream_ = avformat_new_stream(context_, video_codec);
    video_stream_->id = 0;
    video_stream_->codec->codec_id = video_codec_;
    video_stream_->codec->width = 640;
    video_stream_->codec->height = 480;
    video_stream_->time_base = (AVRational) { 1, 30 };
    // A decent guess here suffices; if processing the file with ffmpeg,
    // use -vsync 0 to force it not to duplicate frames.
    video_stream_->codec->pix_fmt = PIX_FMT_YUV420P;
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      video_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    context_->oformat->flags |= AVFMT_VARIABLE_FPS;

    AVCodec* audio_codec = avcodec_find_encoder(audio_codec_);
    if (audio_codec == nullptr) {
      ELOG_ERROR("Could not find audio codec");
      return false;
    }

    audio_stream_ = avformat_new_stream(context_, audio_codec);
    audio_stream_->id = 1;
    audio_stream_->codec->codec_id = audio_codec_;
    audio_stream_->codec->sample_rate = audio_map_.clock_rate;
    audio_stream_->time_base = (AVRational) { 1, audio_stream_->codec->sample_rate };
    audio_stream_->codec->channels = audio_map_.channels;
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      audio_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    context_->streams[0] = video_stream_;
    context_->streams[1] = audio_stream_;
    if (avio_open(&context_->pb, context_->filename, AVIO_FLAG_WRITE) < 0) {
      ELOG_ERROR("Error opening output file");
      return false;
    }

    if (avformat_write_header(context_, nullptr) < 0) {
      ELOG_ERROR("Error writing header");
      return false;
    }
  }

  return true;
}
Example #6
void Resender::start()
{
    ELOG_DEBUG("start");
    sent_ = 0;
    timer_.cancel();
    if (thread_.get() != NULL) {
        ELOG_ERROR("Starting Resender, joining thread to terminate");
        thread_->join();
        ELOG_ERROR("Thread terminated on start");
    }
    timer_.expires_from_now(boost::posix_time::seconds(3));
    timer_.async_wait(boost::bind(&Resender::resend, this, boost::asio::placeholders::error));
    thread_.reset(new boost::thread(boost::bind(&Resender::run, this)));
}
Example #7
  void NiceConnection::updateIceState(IceState state) {

    if(iceState_==state)
      return;

    ELOG_INFO("%s - NICE State Changing from %u to %u %p", transportName->c_str(), this->iceState_, state, this);
    this->iceState_ = state;
    switch( iceState_) {
      case NICE_FINISHED:
        return;
      case NICE_FAILED:
        ELOG_ERROR("Nice Failed, stopping ICE");
        this->running_=false;
        break;

      case NICE_READY:
      case NICE_CANDIDATES_RECEIVED:
        break;
      default:
        break;
    }

    // Important: send this outside our state lock.  Otherwise, serious risk of deadlock.
    if (this->listener_ != NULL)
      this->listener_->updateIceState(state, this);
  }
Example #8
bool SrtpChannel::configureSrtpSession(srtp_t *session, const char* key,
        enum TransmissionType type) {
    srtp_policy_t policy;
    memset(&policy, 0, sizeof(policy));
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp);
    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);
    if (type == SENDING) {
        policy.ssrc.type = ssrc_any_outbound;
    } else {

        policy.ssrc.type = ssrc_any_inbound;
    }

    policy.ssrc.value = 0;
    policy.window_size = 1024;
    policy.allow_repeat_tx = 1;
    policy.next = NULL;
    //ELOG_DEBUG("auth_tag_len %d", policy.rtp.auth_tag_len);

    gsize len = 0;
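    // The base64 SDES key decodes to the 30-byte SRTP master key for AES_CM_128_HMAC_SHA1_80:
    // a 16-byte AES-128 key followed by a 14-byte master salt.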
    uint8_t *akey = (uint8_t*) g_base64_decode((gchar*) key, &len);
    ELOG_DEBUG("set master key/salt to %s/", octet_string_hex_string(akey, 16));
    // allocate and initialize the SRTP session
    policy.key = akey;
    int res = srtp_create(session, &policy);
    if (res!=0){
      ELOG_ERROR("Failed to create srtp session with %s, %d", octet_string_hex_string(akey, 16), res);
    }
    return res == 0;
}
Example #9
  bool WebRtcConnection::addRemoteCandidate(const std::string &mid, int mLineIndex, const std::string &sdp) {
    // TODO Check type of transport.
    ELOG_DEBUG("Adding remote Candidate %s, mid %s, sdpMLine %d",sdp.c_str(), mid.c_str(), mLineIndex);
    MediaType theType;
    std::string theMid;
    if ((!mid.compare("video"))||(mLineIndex ==remoteSdp_.videoSdpMLine)){
      theType = VIDEO_TYPE;
      theMid = "video";
    }else{
      theType = AUDIO_TYPE;
      theMid = "audio";
    }
    SdpInfo tempSdp;
    std::string username, password;
    remoteSdp_.getCredentials(username, password, theType);
    tempSdp.setCredentials(username, password, OTHER);
    bool res = false;
    if(tempSdp.initWithSdp(sdp, theMid)){
      if (theType == VIDEO_TYPE||bundle_){
        ELOG_DEBUG("Setting VIDEO CANDIDATE" );
        res = videoTransport_->setRemoteCandidates(tempSdp.getCandidateInfos(), bundle_);
      } else if (theType==AUDIO_TYPE){
        ELOG_DEBUG("Setting AUDIO CANDIDATE");
        res = audioTransport_->setRemoteCandidates(tempSdp.getCandidateInfos(), bundle_);
      }else{
        ELOG_ERROR("Cannot add remote candidate with no Media (video or audio)");
      }
    }

    for (uint8_t it = 0; it < tempSdp.getCandidateInfos().size(); it++){
      remoteSdp_.addCandidate(tempSdp.getCandidateInfos()[it]);
    }
    return res;
  }
Example #10
int AudioEncoder::encodeAudio(unsigned char* inBuffer, int nSamples, AVPacket* pkt) {
  AVFrame *frame = av_frame_alloc();
  if (!frame) {
    ELOG_ERROR("could not allocate audio frame");
    return 0;
  }
  int ret, got_output, buffer_size;

  frame->nb_samples = aCoderContext_->frame_size;
  frame->format = aCoderContext_->sample_fmt;
  // frame->channel_layout = aCoderContext_->channel_layout;

  /* the codec gives us the frame size, in samples,
   * we calculate the size of the samples buffer in bytes */
  ELOG_DEBUG("channels %d, frame_size %d, sample_fmt %d",
      aCoderContext_->channels, aCoderContext_->frame_size,
      aCoderContext_->sample_fmt);
  buffer_size = av_samples_get_buffer_size(NULL, aCoderContext_->channels,
      aCoderContext_->frame_size, aCoderContext_->sample_fmt, 0);
  uint16_t* samples = reinterpret_cast<uint16_t*>(malloc(buffer_size));
  if (!samples) {
    ELOG_ERROR("could not allocate %d bytes for samples buffer", buffer_size);
    av_frame_free(&frame);
    return 0;
  }
  /* setup the data pointers in the AVFrame */
  ret = avcodec_fill_audio_frame(frame, aCoderContext_->channels,
      aCoderContext_->sample_fmt, (const uint8_t*) samples, buffer_size,
      0);
  if (ret < 0) {
    free(samples);
    av_frame_free(&frame);
    ELOG_ERROR("could not setup audio frame");
    return 0;
  }

  ret = avcodec_encode_audio2(aCoderContext_, pkt, frame, &got_output);
  if (ret < 0) {
    ELOG_ERROR("error encoding audio frame");
    free(samples);
    av_frame_free(&frame);
    return 0;
  }
  if (got_output) {
    // fwrite(pkt.data, 1, pkt.size, f);
    ELOG_DEBUG("Got OUTPUT");
  }

  free(samples);
  av_frame_free(&frame);
  return ret;
}
Example #11
ExternalOutput::ExternalOutput(std::shared_ptr<Worker> worker, const std::string& output_url,
                               const std::vector<RtpMap> rtp_mappings)
  : worker_{worker}, pipeline_{Pipeline::create()}, audio_queue_{5.0, 10.0}, video_queue_{5.0, 10.0},
    inited_{false}, video_stream_{nullptr},
    audio_stream_{nullptr}, video_source_ssrc_{0},
    first_video_timestamp_{-1}, first_audio_timestamp_{-1},
    first_data_received_{}, video_offset_ms_{-1}, audio_offset_ms_{-1},
    need_to_send_fir_{true}, rtp_mappings_{rtp_mappings}, video_codec_{AV_CODEC_ID_NONE},
    audio_codec_{AV_CODEC_ID_NONE}, pipeline_initialized_{false} {
  ELOG_DEBUG("Creating output to %s", output_url.c_str());

  fb_sink_ = nullptr;
  sink_fb_source_ = this;

  // TODO(pedro): these should really only be called once per application run
  av_register_all();
  avcodec_register_all();

  fec_receiver_.reset(webrtc::UlpfecReceiver::Create(this));
  stats_ = std::make_shared<Stats>();
  quality_manager_ = std::make_shared<QualityManager>();

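  // Index the negotiated RTP mappings by payload type, split into audio and video tables.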
  for (auto rtp_map : rtp_mappings_) {
    switch (rtp_map.media_type) {
      case AUDIO_TYPE:
        audio_maps_[rtp_map.payload_type] = rtp_map;
        break;
      case VIDEO_TYPE:
        video_maps_[rtp_map.payload_type] = rtp_map;
        break;
      case OTHER:
        break;
    }
  }

  context_ = avformat_alloc_context();
  if (context_ == nullptr) {
    ELOG_ERROR("Error allocating memory for IO context");
  } else {
    output_url.copy(context_->filename, sizeof(context_->filename), 0);

    context_->oformat = av_guess_format(nullptr,  context_->filename, nullptr);
    if (!context_->oformat) {
      ELOG_ERROR("Error guessing format %s", context_->filename);
    }
  }
}
Example #12
  int InputProcessor::unpackageAudio(unsigned char* inBuff, int inBuffLen, unsigned char* outBuff) {
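    // Strip the minimal 12-byte RTP header (RtpHeader::MIN_SIZE) and copy out the raw payload.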
    int l = inBuffLen - RtpHeader::MIN_SIZE;
    if (l < 0){
      ELOG_ERROR ("Error unpackaging audio");
      return 0;
    }
    memcpy(outBuff, &inBuff[RtpHeader::MIN_SIZE], l);

    return l;
  }
Example #13
bool ExternalOutput::bufferCheck(RTPPayloadVP8* payload) {
  if (payload->dataLength + unpackagedSize_ >= UNPACKAGE_BUFFER_SIZE) {
    ELOG_ERROR("Not enough buffer. Dropping frame. Please adjust your UNPACKAGE_BUFFER_SIZE in ExternalOutput.h");
    unpackagedSize_ = 0;
    unpackagedBufferpart_ = unpackagedBuffer_;
    vp8SearchState_ = lookingForStart;
    return false;
  }
  return true;
}
Example #14
File: log.cpp Project: atria-soft/elog
void elog::setLogInFile(const std::string& _filename) {
	elog::unsetLogInFile();
	ELOG_PRINT("Log in file: '" << _filename << "'");
	g_lock.lock();
	FILE*& file = getLogFile();
	file = fopen(_filename.c_str(), "w");
	g_lock.unlock();
	if (file == nullptr) {
		ELOG_ERROR("Can not open file: '" << _filename << "'");
	}
}
Example #15
 bool ExternalOutput::init(){
   av_register_all();
   avcodec_register_all();
   context_ = avformat_alloc_context();
   if (context_==NULL){
     ELOG_ERROR("Error allocating memory for IO context");
     return false;
   }
   oformat_ = av_guess_format(NULL,  url.c_str(), NULL);
   if (!oformat_){
     ELOG_ERROR("Error opening output file %s", url.c_str());
     return false;
   }
   context_->oformat = oformat_;
   context_->oformat->video_codec = AV_CODEC_ID_VP8;
   context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
   url.copy(context_->filename, sizeof(context_->filename),0);
   video_st = NULL;
   audio_st = NULL;
   in = new InputProcessor();
   MediaInfo m;
   //    m.processorType = RTP_ONLY;
   m.hasVideo = false;
   m.hasAudio = false;
   if (m.hasAudio) {
     m.audioCodec.sampleRate = 8000;
     m.audioCodec.bitRate = 64000;
     m.audioCodec.codec = AUDIO_CODEC_VORBIS;
     audioCoder_ = new AudioEncoder();
     if (!audioCoder_->initEncoder(m.audioCodec))
       return false;
   }
   gotUnpackagedFrame_ = 0;
   unpackagedSize_ = 0;
   in->init(m, this);
   thread_ = boost::thread(&ExternalOutput::sendLoop, this);
   sending_ = true;
   ELOG_DEBUG("Initialized successfully");
   return true;
 }
Example #16
ExternalOutput::ExternalOutput(const std::string& outputUrl)
  : audioQueue_(5.0, 10.0), videoQueue_(5.0, 10.0), inited_(false),
    video_stream_(NULL), audio_stream_(NULL), first_video_timestamp_(-1), first_audio_timestamp_(-1),
    first_data_received_(), video_offset_ms_(-1), audio_offset_ms_(-1), vp8SearchState_(lookingForStart),
    needToSendFir_(true) {
  ELOG_DEBUG("Creating output to %s", outputUrl.c_str());

  // TODO(pedro): these should really only be called once per application run
  av_register_all();
  avcodec_register_all();

  fec_receiver_.reset(webrtc::UlpfecReceiver::Create(this));

  // our video timebase is easy: always 90 khz.  We'll set audio once we receive a packet and can inspect its header.
  videoQueue_.setTimebase(90000);

  context_ = avformat_alloc_context();
  if (context_ == NULL) {
    ELOG_ERROR("Error allocating memory for IO context");
  } else {
    outputUrl.copy(context_->filename, sizeof(context_->filename), 0);

    context_->oformat = av_guess_format(NULL,  context_->filename, NULL);
    if (!context_->oformat) {
      ELOG_ERROR("Error guessing format %s", context_->filename);
    } else {
      context_->oformat->video_codec = AV_CODEC_ID_VP8;
      context_->oformat->audio_codec = AV_CODEC_ID_NONE;
      // We'll figure this out once we start receiving data; it's either PCM or OPUS
    }
  }

  unpackagedBufferpart_ = unpackagedBuffer_;
  sink_fb_source_ = this;
  fb_sink_ = nullptr;
  unpackagedSize_ = 0;
  videoSourceSsrc_ = 0;
}
Example #17
ExternalOutput::ExternalOutput(const std::string& outputUrl)
{
    ELOG_DEBUG("Creating output to %s", outputUrl.c_str());

    // TODO these should really only be called once per application run
    av_register_all();
    avcodec_register_all();


    context_ = avformat_alloc_context();
    if (context_==NULL) {
        ELOG_ERROR("Error allocating memory for IO context");
    } else {

        outputUrl.copy(context_->filename, sizeof(context_->filename),0);

        context_->oformat = av_guess_format(NULL,  context_->filename, NULL);
        if (!context_->oformat) {
            ELOG_ERROR("Error guessing format %s", context_->filename);
        } else {
            context_->oformat->video_codec = AV_CODEC_ID_VP8;
            context_->oformat->audio_codec = AV_CODEC_ID_NONE; // We'll figure this out once we start receiving data; it's either PCM or OPUS
        }
    }

    video_stream_ = NULL;
    audio_stream_ = NULL;
    unpackagedBufferpart_ = unpackagedBuffer_;
    initTimeVideo_ = -1;
    initTimeAudio_ = -1;
    lastFullIntraFrameRequest_ = 0;
    sinkfbSource_ = this;
    fbSink_ = NULL;
    unpackagedSize_ = 0;
    inputProcessor_ = new InputProcessor();
}
Example #18
  int ExternalOutput::writeAudioData(char* buf, int len){
    if (in!=NULL){
      if (videoCodec_ == NULL) {
        return 0;
      }
      rtpheader *head = (rtpheader*)buf;
      //We dont need any other payload at this time
      if(head->payloadtype != PCMU_8000_PT){
        return 0;
      }

      int ret = in->unpackageAudio(reinterpret_cast<unsigned char*>(buf), len,
          unpackagedAudioBuffer_);
      if (ret <= 0)
        return ret;
      timeval time;
      gettimeofday(&time, NULL);
      unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
      if (millis -lastTime_ >FIR_INTERVAL_MS){
        this->sendFirPacket();
        lastTime_ = millis;
      }
      if (initTime_ == 0) {
        initTime_ = millis;      
      }
      if (millis < initTime_){
        ELOG_WARN("initTime is smaller than currentTime, possible problems when recording ");
      }
      if (ret > UNPACKAGE_BUFFER_SIZE){
        ELOG_ERROR("Unpackaged Audio size too big %d", ret);
      }
      AVPacket avpkt;
      av_init_packet(&avpkt);
      avpkt.data = unpackagedAudioBuffer_;
      avpkt.size = ret;
      avpkt.pts = millis - initTime_;
      avpkt.stream_index = 1;
      av_write_frame(context_, &avpkt);
      av_free_packet(&avpkt);
      return ret;

    }
    return 0;
  }
Example #19
  int InputProcessor::unpackageAudio(unsigned char* inBuff, int inBuffLen,
      unsigned char* outBuff) {

    RTPHeader* head = reinterpret_cast<RTPHeader*>(inBuff);
    if (head->getPayloadType()!=0){
      ELOG_DEBUG("PT AUDIO %d", head->getPayloadType());
      //      return -1;
    }

    //    ELOG_DEBUG("Audio Timestamp %u", head->getTimestamp());
    int l = inBuffLen - RTPHeader::MIN_SIZE;
    if (l<0){
      ELOG_ERROR ("Error unpackaging audio");
      return 0;
    }
    memcpy(outBuff, &inBuff[RTPHeader::MIN_SIZE], l);

    return l;
  }
Example #20
File: elog.cpp Project: atria-soft/elog
static elog::level getLogLevel(const std::string& _value) {
	if (_value == "0") {
		return elog::level_none;
	} else if (_value == "1") {
		return elog::level_critical;
	} else if (_value == "2") {
		return elog::level_error;
	} else if (_value == "3") {
		return elog::level_warning;
	} else if (_value == "4") {
		return elog::level_info;
	} else if (_value == "5") {
		return elog::level_debug;
	} else if (_value == "6") {
		return elog::level_verbose;
	}
	ELOG_ERROR("Unknow log level : " << _value);
	return elog::level_verbose;
}
Example #21
 void NiceConnection::updateComponentState(unsigned int compId, IceState state) {
   ELOG_DEBUG("%s - NICE Component State Changed %u - %u, total comps %u", transportName->c_str(), compId, state, iceComponents_);
   comp_state_list_[compId] = state;
   if (state == NICE_READY) {
     for (unsigned int i = 1; i<=iceComponents_; i++) {
       if (comp_state_list_[i] != NICE_READY) {
         return;
       }
     }
   }else if (state == NICE_FAILED){
     ELOG_ERROR("%s - NICE Component %u FAILED", transportName->c_str(), compId);
     for (unsigned int i = 1; i<=iceComponents_; i++) {
       if (comp_state_list_[i] != NICE_FAILED) {
         return;
       }
     }
   }
   this->updateIceState(state);
 }
Example #22
int ExternalInput::init() {
  context_ = avformat_alloc_context();
  av_register_all();
  avcodec_register_all();
  avformat_network_init();
  // open rtsp
  av_init_packet(&avpacket_);
  avpacket_.data = NULL;
  ELOG_DEBUG("Trying to open input from url %s", url_.c_str());
  int res = avformat_open_input(&context_, url_.c_str(), NULL, NULL);
  char errbuff[500];
  ELOG_DEBUG("Opening input result %d", res);
  if (res != 0) {
    av_strerror(res, reinterpret_cast<char*>(&errbuff), 500);
    ELOG_ERROR("Error opening input %s", errbuff);
    return res;
  }
  res = avformat_find_stream_info(context_, NULL);
  if (res < 0) {
    av_strerror(res, reinterpret_cast<char*>(&errbuff), 500);
    ELOG_ERROR("Error finding stream info %s", errbuff);
    return res;
  }

  // VideoCodecInfo info;
  MediaInfo om;
  AVStream *st = NULL, *audio_st = NULL;

  int streamNo = av_find_best_stream(context_, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
  if (streamNo < 0) {
    ELOG_WARN("No Video stream found");
    // return streamNo;
  } else {
    om.hasVideo = true;
    video_stream_index_ = streamNo;
    st = context_->streams[streamNo];
  }

  int audioStreamNo = av_find_best_stream(context_, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
  if (audioStreamNo < 0) {
    ELOG_WARN("No Audio stream found");
    ELOG_DEBUG("Has video, audio stream number %d. time base = %d / %d ",
               video_stream_index_, st->time_base.num, st->time_base.den);
    // return streamNo;
  } else {
    om.hasAudio = true;
    audio_stream_index_ = audioStreamNo;
    audio_st = context_->streams[audio_stream_index_];
    ELOG_DEBUG("Has Audio, audio stream number %d. time base = %d / %d ",
               audio_stream_index_, audio_st->time_base.num, audio_st->time_base.den);
    audio_time_base_ = audio_st->time_base.den;
    ELOG_DEBUG("Audio Time base %d", audio_time_base_);
    if (audio_st->codec->codec_id == AV_CODEC_ID_PCM_MULAW) {
      ELOG_DEBUG("PCM U8");
      om.audioCodec.sampleRate = 8000;
      om.audioCodec.codec = AUDIO_CODEC_PCM_U8;
      om.rtpAudioInfo.PT = PCMU_8000_PT;
    } else if (audio_st->codec->codec_id == AV_CODEC_ID_OPUS) {
      ELOG_DEBUG("OPUS");
      om.audioCodec.sampleRate = 48000;
      om.audioCodec.codec = AUDIO_CODEC_OPUS;
      om.rtpAudioInfo.PT = OPUS_48000_PT;
    }
    if (!om.hasVideo)
      st = audio_st;
  }


  if (!om.hasVideo || st->codec->codec_id == AV_CODEC_ID_VP8) {
    ELOG_DEBUG("No need for video transcoding, already VP8");
    video_time_base_ = st->time_base.den;
    needTranscoding_ = false;
    decodedBuffer_.reset((unsigned char*) malloc(100000));
    // Reuse the MediaInfo configured above; re-declaring it here would shadow
    // the audio codec settings that were already detected.
    om.processorType = PACKAGE_ONLY;
    op_.reset(new OutputProcessor());
    op_->init(om, this);
  } else {
    needTranscoding_ = true;
    inCodec_.initDecoder(st->codec);

    bufflen_ = st->codec->width*st->codec->height*3/2;
    decodedBuffer_.reset((unsigned char*) malloc(bufflen_));


    om.processorType = RTP_ONLY;
    om.videoCodec.codec = VIDEO_CODEC_VP8;
    om.videoCodec.bitRate = 1000000;
    om.videoCodec.width = 640;
    om.videoCodec.height = 480;
    om.videoCodec.frameRate = 20;
    om.hasVideo = true;

    om.hasAudio = false;
    if (om.hasAudio) {
      om.audioCodec.sampleRate = 8000;
      om.audioCodec.bitRate = 64000;
    }

    op_.reset(new OutputProcessor());
    op_->init(om, this);
  }

  av_init_packet(&avpacket_);

  thread_ = boost::thread(&ExternalInput::receiveLoop, this);
  running_ = true;
  if (needTranscoding_)
    encodeThread_ = boost::thread(&ExternalInput::encodeLoop, this);

  return true;
}
Example #23
  void WebRtcConnection::onTransportData(char* buf, int len, Transport *transport) {
    if (audioSink_ == NULL && videoSink_ == NULL && fbSink_==NULL){
      return;
    }
    
    // PROCESS RTCP
    RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(buf);
    if (chead->isRtcp()) {
      thisStats_.processRtcpPacket(buf, len);
      if (chead->packettype == RTCP_Sender_PT) { //Sender Report
        rtcpProcessor_->analyzeSr(chead);
      }
    }

    // DELIVER FEEDBACK (RR, FEEDBACK PACKETS)
    if (chead->isFeedback()){
      if (fbSink_ != NULL && shouldSendFeedback_) {
        fbSink_->deliverFeedback(buf,len);
      }
    } else {
      // RTP or RTCP Sender Report
      if (bundle_) {
        // Check incoming SSRC
        RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
        RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
        uint32_t recvSSRC;
        if (chead->packettype == RTCP_Sender_PT) { //Sender Report
          recvSSRC = chead->getSSRC();             
        }else{
          recvSSRC = head->getSSRC();
        }
        // Deliver data
        if (recvSSRC==this->getVideoSourceSSRC()) {
          parseIncomingPayloadType(buf, len, VIDEO_PACKET);
          videoSink_->deliverVideoData(buf, len);
        } else if (recvSSRC==this->getAudioSourceSSRC()) {
          parseIncomingPayloadType(buf, len, AUDIO_PACKET);
          audioSink_->deliverAudioData(buf, len);
        } else {
          ELOG_ERROR("Unknown SSRC %u, localVideo %u, remoteVideo %u, ignoring", recvSSRC, this->getVideoSourceSSRC(), this->getVideoSinkSSRC());
        }
      } else if (transport->mediaType == AUDIO_TYPE) {
        if (audioSink_ != NULL) {
          parseIncomingPayloadType(buf, len, AUDIO_PACKET);
          RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
          RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
          // Firefox does not send SSRC in SDP
          if (this->getAudioSourceSSRC() == 0) {
            unsigned int recvSSRC;
            if (chead->packettype == RTCP_Sender_PT) { // Sender Report
              recvSSRC = chead->getSSRC();
            } else {
              recvSSRC = head->getSSRC();
            }
            ELOG_DEBUG("Audio Source SSRC is %u", recvSSRC);
            this->setAudioSourceSSRC(recvSSRC);
          }
          audioSink_->deliverAudioData(buf, len);
        }
      } else if (transport->mediaType == VIDEO_TYPE) {
        if (videoSink_ != NULL) {
          parseIncomingPayloadType(buf, len, VIDEO_PACKET);
          RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
          RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
           // Firefox does not send SSRC in SDP
          if (this->getVideoSourceSSRC() == 0) {
            unsigned int recvSSRC;
            if (chead->packettype == RTCP_Sender_PT) { //Sender Report
              recvSSRC = chead->getSSRC();
            } else {
              recvSSRC = head->getSSRC();
            }
            ELOG_DEBUG("Video Source SSRC is %u", recvSSRC);
            this->setVideoSourceSSRC(recvSSRC);
          }
          // change ssrc for RTP packets, don't touch here if RTCP
          videoSink_->deliverVideoData(buf, len);
        }
      }
    }
    // check if we need to send FB || RR messages
    rtcpProcessor_->checkRtcpFb();      
  }
Example #24
  void WebRtcConnection::updateState(TransportState state, Transport * transport) {
    boost::mutex::scoped_lock lock(updateStateMutex_);
    WebRTCEvent temp = globalState_;
    std::string msg = "";
    ELOG_INFO("Update Transport State %s to %d", transport->transport_name.c_str(), state);
    if (videoTransport_ == NULL && audioTransport_ == NULL) {
      ELOG_ERROR("Update Transport State with Transport NULL, this should not happen!");
      return;
    }
    

    if (globalState_ == CONN_FAILED) {
      // if current state is failed -> noop
      return;
    }

    switch (state){
      case TRANSPORT_STARTED:
        if (bundle_){
          temp = CONN_STARTED;
        }else{
          if ((!remoteSdp_.hasAudio || (audioTransport_ != NULL && audioTransport_->getTransportState() == TRANSPORT_STARTED)) &&
            (!remoteSdp_.hasVideo || (videoTransport_ != NULL && videoTransport_->getTransportState() == TRANSPORT_STARTED))) {
              // WebRTCConnection will be ready only when all channels are ready.
              temp = CONN_STARTED;
            }
        }
        break;
      case TRANSPORT_GATHERED:
        if (bundle_){
          if(!trickleEnabled_){
            temp = CONN_GATHERED;
            msg = this->getLocalSdp();
          }
        }else{
          if ((!remoteSdp_.hasAudio || (audioTransport_ != NULL && audioTransport_->getTransportState() == TRANSPORT_GATHERED)) &&
            (!remoteSdp_.hasVideo || (videoTransport_ != NULL && videoTransport_->getTransportState() == TRANSPORT_GATHERED))) {
              // WebRTCConnection will be ready only when all channels are ready.
              if(!trickleEnabled_){
                temp = CONN_GATHERED;
                msg = this->getLocalSdp();
              }
            }
        }
        break;
      case TRANSPORT_READY:
        if (bundle_){
          temp = CONN_READY;

        }else{
          if ((!remoteSdp_.hasAudio || (audioTransport_ != NULL && audioTransport_->getTransportState() == TRANSPORT_READY)) &&
            (!remoteSdp_.hasVideo || (videoTransport_ != NULL && videoTransport_->getTransportState() == TRANSPORT_READY))) {
              // WebRTCConnection will be ready only when all channels are ready.
              temp = CONN_READY;            
            }
        }
        break;
      case TRANSPORT_FAILED:
        temp = CONN_FAILED;
        sending_ = false;
        msg = remoteSdp_.getSdp();
        ELOG_INFO("WebRtcConnection failed, stopping sending");
        cond_.notify_one();
        break;
      default:
        ELOG_DEBUG("New state %d", state);
        break;
    }

    if (audioTransport_ != NULL && videoTransport_ != NULL) {
      ELOG_INFO("%s - Update Transport State end, %d - %d, %d - %d, %d - %d", 
        transport->transport_name.c_str(),
        (int)audioTransport_->getTransportState(), 
        (int)videoTransport_->getTransportState(), 
        this->getAudioSourceSSRC(),
        this->getVideoSourceSSRC(),
        (int)temp, 
        (int)globalState_);
    }
    
    if (globalState_ == temp)
      return;

    globalState_ = temp;

    if (connEventListener_ != NULL) {
      connEventListener_->notifyEvent(globalState_, msg);
    }
  }
Example #25
void ExternalOutput::writeVideoData(char* buf, int len){
    RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);
    if (head->getPayloadType() == RED_90000_PT) {
        int totalLength = head->getHeaderLength();
        int rtpHeaderLength = totalLength;
        RedHeader *redhead = reinterpret_cast<RedHeader*>(buf + totalLength);
        if (redhead->payloadtype == VP8_90000_PT) {
            while (redhead->follow) {
                totalLength += redhead->getLength() + 4; // RED header
                redhead = reinterpret_cast<RedHeader*>(buf + totalLength);
            }
            // Parse RED packet to VP8 packet.
            // Copy RTP header
            memcpy(deliverMediaBuffer_, buf, rtpHeaderLength);
            // Copy payload data
            memcpy(deliverMediaBuffer_ + totalLength, buf + totalLength + 1, len - totalLength - 1);
            // Copy payload type
            RtpHeader *mediahead = reinterpret_cast<RtpHeader*>(deliverMediaBuffer_);
            mediahead->setPayloadType(redhead->payloadtype);
            buf = reinterpret_cast<char*>(deliverMediaBuffer_);
            len = len - 1 - totalLength + rtpHeaderLength;
        }
    }

    if (firstVideoTimestamp_ == -1) {
        firstVideoTimestamp_ = head->getTimestamp();
    }

    int gotUnpackagedFrame = false;
    int ret = inputProcessor_->unpackageVideo(reinterpret_cast<unsigned char*>(buf), len, unpackagedBufferpart_, &gotUnpackagedFrame);
    if (ret < 0){
        ELOG_ERROR("Error Unpackaging Video");
        return;
    }

    initContext();

    if (video_stream_ == NULL) {
        // could not init our context yet.
        return;
    }

    unpackagedSize_ += ret;
    unpackagedBufferpart_ += ret;

    if (gotUnpackagedFrame) {
        unpackagedBufferpart_ -= unpackagedSize_;

        long long currentTimestamp = head->getTimestamp();
        if (currentTimestamp - firstVideoTimestamp_ < 0) {
            // we wrapped.  add 2^32 to correct this.  We only handle a single wrap around since that's ~13 hours of recording, minimum.
            currentTimestamp += 0x100000000LL;
        }

        long long timestampToWrite = (currentTimestamp - firstVideoTimestamp_) / (90000 / video_stream_->time_base.den);  // All of our video offerings are using a 90khz clock.

        // Adjust for our start time offset
        timestampToWrite += videoOffsetMsec_ / (1000 / video_stream_->time_base.den);   // in practice, our timebase den is 1000, so this operation is a no-op.

        /* ELOG_DEBUG("Writing video frame %d with timestamp %u, normalized timestamp %u, video offset msec %u, length %d, input timebase: %d/%d, target timebase: %d/%d", */
        /*            head->getSeqNumber(), head->getTimestamp(), timestampToWrite, videoOffsetMsec_, unpackagedSize_, */
        /*            video_stream_->codec->time_base.num, video_stream_->codec->time_base.den,    // timebase we requested */
        /*            video_stream_->time_base.num, video_stream_->time_base.den);                 // actual timebase */

        AVPacket avpkt;
        av_init_packet(&avpkt);
        avpkt.data = unpackagedBufferpart_;
        avpkt.size = unpackagedSize_;
        avpkt.pts = timestampToWrite;
        avpkt.stream_index = 0;
        av_write_frame(context_, &avpkt);
        av_free_packet(&avpkt);
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
    }
}
Example #26
  int ExternalOutput::deliverVideoData(char* buf, int len){
    if (in!=NULL){
      rtpheader *head = (rtpheader*) buf;
      if (head->payloadtype == RED_90000_PT) {
        int totalLength = 12;

        if (head->extension) {
          totalLength += ntohs(head->extensionlength)*4 + 4; // RTP Extension header
        }
        int rtpHeaderLength = totalLength;
        redheader *redhead = (redheader*) (buf + totalLength);

        //redhead->payloadtype = remoteSdp_.inOutPTMap[redhead->payloadtype];
        if (redhead->payloadtype == VP8_90000_PT) {
          while (redhead->follow) {
            totalLength += redhead->getLength() + 4; // RED header
            redhead = (redheader*) (buf + totalLength);
          }
          // Parse RED packet to VP8 packet.
          // Copy RTP header
          memcpy(deliverMediaBuffer_, buf, rtpHeaderLength);
          // Copy payload data
          memcpy(deliverMediaBuffer_ + totalLength, buf + totalLength + 1, len - totalLength - 1);
          // Copy payload type
          rtpheader *mediahead = (rtpheader*) deliverMediaBuffer_;
          mediahead->payloadtype = redhead->payloadtype;
          buf = deliverMediaBuffer_;
          len = len - 1 - totalLength + rtpHeaderLength;
        }
      }
      int estimatedFps=0;
      int ret = in->unpackageVideo(reinterpret_cast<unsigned char*>(buf), len,
          unpackagedBufferpart_, &gotUnpackagedFrame_, &estimatedFps);
      //          ELOG_DEBUG("Estimated FPS %d, previous %d", estimatedFps, prevEstimatedFps_);

      if (ret < 0)
        return 0;
      
      if (videoCodec_ == NULL) {
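        // Context not initialized yet: keep refining the fps estimate and, after ~20 warm-up
        // packets, try to initialize the muxer with the estimated frame rate.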
        if ((estimatedFps!=0)&&((estimatedFps < prevEstimatedFps_*(1-0.2))||(estimatedFps > prevEstimatedFps_*(1+0.2)))){
          //          ELOG_DEBUG("OUT OF THRESHOLD changing context");
          prevEstimatedFps_ = estimatedFps;
        }
        if (warmupfpsCount_++ == 20){
          
          if (!this->initContext()){
            ELOG_ERROR("Contex cannot be initialized properly, closing...");
            this->closeSink();
          }
        }
        return 0;
      }

      unpackagedSize_ += ret;
      unpackagedBufferpart_ += ret;
      if (gotUnpackagedFrame_ && videoCodec_!=NULL) {
        timeval time;
        gettimeofday(&time, NULL);
        long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
        if (initTime_ == 0) {
          initTime_ = millis;
        }
        unpackagedBufferpart_ -= unpackagedSize_;
        AVPacket avpkt;
        av_init_packet(&avpkt);
        avpkt.data = unpackagedBufferpart_;
        avpkt.size = unpackagedSize_;
        avpkt.pts = millis - initTime_;
        avpkt.stream_index = 0;
        av_write_frame(context_, &avpkt);
        av_free_packet(&avpkt);
        gotUnpackagedFrame_ = 0;
        unpackagedSize_ = 0;

      }
    }
    return 0;
  }
Example #27
File: elog.cpp Project: atria-soft/elog
void elog::init(int _argc, const char** _argv) {
	ELOG_INFO("E-log system init (BEGIN)");
	// retrieve application name:
	std::string applName = _argv[0];
	int lastSlash = applName.rfind('/');
	applName = &applName[lastSlash+1];
	// get name: applName
	bool userSpecifyLogFile = false;
	for (int32_t iii=0; iii<_argc ; ++iii) {
		std::string data = _argv[iii];
		if (startWith(data, "--elog-level=")) {
			ELOG_INFO("Change global level at " << getLogLevel(std::string(data.begin()+13, data.end())));
			elog::setLevel(getLogLevel(std::string(data.begin()+13, data.end())));
		} else if (data == "--elog-color") {
			elog::setColor(true);
		} else if (data == "--elog-no-color") {
			elog::setColor(false);
		} else if (data == "--elog-back-trace") {
			elog::setBackTrace(true);
		} else if (startWith(data, "--elog-file=")) {
			std::string value(data.begin()+12, data.end());
			if (value.size() == 0) {
				elog::unsetLogInFile();
			} else {
				elog::setLogInFile(value);
			}
			userSpecifyLogFile = true;
		} else if (startWith(data, "--elog-config=")) {
			std::string value(data.begin()+14, data.end());
			elog::setTime(false);
			elog::setLine(false);
			elog::setFunction(false);
			elog::setLibName(false);
			elog::setThreadId(false);
			elog::setThreadNameEnable(false);
			for (size_t iii=0; iii<value.size(); ++iii) {
				if (value[iii] == 't') {
					elog::setTime(true);
				} else if (value[iii] == 'T') {
					elog::setThreadId(true);
				} else if (value[iii] == 'N') {
					elog::setThreadNameEnable(true);
				} else if (value[iii] == 'L') {
					elog::setLine(true);
				} else if (value[iii] == 'l') {
					elog::setLibName(true);
				} else if (value[iii] == 'f') {
					elog::setFunction(true);
				} else {
					ELOG_ERROR("In program argument: --elog-config= , the value '" << value[iii] << "' is not supported");
				}
			}
		} else if (startWith(data, "--elog-lib=")) {
			std::string value(data.begin()+11, data.end());
			std::vector<std::string> list = split(value, '/');
			if (list.size() != 2) {
				list = split(value, ':');
				if (list.size() != 2) {
					list = split(value, '+');
					if (list.size() != 2) {
						ELOG_ERROR("Can not set the --elog-lib= with value='" << value << "' not formated name:X or name/X or name+X");
						continue;
					}
				}
			}
			ELOG_INFO("Change level of '" << list[0] << "' at " << getLogLevel(list[1]));
			elog::setLevel(list[0], getLogLevel(list[1]));
		} else if (    data == "-h"
		            || data == "--help") {
			ELOG_PRINT("elog - help : ");
			ELOG_PRINT("    " << _argv[0] << " [options]");
			ELOG_PRINT("        --elog-level=            Change the default log level (set all Log level):");
			ELOG_PRINT("            0: debug None (default in release)");
			ELOG_PRINT("            1: debug Critical");
			ELOG_PRINT("            2: debug Error");
			ELOG_PRINT("            3: debug Warning");
			ELOG_PRINT("            4: debug Info (default in debug)");
			ELOG_PRINT("            5: debug Debug");
			ELOG_PRINT("            6: debug Verbose");
			ELOG_PRINT("        --elog-lib=name:X  Set a library specific level:");
			ELOG_PRINT("            name  Name of the library");
			ELOG_PRINT("            X     Log level to set [0..6]");
			ELOG_PRINT("            note: ':' can be replace with '/' or '+'");
			ELOG_PRINT("        --elog-file=pathToFile   File to store the logs: (disable console logs)");
			ELOG_PRINT("        --elog-color             Enable color in log (default in Linux/debug)");
			ELOG_PRINT("        --elog-no-color          Disable color in log (default in Linux/release and Other)");
			ELOG_PRINT("        --elog-back-trace        Enable back-trace when an error log level is generated (to get a fast debug)");
			ELOG_PRINT("        --elog-config=           Configure the Log interface");
			ELOG_PRINT("            t: diplay time");
			#ifdef ELOG_BUILD_ETHREAD
				ELOG_PRINT("            T: diplay thread id");
				ELOG_PRINT("            N: diplay thread name");
			#endif
			ELOG_PRINT("            L: diplay line number");
			ELOG_PRINT("            l: diplay lib name");
			ELOG_PRINT("            f: diplay function name");
			ELOG_PRINT("        -h/--help:               Dispplay this help");
			ELOG_PRINT("    example:");
			ELOG_PRINT("        " << _argv[0] << " --elog-color --elog-level=2 --elog-lib=etk:5 --elog-lib=appl:6 --elog-config=NLlf");
		} else if (startWith(data, "--elog") == true) {
			ELOG_ERROR("Can not parse the argument : '" << data << "'");
		}
	}
	if (userSpecifyLogFile == false) {
		#ifdef DEBUG
			#if defined(__TARGET_OS__Windows)
				elog::setLogInFile("log.txt");
			#endif
		#else
			#if defined(__TARGET_OS__Linux)
				//elog::setLogInFile("/var/log/elog_" +applName + ".log");
				elog::setLogInFile("/tmp/elog_" +applName + ".log");
			#elif defined(__TARGET_OS__MacOs)
				elog::setLogInFile(applName + ".log");
			#elif defined(__TARGET_OS__Windows)
				elog::setLogInFile(applName + ".log");
			#endif
		#endif
	}
	
	ELOG_INFO("E-LOG system init (END)");
}
Example #28
  int ExternalOutput::writeVideoData(char* buf, int len){
    if (in!=NULL){
      rtpheader *head = (rtpheader*) buf;
      if (head->payloadtype == RED_90000_PT) {
        int totalLength = 12;

        if (head->extension) {
          totalLength += ntohs(head->extensionlength)*4 + 4; // RTP Extension header
        }
        int rtpHeaderLength = totalLength;
        redheader *redhead = (redheader*) (buf + totalLength);

        //redhead->payloadtype = remoteSdp_.inOutPTMap[redhead->payloadtype];
        if (redhead->payloadtype == VP8_90000_PT) {
          while (redhead->follow) {
            totalLength += redhead->getLength() + 4; // RED header
            redhead = (redheader*) (buf + totalLength);
          }
          // Parse RED packet to VP8 packet.
          // Copy RTP header
          memcpy(deliverMediaBuffer_, buf, rtpHeaderLength);
          // Copy payload data
          memcpy(deliverMediaBuffer_ + totalLength, buf + totalLength + 1, len - totalLength - 1);
          // Copy payload type
          rtpheader *mediahead = (rtpheader*) deliverMediaBuffer_;
          mediahead->payloadtype = redhead->payloadtype;
          buf = reinterpret_cast<char*>(deliverMediaBuffer_);
          len = len - 1 - totalLength + rtpHeaderLength;
        }
      }
      int estimatedFps=0;
      int ret = in->unpackageVideo(reinterpret_cast<unsigned char*>(buf), len,
          unpackagedBufferpart_, &gotUnpackagedFrame_, &estimatedFps);

      if (ret < 0)
        return 0;
      
      if (videoCodec_ == NULL) {
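        // Context not initialized yet: keep refining the fps estimate and, after ~20 warm-up
        // packets, try to initialize the muxer with the estimated frame rate.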
        if ((estimatedFps!=0)&&((estimatedFps < prevEstimatedFps_*(1-0.2))||(estimatedFps > prevEstimatedFps_*(1+0.2)))){
          prevEstimatedFps_ = estimatedFps;
        }
        if (warmupfpsCount_++ == 20){
          if (prevEstimatedFps_==0){
            warmupfpsCount_ = 0;
            return 0;
          }
          if (!this->initContext()){
            ELOG_ERROR("Context cannot be initialized properly, closing...");
            return -1;
          }
        }
        return 0;
      }

      unpackagedSize_ += ret;
      unpackagedBufferpart_ += ret;
      if (unpackagedSize_ > UNPACKAGE_BUFFER_SIZE){
        ELOG_ERROR("Unpackaged size bigget than buffer %d", unpackagedSize_);
      }
      if (gotUnpackagedFrame_ && videoCodec_!=NULL) {
        timeval time;
        gettimeofday(&time, NULL);
        unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
        if (initTime_ == 0) {
          initTime_ = millis;
        }
        if (millis < initTime_)
        {
          ELOG_WARN("initTime is smaller than currentTime, possible problems when recording ");
        }
        unpackagedBufferpart_ -= unpackagedSize_;

        AVPacket avpkt;
        av_init_packet(&avpkt);
        avpkt.data = unpackagedBufferpart_;
        avpkt.size = unpackagedSize_;
        avpkt.pts = millis - initTime_;
        avpkt.stream_index = 0;
        av_write_frame(context_, &avpkt);
        av_free_packet(&avpkt);
        gotUnpackagedFrame_ = 0;
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;

      }
    }
    return 0;
  }
Example #29
 void WebRtcConnection::onTransportData(char* buf, int len, Transport *transport) {
   if (audioSink_ == NULL && videoSink_ == NULL && fbSink_==NULL){
     return;
   }
   
   // PROCESS STATS
   if (this->statsListener_){ // if there is no listener we don't process stats
     RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
     if (head->payloadtype != RED_90000_PT && head->payloadtype != PCMU_8000_PT)     
       thisStats_.processRtcpPacket(buf, len);
   }
   RtcpHeader* chead = reinterpret_cast<RtcpHeader*>(buf);
   // DELIVER FEEDBACK (RR, FEEDBACK PACKETS)
   if (chead->isFeedback()){
     if (fbSink_ != NULL) {
       fbSink_->deliverFeedback(buf,len);
     }
   } else {
     // RTP or RTCP Sender Report
     if (bundle_) {
       // Check incoming SSRC
       RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
       RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
       unsigned int recvSSRC;
       if (chead->packettype == RTCP_Sender_PT) { //Sender Report
         recvSSRC = chead->getSSRC();
       }else{
         recvSSRC = head->getSSRC();
       }
       // Deliver data
       if (recvSSRC==this->getVideoSourceSSRC() || recvSSRC==this->getVideoSinkSSRC()) {
         videoSink_->deliverVideoData(buf, len);
       } else if (recvSSRC==this->getAudioSourceSSRC() || recvSSRC==this->getAudioSinkSSRC()) {
         audioSink_->deliverAudioData(buf, len);
       } else {
         ELOG_ERROR("Unknown SSRC %u, localVideo %u, remoteVideo %u, ignoring", recvSSRC, this->getVideoSourceSSRC(), this->getVideoSinkSSRC());
       }
     } else if (transport->mediaType == AUDIO_TYPE) {
       if (audioSink_ != NULL) {
         RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
         // Firefox does not send SSRC in SDP
         if (this->getAudioSourceSSRC() == 0) {
           ELOG_DEBUG("Audio Source SSRC is %u", head->getSSRC());
           this->setAudioSourceSSRC(head->getSSRC());
           //this->updateState(TRANSPORT_READY, transport);
         }
         head->setSSRC(this->getAudioSinkSSRC());
         audioSink_->deliverAudioData(buf, len);
       }
     } else if (transport->mediaType == VIDEO_TYPE) {
       if (videoSink_ != NULL) {
         RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
         RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
          // Firefox does not send SSRC in SDP
         if (this->getVideoSourceSSRC() == 0) {
           unsigned int recvSSRC;
           if (chead->packettype == RTCP_Sender_PT) { //Sender Report
             recvSSRC = chead->getSSRC();
           } else {
             recvSSRC = head->getSSRC();
           }
           ELOG_DEBUG("Video Source SSRC is %u", recvSSRC);
           this->setVideoSourceSSRC(recvSSRC);
           //this->updateState(TRANSPORT_READY, transport);
         }
         // change ssrc for RTP packets, don't touch here if RTCP
         if (chead->packettype != RTCP_Sender_PT) {
           head->setSSRC(this->getVideoSinkSSRC());
         }
         videoSink_->deliverVideoData(buf, len);
       }
     }
   }
 }