int AudioDecoder::encode_audio_frame(AVFrame *frame, AVPacket& output_packet)
    {
        /** Initialize the packet used for temporary storage. */
        init_packet(&output_packet);

        ELOG_DEBUG("encode frame nb_samples=%d", frame->nb_samples);


        /**
         * Encode the audio frame and store it in the temporary packet.
         * The output audio stream encoder is used to do this.
         */
        int error;
        int data_present = 0;
        if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
                        frame, &data_present)) < 0) {
            ELOG_WARN("Could not encode frame,%s", get_error_text(error));
            av_free_packet(&output_packet);
            return error;
        }

        if (0 == data_present)
        {
            ELOG_WARN("no encoded data for this frame (data not present)");
            return 0;
        }


        return output_packet.size;
    }
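The helpers init_packet() and get_error_text() used above are not part of this listing; the sketches below follow FFmpeg's transcode_aac example and are assumptions, not code verified against this codebase.

    /** Initialize one data packet for reading or writing (sketch). */
    static void init_packet(AVPacket *packet)
    {
        av_init_packet(packet);
        /** Mark the packet as empty so the encoder allocates its own buffer. */
        packet->data = NULL;
        packet->size = 0;
    }

    /** Render an FFmpeg error code as a printable string (sketch; the static
     *  buffer makes this non-reentrant). */
    static const char *get_error_text(const int error)
    {
        static char error_buffer[255];
        av_strerror(error, error_buffer, sizeof(error_buffer));
        return error_buffer;
    }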
Example #2
bool NicerConnection::setRemoteCandidates(const std::vector<CandidateInfo> &candidates, bool is_bundle) {
  std::vector<CandidateInfo> cands(candidates);
  auto remote_candidates_promise = std::make_shared<std::promise<void>>();
  nr_ice_peer_ctx *peer = peer_;
  nr_ice_media_stream *stream = stream_;
  std::shared_ptr<NicerInterface> nicer = nicer_;
  async([cands, is_bundle, nicer, peer, stream, this, remote_candidates_promise] {
    ELOG_DEBUG("%s message: adding remote candidates (%ld)", toLog(), cands.size());
    for (const CandidateInfo &cand : cands) {
      std::string sdp = cand.sdp;
      std::size_t pos = sdp.find(",");
      std::string candidate = sdp.substr(0, pos);
      ELOG_DEBUG("%s message: New remote ICE candidate (%s)", toLog(), candidate.c_str());
      UINT4 r = nicer->IcePeerContextParseTrickleCandidate(peer, stream, const_cast<char *>(candidate.c_str()));
      if (r && r != R_ALREADY) {
        ELOG_WARN("%s message: Couldn't add remote ICE candidate (%s) (%d)", toLog(), candidate.c_str(), r);
      }
    }
    remote_candidates_promise->set_value();
  });
  std::future<void> remote_candidates_future = remote_candidates_promise->get_future();
  std::future_status status = remote_candidates_future.wait_for(std::chrono::seconds(1));
  if (status == std::future_status::timeout) {
    ELOG_WARN("%s message: Could not set remote candidates", toLog());
    return false;
  }
  return true;
}
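The async() helper used here and throughout NicerConnection is not shown in this listing. A plausible sketch, assuming the connection serializes all nICEr calls on a single worker queue (the io_worker_ member and its task() method are assumptions for illustration):

// Hypothetical sketch of async(): post the closure to a single worker
// thread so nICEr calls never race. Names below are illustrative.
void NicerConnection::async(std::function<void()> f) {
  std::weak_ptr<NicerConnection> weak_this = shared_from_this();
  io_worker_->task([weak_this, f] {
    if (auto strong_this = weak_this.lock()) {  // skip work if destroyed
      f();
    }
  });
}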
    /**
     * Initialize temporary storage for the specified number of audio samples.
     * The conversion requires temporary storage because the formats differ.
     * The number of audio samples to be allocated is specified in frame_size.
     */
    int AudioDecoder::init_converted_samples(uint8_t ***converted_input_samples,
            AVCodecContext *output_codec_context,
            int frame_size)
    {
        int error;

        /**
         * Allocate as many pointers as there are audio channels.
         * Each pointer will later point to the audio samples of the corresponding
         * channels (although it may be NULL for interleaved formats).
         */
        if (!(*converted_input_samples = (uint8_t**)calloc(output_codec_context->channels,
                        sizeof(**converted_input_samples)))) {
            ELOG_WARN("Could not allocate converted input sample pointers");
            return AVERROR(ENOMEM);
        }

        /**
         * Allocate memory for the samples of all channels in one consecutive
         * block for convenience.
         */
        if ((error = av_samples_alloc(*converted_input_samples, NULL,
                        output_codec_context->channels,
                        frame_size,
                        output_codec_context->sample_fmt, 0)) < 0) {
            ELOG_WARN("Could not allocate converted input samples %s", get_error_text(error));
            av_freep(&(*converted_input_samples)[0]);
            free(*converted_input_samples);
            return error;
        }
        return 0;
    }
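The matching cleanup is the caller's responsibility once the converted samples are no longer needed; following the two-step allocation above (and the error path already shown), it looks like this:

        /** Free the storage returned by init_converted_samples(). */
        if (converted_input_samples) {
            av_freep(&(converted_input_samples)[0]); /** the contiguous sample block */
            free(converted_input_samples);           /** the per-channel pointer array */
            converted_input_samples = NULL;
        }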
Example #4
  RTPPayloadVP8* RtpVP8Parser::parseVP8(unsigned char* data,
      int dataLength) {
    //ELOG_DEBUG("Parsing VP8 %d bytes", dataLength);
    RTPPayloadVP8* vp8 = new RTPPayloadVP8;  // caller takes ownership of the returned struct
    const unsigned char* dataPtr = data;

    // Parse mandatory first byte of payload descriptor
    bool extension = (*dataPtr & 0x80) ? true : false; // X bit
    vp8->nonReferenceFrame = (*dataPtr & 0x20) ? true : false; // N bit
    vp8->beginningOfPartition = (*dataPtr & 0x10) ? true : false; // S bit
    vp8->partitionID = (*dataPtr & 0x0F); // PartID field

    //ELOG_DEBUG("X: %d N %d S %d PartID %d", extension, vp8->nonReferenceFrame, vp8->beginningOfPartition, vp8->partitionID);

    if (vp8->partitionID > 8) {
      // Weak check for corrupt data: PartID MUST NOT be larger than 8.
      return vp8;
    }

    // Advance dataPtr and decrease remaining payload size
    dataPtr++;
    dataLength--;

    if (extension) {
      const int parsedBytes = ParseVP8Extension(vp8, dataPtr, dataLength);
      if (parsedBytes < 0)
        return vp8;
      dataPtr += parsedBytes;
      dataLength -= parsedBytes;
      //ELOG_DEBUG("Parsed bytes in extension %d", parsedBytes);
    }

    if (dataLength <= 0) {
      ELOG_WARN("Error parsing VP8 payload descriptor; payload too short");
      return vp8;
    }

    // Read P bit from payload header (only at beginning of first partition)
    if (vp8->beginningOfPartition && vp8->partitionID == 0) {
      vp8->frameType = (*dataPtr & 0x01) ? kPFrame : kIFrame;
    } else {
      vp8->frameType = kPFrame;
    }
    if (0 == ParseVP8FrameSize(vp8, dataPtr, dataLength)) {
      if (vp8->frameWidth != 640){
        ELOG_WARN("VP8 Frame width changed! = %d need postprocessing", vp8->frameWidth);
      }
    }
    vp8->data = dataPtr;
    vp8->dataLength = (unsigned int) dataLength;

    return vp8;
  }
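As a hypothetical usage sketch (the buffer contents are illustrative only): a payload whose first byte is 0x10 sets only the S bit with PartID 0, and a following payload header with P=0 parses as a keyframe.

  // Descriptor byte 0x10 -> X=0, N=0, S=1, PartID=0; next byte has P=0.
  unsigned char payload[] = { 0x10, 0x00, 0x00, 0x00 };
  RtpVP8Parser parser;
  RTPPayloadVP8* vp8 = parser.parseVP8(payload, sizeof(payload));
  assert(vp8->frameType == kIFrame);
  assert(vp8->beginningOfPartition && vp8->partitionID == 0);
  delete vp8;  // the caller owns the returned struct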
    /**
     * Initialize the audio resampler based on the input and output codec settings.
     * If the input and output sample formats differ, a conversion is required;
     * libswresample takes care of this, but it must be initialized first.
     */
    int AudioDecoder::init_resampler(AVCodecContext *input_codec_context,
            AVCodecContext *output_codec_context)
    {
        int error;

        /**
         * Create a resampler context for the conversion.
         * Set the conversion parameters.
         * Default channel layouts based on the number of channels
         * are assumed for simplicity (they are sometimes not detected
         * properly by the demuxer and/or decoder).
         */
        resample_context = swr_alloc_set_opts(NULL,
                av_get_default_channel_layout(output_codec_context->channels),
                output_codec_context->sample_fmt,
                output_codec_context->sample_rate,
                av_get_default_channel_layout(input_codec_context->channels),
                input_codec_context->sample_fmt,
                input_codec_context->sample_rate,
                0, NULL);

        if (!resample_context) {
            ELOG_WARN( "Could not allocate resample context\n");
            return AVERROR(ENOMEM);
        }


        /**
         * Differing input and output sample rates change the number of
         * converted samples per frame and would have to be handled
         * separately; here the rates are only logged.
         */

        ELOG_DEBUG( "audio input sample_rate = %d, out %d", input_codec_context->sample_rate, output_codec_context->sample_rate);

        /** Open the resampler with the specified parameters. */
        if ((error = swr_init(resample_context)) < 0) {
            ELOG_WARN( "Could not open resample context");
            swr_free(&resample_context);
            return error;
        }


        ELOG_DEBUG( "swr_init done");

        return 0;
    }
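convert_samples(), called from decodeAudio() below, is also not part of this listing; a minimal sketch following FFmpeg's transcode_aac example, where it is a thin wrapper around swr_convert():

    /** Convert the input samples to the output sample format (sketch). */
    int AudioDecoder::convert_samples(const uint8_t **input_data, uint8_t **converted_data,
            const int frame_size, SwrContext *resample_context)
    {
        int error;
        /** Convert frame_size samples using the resampler context. */
        if ((error = swr_convert(resample_context,
                        converted_data, frame_size,
                        input_data, frame_size)) < 0) {
            ELOG_WARN("Could not convert input samples %s", get_error_text(error));
            return error;
        }
        return 0;
    }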
    int AudioDecoder::decodeAudio(AVPacket& input_packet, AVPacket& outPacket)    {
        ELOG_DEBUG("decoding input packet, size %d", input_packet.size);
        
        AVFrame* input_frame;
        init_frame(&input_frame);

        int data_present;
        int error = avcodec_decode_audio4(input_codec_context, input_frame, &data_present,&input_packet);

        if (error < 0)
        {
            ELOG_DEBUG("decoding error %s", get_error_text(error));
            av_frame_free(&input_frame);
            return error;
        }

        if (data_present <= 0)
        {
            ELOG_DEBUG("data not present");
            av_frame_free(&input_frame);
            return 0;
        }

        // resample

        /** Initialize the temporary storage for the converted input samples. */
        uint8_t **converted_input_samples = NULL;
        if (init_converted_samples(&converted_input_samples, output_codec_context, input_frame->nb_samples))
        {
            ELOG_DEBUG("init_converted_samples failed");
            av_frame_free(&input_frame);
            return 0;
        }

        /**
         * Convert the input samples to the desired output sample format.
         * This requires a temporary storage provided by converted_input_samples
         */
        if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
                    input_frame->nb_samples, resample_context))
        {
            ELOG_WARN("convert_samples failed!!");
            av_freep(&converted_input_samples[0]);
            free(converted_input_samples);
            av_frame_free(&input_frame);
            return 0;
        }

        /** Add converted input samples to the FIFO buffer for later processing. */
        if (add_samples_to_fifo(fifo, converted_input_samples,
                    input_frame->nb_samples))
        {
            ELOG_WARN("add_samples_to_fifo failed!!");
        }

        /** The samples now live in the FIFO; release the temporary storage. */
        av_freep(&converted_input_samples[0]);
        free(converted_input_samples);
        av_frame_free(&input_frame);

        outPacket.pts = input_packet.pts;

        // meanwhile, encode and packetize
        return load_encode(outPacket);
    }
Example #7
CandidatePair NicerConnection::getSelectedPair() {
  auto selected_pair_promise = std::make_shared<std::promise<CandidatePair>>();
  async([this, selected_pair_promise] {
    nr_ice_candidate *local;
    nr_ice_candidate *remote;
    nicer_->IceMediaStreamGetActive(peer_, stream_, 1, &local, &remote);
    CandidatePair pair;
    if (!local || !remote) {
      selected_pair_promise->set_value(CandidatePair{});
      return;
    }
    pair.clientCandidateIp = getStringFromAddress(remote->addr);
    pair.erizoCandidateIp = getStringFromAddress(local->addr);
    pair.clientCandidatePort = getPortFromAddress(remote->addr);
    pair.erizoCandidatePort = getPortFromAddress(local->addr);
    pair.clientHostType = getHostTypeFromNicerCandidate(remote);
    pair.erizoHostType = getHostTypeFromNicerCandidate(local);
    ELOG_DEBUG("%s message: Client Host Type %s", toLog(), pair.clientHostType.c_str());
    selected_pair_promise->set_value(pair);
  });
  std::future<CandidatePair> selected_pair_future = selected_pair_promise->get_future();
  std::future_status status = selected_pair_future.wait_for(std::chrono::seconds(1));
  if (status == std::future_status::timeout) {
    ELOG_WARN("%s message: Could not get selected pair", toLog());
    return CandidatePair{};
  }
  // get() is only safe once the future is known to be ready; calling it
  // before the timeout check could block indefinitely.
  return selected_pair_future.get();
}
Example #8
  WebRtcConnection::WebRtcConnection(bool audioEnabled, bool videoEnabled, const std::string &stunServer, int stunPort, int minPort, int maxPort) {

    ELOG_WARN("WebRtcConnection constructor stunserver %s stunPort %d minPort %d maxPort %d\n", stunServer.c_str(), stunPort, minPort, maxPort);
    video_ = 0;
    audio_ = 0;
    sequenceNumberFIR_ = 0;
    bundle_ = false;
    this->setVideoSinkSSRC(55543);
    this->setAudioSinkSSRC(44444);
    videoSink_ = NULL;
    audioSink_ = NULL;
    fbSink_ = NULL;
    sourcefbSink_ = this;
    sinkfbSource_ = this;

    globalState_ = INITIAL;
    connStateListener_ = NULL;

    sending_ = true;
    send_Thread_ = boost::thread(&WebRtcConnection::sendLoop, this);

    videoTransport_ = NULL;
    audioTransport_ = NULL;

    audioEnabled_ = audioEnabled;
    videoEnabled_ = videoEnabled;

    deliverMediaBuffer_ = (char*)malloc(3000);

    stunServer_ = stunServer;
    stunPort_ = stunPort;
    minPort_ = minPort;
    maxPort_ = maxPort;
  }
Example #9
void NicerConnection::setupTurnServer() {
  if (ice_config_.turn_server.empty()) {
    return;
  }
  auto servers = std::unique_ptr<nr_ice_turn_server[]>(new nr_ice_turn_server[1]);
  nr_ice_turn_server *server = &servers[0];
  nr_ice_stun_server *stun_server = &server->turn_server;
  memset(server, 0, sizeof(nr_ice_turn_server));
  stun_server->transport = IPPROTO_UDP;
  stun_server->type = NR_ICE_STUN_SERVER_TYPE_ADDR;
  nr_transport_addr addr;
  nr_str_port_to_transport_addr(ice_config_.turn_server.c_str(), ice_config_.turn_port, IPPROTO_UDP, &addr);
  stun_server->u.addr = addr;

  server->username = r_strdup(const_cast<char*>(ice_config_.turn_username.c_str()));
  int r = r_data_create(&server->password,
                        reinterpret_cast<UCHAR*>(const_cast<char *>(&ice_config_.turn_pass[0])),
                        ice_config_.turn_pass.size());
  if (r) {
    RFREE(server->username);
    return;
  }

  r = nicer_->IceContextSetTurnServers(ctx_, servers.get(), 1);
  if (r) {
    ELOG_WARN("%s message: Could not setup TURN", toLog());
    return;
  }

  ELOG_DEBUG("%s message: TURN server configured", toLog());
}
Example #10
  WebRtcConnection::WebRtcConnection(bool audioEnabled, bool videoEnabled, const std::string &stunServer, int stunPort, int minPort, int maxPort, bool trickleEnabled, WebRtcConnectionEventListener* listener)
      : connEventListener_(listener), fec_receiver_(this){
    ELOG_WARN("WebRtcConnection constructor stunserver %s stunPort %d minPort %d maxPort %d\n", stunServer.c_str(), stunPort, minPort, maxPort);
    sequenceNumberFIR_ = 0;
    bundle_ = false;
    this->setVideoSinkSSRC(55543);
    this->setAudioSinkSSRC(44444);
    videoSink_ = NULL;
    audioSink_ = NULL;
    fbSink_ = NULL;
    sourcefbSink_ = this;
    sinkfbSource_ = this;
    globalState_ = CONN_INITIAL;
    videoTransport_ = NULL;
    audioTransport_ = NULL;

    shouldSendFeedback_ = true;

    audioEnabled_ = audioEnabled;
    videoEnabled_ = videoEnabled;
    trickleEnabled_ = trickleEnabled;

    stunServer_ = stunServer;
    stunPort_ = stunPort;
    minPort_ = minPort;
    maxPort_ = maxPort;

    gettimeofday(&mark_, NULL);

    rateControl_ = 0;
     
    sending_ = true;
    rtcpProcessor_ = boost::shared_ptr<RtcpProcessor> (new RtcpProcessor((MediaSink*)this, (MediaSource*) this));
    send_Thread_ = boost::thread(&WebRtcConnection::sendLoop, this);
  }
Example #11
void RtpPacketQueue::pushPacket(const char *data, int length)
{
    const RtpHeader *currentHeader = reinterpret_cast<const RtpHeader*>(data);
    uint16_t currentSequenceNumber = currentHeader->getSeqNumber();

    if(lastSequenceNumberGiven_ >= 0 && (rtpSequenceLessThan(currentSequenceNumber, (uint16_t)lastSequenceNumberGiven_) || currentSequenceNumber == lastSequenceNumberGiven_)) {
        // this sequence number is less than the stuff we've already handed out, which means it's too late to be of any value.
        ELOG_WARN("SSRC:%u, Payload: %u, discarding very late sample %d that is <= %d",currentHeader->getSSRC(),currentHeader->getPayloadType(), currentSequenceNumber, lastSequenceNumberGiven_);
        return;
    }

    // TODO this should be a secret of the dataPacket class.  It should maintain its own memory
    // and copy stuff as necessary.
    boost::shared_ptr<dataPacket> packet(new dataPacket());
    memcpy(packet->data, data, length);
    packet->length = length;

    // let's insert this packet where it belongs in the queue.
    boost::mutex::scoped_lock lock(queueMutex_);
    std::list<boost::shared_ptr<dataPacket> >::iterator it;
    for (it=queue_.begin(); it != queue_.end(); ++it) {
        const RtpHeader *header = reinterpret_cast<const RtpHeader*>((*it)->data);
        uint16_t sequenceNumber = header->getSeqNumber();

        if (sequenceNumber == currentSequenceNumber) {
            // We already have this sequence number in the queue.
            ELOG_INFO("discarding duplicate sample %d", currentSequenceNumber);
            break;
        }

        if (this->rtpSequenceLessThan(sequenceNumber, currentSequenceNumber)) {
            queue_.insert(it, packet);
            break;
        }
    }

    if (it == queue_.end()) {
        // Nothing was inserted above: either the packet is older than
        // everything in the queue, or the queue is empty; append it.
        queue_.push_back(packet);
    }

    // Enforce our max queue size.
    while(getDepthInSeconds() > maxDepthInSeconds_){
        ELOG_WARN("RtpPacketQueue - Discarding a sample due to excessive queue depth");
        queue_.pop_back();  // remove oldest samples.
    }
}
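rtpSequenceLessThan() is referenced above but not listed; a sketch using standard 16-bit serial-number arithmetic (in the spirit of RFC 1982), where a packet is "earlier" if it trails by less than half the sequence space:

// Sketch: true if x is earlier than y under 16-bit wrap-around.
bool RtpPacketQueue::rtpSequenceLessThan(uint16_t x, uint16_t y) {
    int diff = y - x;           // both operands promote to int
    if (diff > 0) {
        return diff < 0x8000;   // y is ahead by less than half the space
    } else if (diff < 0) {
        return diff < -0x8000;  // y wrapped past zero while x did not
    }
    return false;               // equal
}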
Example #12
void PliPacerHandler::sendFIR() {
  ELOG_WARN("%s message: Timed out waiting for a keyframe", connection_->toLog());
  getContext()->fireWrite(RtpUtils::createFIR(video_source_ssrc_, video_sink_ssrc_, fir_seq_number_++));
  getContext()->fireWrite(RtpUtils::createFIR(video_source_ssrc_, video_sink_ssrc_, fir_seq_number_++));
  getContext()->fireWrite(RtpUtils::createFIR(video_source_ssrc_, video_sink_ssrc_, fir_seq_number_++));
  waiting_for_keyframe_ = false;
  scheduled_pli_ = std::make_shared<ScheduledTaskReference>();
}
Example #13
void NicerConnection::startChecking() {
  UINT4 r = nicer_->IcePeerContextPairCandidates(peer_);
  if (r) {
    ELOG_WARN("%s message: Error pairing candidates (%d)", toLog(), r);
    return;
  }

  r = nicer_->IcePeerContextStartChecks2(peer_, 1);
  if (r) {
    if (r == R_NOT_FOUND) {
      ELOG_DEBUG("%s message: Could not start ICE checks, assuming trickle", toLog());
    } else {
      ELOG_WARN("%s message: Could not start peer checks", toLog());
    }
  }
  ELOG_DEBUG("Checks started");
}
Example #14
void NicerConnection::startGathering() {
  UINT4 r = nicer_->IceGather(ctx_, &NicerConnection::gather_callback, this);
  if (r && r != R_WOULDBLOCK) {
    ELOG_WARN("%s message: Couldn't start ICE gathering", toLog());
    assert(false);
  }
  ELOG_INFO("%s message: start gathering", toLog());
}
Example #15
RtpPacketQueue::RtpPacketQueue(unsigned int max, unsigned int depth) : lastSequenceNumberGiven_(-1), max_(max), depth_(depth)
{
    if(depth_ >= max_) {
        ELOG_WARN("invalid configuration, depth_: %d, max_: %d; reset to defaults", depth_, max_);
        depth_ = erizo::DEFAULT_DEPTH;
        max_ = erizo::DEFAULT_MAX;
    }
}
Example #16
void NicerConnection::start() {
  async([this] {
    startSync();
  });
  std::future_status status = start_promise_.get_future().wait_for(std::chrono::seconds(5));
  if (status == std::future_status::timeout) {
    ELOG_WARN("%s Start timed out", toLog());
  }
}
void OneToManyTranscoder::closeAll() {
  ELOG_WARN("OneToManyTranscoder closeAll");
  std::map<std::string, MediaSink*>::iterator it = subscribers.begin();
  while (it != subscribers.end()) {
    delete (*it).second;
    it = subscribers.erase(it);
  }
  delete this->publisher;
}
Example #18
RtpPacketQueue::RtpPacketQueue(double depthInSeconds, double maxDepthInSeconds) :
    lastSequenceNumberGiven_(-1), timebase_(0), depthInSeconds_(depthInSeconds), maxDepthInSeconds_(maxDepthInSeconds)
{
    if(depthInSeconds_ >= maxDepthInSeconds_) {
        ELOG_WARN("invalid configuration, depth_: %f, max_: %f; reset to defaults", depthInSeconds_, maxDepthInSeconds_);
        depthInSeconds_ = erizo::DEFAULT_DEPTH;
        maxDepthInSeconds_ = erizo::DEFAULT_MAX;
    }
}
Example #19
void NicerConnection::setRemoteCredentials(const std::string& username, const std::string& password) {
  auto promise = std::make_shared<std::promise<void>>();
  async([username, password, promise, this] {
    setRemoteCredentialsSync(username, password);
    promise->set_value();
  });
  auto status = promise->get_future().wait_for(std::chrono::seconds(1));
  if (status == std::future_status::timeout) {
    ELOG_WARN("%s message: Could not set remote credentials", toLog());
  }
}
    /**
     * Load one audio frame from the FIFO buffer and encode it into the
     * output packet.
     */
    int AudioDecoder::load_encode(AVPacket& output_packet)
    {
        /** Temporary storage for the output samples of the frame to be encoded. */
        AVFrame *output_frame;
        /**
         * Use the maximum possible number of samples per frame.
         * If the FIFO buffer holds fewer samples than the maximum frame
         * size, use that smaller number instead.
         */
        const int frame_size = FFMIN(av_audio_fifo_size(fifo),
                output_codec_context->frame_size);
        /** Initialize temporary storage for one output frame. */
        if (init_output_frame(&output_frame, output_codec_context, frame_size))
        {
            ELOG_WARN(" init_output_frame failed!! frame_size=%d", frame_size);
            return 0;
        }


        /**
         * Read as many samples from the FIFO buffer as required to fill the frame.
         * The samples are stored in the frame temporarily.
         */
        if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
            ELOG_WARN("Could not read data from FIFO\n");
            av_frame_free(&output_frame);
            return 0;
        }
        
        ELOG_DEBUG("fifo read %d, now left %d", frame_size, av_audio_fifo_size(fifo));

        /** Encode one frame worth of audio samples. */
        int pktlen = encode_audio_frame(output_frame, output_packet);
        if (pktlen <= 0)
        {
            ELOG_WARN("Failed to encode_audio_frame!!");
        }
        av_frame_free(&output_frame);

        return pktlen;
    }
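init_output_frame(), used above, is likewise not in this listing; a sketch modeled on FFmpeg's transcode_aac example:

    /** Initialize one output frame for a fixed number of samples (sketch). */
    int AudioDecoder::init_output_frame(AVFrame **frame,
            AVCodecContext *output_codec_context, int frame_size)
    {
        int error;
        if (!(*frame = av_frame_alloc())) {
            ELOG_WARN("Could not allocate output frame");
            return AVERROR_EXIT;
        }
        /** Frame parameters must be set before the buffer can be allocated. */
        (*frame)->nb_samples = frame_size;
        (*frame)->channel_layout = output_codec_context->channel_layout;
        (*frame)->format = output_codec_context->sample_fmt;
        (*frame)->sample_rate = output_codec_context->sample_rate;
        if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
            ELOG_WARN("Could not allocate output frame samples %s", get_error_text(error));
            av_frame_free(frame);
            return error;
        }
        return 0;
    }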
Example #21
int SrtpChannel::unprotectRtcp(char* buffer, int *len) {

    if (!active_)
        return 0;
    int val = srtp_unprotect_rtcp(receive_session_, buffer, len);
    if (val == 0) {
        return 0;
    } else {
        ELOG_WARN("Error SrtpChannel::unprotectRtcp %u", val);
        return -1;
    }
}
Example #22
int SrtpChannel::protectRtcp(char* buffer, int *len) {

    if (!active_)
        return 0;
    int val = srtp_protect_rtcp(send_session_, (char*) buffer, len);
    if (val == 0) {
        return 0;
    } else {
        rtcpheader* head = reinterpret_cast<rtcpheader*>(buffer);
        ELOG_WARN("Error SrtpChannel::protectRtcp %upackettype %d ", val, head->packettype);
        return -1;
    }
}
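One caller-side detail: srtp_protect_rtcp() grows the packet in place by the SRTP trailer (auth tag plus the RTCP E-flag/index word), so the buffer needs trailing headroom. A hypothetical caller sketch (rtcp_packet and rtcp_len are assumptions):

// Reserve SRTP trailer headroom before protecting an RTCP packet.
char buffer[1500 + SRTP_MAX_TRAILER_LEN];
int len = rtcp_len;                // plain compound RTCP packet length
memcpy(buffer, rtcp_packet, len);
if (srtp_channel->protectRtcp(buffer, &len) == 0) {
    // buffer now holds the protected packet; len was updated in place
}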
Example #23
int SrtpChannel::unprotectRtp(char* buffer, int *len) {

    if (!active_)
        return 0;
    int val = srtp_unprotect(receive_session_, (char*) buffer, len);
    if (val == 0) {
        return 0;
    } else {
        rtcpheader* head = reinterpret_cast<rtcpheader*>(buffer);
        rtpheader* headrtp = reinterpret_cast<rtpheader*>(buffer);
        ELOG_WARN("Error SrtpChannel::unprotectRtp %u packettype %d pt %d", val, head->packettype, headrtp->payloadtype);
        return -1;
    }
}
Example #24
void NicerConnection::close() {
  boost::mutex::scoped_lock lock(close_mutex_);  // must be named, or the temporary unlocks immediately
  if (!closed_) {
    closed_ = true;
    async([this] {
      closeSync();
    });
    std::future_status status = close_promise_.get_future().wait_for(std::chrono::seconds(1));
    if (status == std::future_status::timeout) {
      ELOG_WARN("%s Stop timed out", toLog());
      closeSync();
    }
  }
}
Example #25
std::string NicerConnection::getNewPwd() {
  char* pwd;
  int r;

  if ((r=nicer_->IceGetNewIcePwd(&pwd))) {
    ELOG_WARN("%s message: Unable to get new ice pwd", toLog());
    return "";
  }

  std::string pwdStr = pwd;
  RFREE(pwd);

  return pwdStr;
}
Example #26
std::string NicerConnection::getNewUfrag() {
  char* ufrag;
  int r;

  if ((r=nicer_->IceGetNewIceUFrag(&ufrag))) {
    ELOG_WARN("%s message: Unable to get new ice ufrag", toLog());
    return "";
  }

  std::string ufragStr = ufrag;
  RFREE(ufrag);

  return ufragStr;
}
    int AudioDecoder::add_samples_to_fifo(AVAudioFifo *fifo,
            uint8_t **converted_input_samples,
            const int frame_size)
    {
        int error;
        /**
         * Make the FIFO as large as it needs to be to hold both,
         * the old and the new samples.
         */
        if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
            ELOG_WARN("Could not reallocate FIFO");
            return error;
        }
        /** Store the new samples in the FIFO buffer. */
        if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
                    frame_size) < frame_size) {
            ELOG_WARN("Could not write data to FIFO");
            return AVERROR_EXIT;
        }

        ELOG_DEBUG("added frame to fifo, now size %d", av_audio_fifo_size(fifo));

        return 0;
    }
Example #28
void Resender::resend(const boost::system::error_code& ec)
{
    if (ec == boost::asio::error::operation_aborted) {
        ELOG_DEBUG("%s - Cancelled", nice_->transportName->c_str());
        return;
    }

    if (nice_ != NULL) {
        ELOG_WARN("%s - Resending DTLS message to %d", nice_->transportName->c_str(), comp_);
        int val = nice_->sendData(comp_, data_, len_);
        if (val < 0) {
            sent_ = -1;
        } else {
            sent_ = 2;
        }
    }
}
Example #29
void NicerConnection::setRemoteCredentialsSync(const std::string& username, const std::string& password) {
  ELOG_DEBUG("%s message: Setting remote credentials", toLog());
  std::vector<char *> attributes;
  std::string ufrag = std::string("ice-ufrag: ") + username;
  std::string pwd = std::string("ice-pwd: ") + password;
  attributes.push_back(const_cast<char *>(ufrag.c_str()));
  attributes.push_back(const_cast<char *>(pwd.c_str()));
  UINT4 r = nicer_->IcePeerContextParseStreamAttributes(peer_,
                                                        stream_,
                                                        attributes.size() ? &attributes[0] : nullptr,
                                                        attributes.size());
  if (r) {
    ELOG_WARN("%s message: Error parsing stream attributes", toLog());
    return;
  }

  startChecking();
}
Example #30
  int ExternalOutput::writeAudioData(char* buf, int len) {
    if (in != NULL) {
      if (videoCodec_ == NULL) {
        return 0;
      }
      rtpheader *head = (rtpheader*)buf;
      // We don't need any other payload at this time
      if (head->payloadtype != PCMU_8000_PT) {
        return 0;
      }

      int ret = in->unpackageAudio(reinterpret_cast<unsigned char*>(buf), len,
          unpackagedAudioBuffer_);
      if (ret <= 0)
        return ret;
      timeval time;
      gettimeofday(&time, NULL);
      unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
      if (millis - lastTime_ > FIR_INTERVAL_MS) {
        this->sendFirPacket();
        lastTime_ = millis;
      }
      if (initTime_ == 0) {
        initTime_ = millis;
      }
      if (millis < initTime_) {
        ELOG_WARN("initTime is smaller than currentTime, possible problems when recording");
      }
      if (ret > UNPACKAGE_BUFFER_SIZE) {
        ELOG_ERROR("Unpackaged Audio size too big %d", ret);
      }
      AVPacket avpkt;
      av_init_packet(&avpkt);
      avpkt.data = unpackagedAudioBuffer_;
      avpkt.size = ret;
      avpkt.pts = millis - initTime_;
      avpkt.stream_index = 1;
      av_write_frame(context_, &avpkt);
      av_free_packet(&avpkt);
      return ret;
    }
    return 0;
  }