Code Example #1
bool RTPBufferAlternative::copyNextPackageIntoPackage(RTPPackageHandler &package)
{
	// Find the currently selected package in the buffer
	RTPBufferPackage *currentReadPackage = ringBuffer[currentReadPos];
	incrementReadPos();
	// Get the audioData from that selected package
	void *currentDataInBuffer = currentReadPackage->getPacketContent();
	// Get the header data from that selected package
	RTPHeader* currentHeaderData = currentReadPackage->getHeader();

	// Check if an underflow was detected (package has already been read)
	bool underflow = currentReadPackage->underflow();
	// If underflow was detected -> abort
	if (underflow)
		return underflow;

	// Copy data from buffer into package
	void* packageBuffer = package.getWorkBuffer();
	int rtpHeaderSize = package.getRTPHeaderSize();
	int payloadSize = package.getMaximumPayloadSize();
	memcpy((char*)packageBuffer, currentHeaderData, rtpHeaderSize);
	memcpy((char*)packageBuffer + rtpHeaderSize, currentDataInBuffer, payloadSize);

	// set last read sequence number
	lastReadSeqNr = currentHeaderData->getSequenceNumber();
	return underflow;
}
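incrementReadPos() is not shown in this snippet. A minimal sketch of how such a ring-buffer read cursor typically advances, assuming the class holds a fixed-size array and a bufferSize member (the member name is a guess, not taken from the original source):

	// Hypothetical sketch: advance the read cursor and wrap around at the
	// end of the ring buffer. "bufferSize" is an assumed member name.
	void RTPBufferAlternative::incrementReadPos()
	{
		currentReadPos = (currentReadPos + 1) % bufferSize;
	}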
Code Example #2
int OneToManyTranscoder::deliverVideoData_(char* buf, int len) {
	memcpy(sendVideoBuffer_, buf, len);

	RTPHeader* theHead = reinterpret_cast<RTPHeader*>(buf);
//	ELOG_DEBUG("extension %d pt %u", theHead->getExtension(),
//			theHead->getPayloadType());

	if (theHead->getPayloadType() == 100) {
		ip_->deliverVideoData(sendVideoBuffer_, len);
	} else {
		this->receiveRtpData((unsigned char*) buf, len);
	}

//	if (subscribers.empty() || len <= 0)
//		return 0;
//	if (sentPackets_ % 500 == 0) {
//		publisher->sendFirPacket();
//	}
//	std::map<int, WebRtcConnection*>::iterator it;
//	for (it = subscribers.begin(); it != subscribers.end(); it++) {
//		memset(sendVideoBuffer_, 0, len);
//		memcpy(sendVideoBuffer_, buf, len);
//		(*it).second->receiveVideoData(sendVideoBuffer_, len);
//	}
//	memset(sendVideoBuffer_, 0, len);
//	memcpy(sendVideoBuffer_, buf, len);
//	sink_->sendData((unsigned char*)sendVideoBuffer_,len);

	sentPackets_++;
	return 0;
}
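Payload type 100 is from the dynamic RTP range (96-127); in these examples it is the number licode uses for VP8 video (examples #8 and #9 parse and set PT 100, and example #10 compares against VP8_90000_PT). So VP8 packets are forwarded to the publisher through ip_, while everything else is looped back through receiveRtpData.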
Code Example #3
File: ExternalOutput.cpp Project: hfeeki/licode
void ExternalOutput::writeAudioData(char* buf, int len) {
    RTPHeader* head = reinterpret_cast<RTPHeader*>(buf);

    if (initTimeAudio_ == -1) {
        initTimeAudio_ = head->getTimestamp();
    }

    timeval time;
    gettimeofday(&time, NULL);
    unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
    if (millis - lastFullIntraFrameRequest_ > FIR_INTERVAL_MS) {
        this->sendFirPacket();
        lastFullIntraFrameRequest_ = millis;
    }

    // Figure out our audio codec.
    if(context_->oformat->audio_codec == AV_CODEC_ID_NONE) {
        // We don't need any other payload at this time
        if(head->getPayloadType() == PCMU_8000_PT) {
            context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
        } else if (head->getPayloadType() == OPUS_48000_PT) {
            context_->oformat->audio_codec = AV_CODEC_ID_OPUS;
        }
    }

    // check if we can initialize our context
    this->initContext();

    if (audio_stream_ == NULL) {
        // not yet.
        return;
    }

    int ret = inputProcessor_->unpackageAudio(reinterpret_cast<unsigned char*>(buf), len, unpackagedAudioBuffer_);
    if (ret <= 0)
        return;

//    ELOG_DEBUG("Writing audio frame %d with timestamp %u, input timebase: %d/%d, target timebase: %d/%d",head->getSeqNumber(), head->getTimestamp(),
//               audio_stream_->codec->time_base.num, audio_stream_->codec->time_base.den,    // timebase we requested
//               audio_stream_->time_base.num, audio_stream_->time_base.den);                 // actual timebase

    long long currentTimestamp = head->getTimestamp();
    if (currentTimestamp - initTimeAudio_ < 0) {
        // we wrapped.  add 2^32 to correct this.  We only handle a single wrap around since that's 13 hours of recording, minimum.
        currentTimestamp += 0x100000000LL; // 2^32
    }
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = unpackagedAudioBuffer_;
    avpkt.size = ret;
    avpkt.pts = (currentTimestamp - initTimeAudio_) / (audio_stream_->codec->time_base.den / audio_stream_->time_base.den);
    avpkt.stream_index = 1;
    av_write_frame(context_, &avpkt);
    av_free_packet(&avpkt);
}
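The pts computation converts RTP clock ticks into the stream's time base by integer division. As a worked example under the assumption that the codec time base was requested as 1/8000 (PCMU's RTP clock) and the muxer picked a stream time base of 1/1000: the divisor is 8000/1000 = 8, so one second of audio (8000 RTP ticks) lands at pts 1000, i.e. one second in 1/1000 units. If the muxer keeps the requested time base, the divisor is 1 and RTP ticks pass through unchanged.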
Code Example #4
void RTPBufferAlternative::copySilencePackageIntoPackage(RTPPackageHandler &package)
{
	char* packageBuffer = (char*)package.getWorkBuffer();
	RTPHeader *currentHeaderData = (RTPHeader*)package.getRTPPackageHeader();
	currentHeaderData->setSequenceNumber(lastReadSeqNr);
	unsigned int rtpHeaderSize = package.getRTPHeaderSize();
	unsigned int payloadSize = package.getMaximumPayloadSize();

	// Copy data from buffer into package
	memcpy((char*)packageBuffer, currentHeaderData, rtpHeaderSize);
	memset((char*)packageBuffer + rtpHeaderSize, 0, payloadSize);
}
Code Example #5
File: MediaProcessor.cpp Project: GaijinKa/licode
  int OutputProcessor::packageAudio(unsigned char* inBuff, int inBuffLen,
      unsigned char* outBuff) {

    if (audioPackager == 0) {
      ELOG_DEBUG("No se ha inicializado el codec de output audio RTP");
      return -1;
    }


    timeval time;
    gettimeofday(&time, NULL);
    long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);

    RTPHeader head;
    head.setSeqNumber(seqnum_++);
    head.setTimestamp(millis*8);   // PCMU runs on an 8 kHz RTP clock, so ticks = milliseconds * 8
    head.setSSRC(55543);
    head.setPayloadType(0);        // static payload type 0 = PCMU (RFC 3551)

    memcpy (rtpBuffer_, &head, head.getHeaderLength());
    memcpy(&rtpBuffer_[head.getHeaderLength()], inBuff, inBuffLen);
    //			sink_->sendData(rtpBuffer_, l);
    //	rtpReceiver_->receiveRtpData(rtpBuffer_, (inBuffLen + RTP_HEADER_LEN));
    return (inBuffLen+head.getHeaderLength());
  }
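The memcpy of the RTPHeader object onto the wire only works because the class mirrors the 12-byte fixed header of RFC 3550. An illustrative packed-struct sketch of that layout (the field names and the bitfield order, valid for a little-endian host, are mine, not licode's):

    // RFC 3550 fixed RTP header, 12 bytes; multi-byte fields are big-endian
    // on the wire. Bitfield order shown is for a little-endian host.
    struct RtpFixedHeader {
        uint8_t  cc          :4; // CSRC count
        uint8_t  extension   :1; // header extension flag
        uint8_t  padding     :1; // padding flag
        uint8_t  version     :2; // always 2
        uint8_t  payloadtype :7; // payload type
        uint8_t  marker      :1; // marker bit
        uint16_t seqnum;         // sequence number
        uint32_t timestamp;      // media clock ticks (8 kHz here)
        uint32_t ssrc;           // synchronization source id
    };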
Code Example #6
File: TestRTP.cpp Project: Kaayy-J/OHMComm-Light
void TestRTP::testRTPPackage()
{
    std::string payload("This is a dummy payload");
    RTPPackageHandler pack(100, PayloadType::GSM);

    pack.createNewRTPPackage((char *)payload.c_str(), payload.size());
    const void* headerBuffer = pack.getRTPPackageHeader();
    RTPHeader* header = (RTPHeader *)headerBuffer;
    TEST_ASSERT_EQUALS_MSG(header->getPayloadType(), PayloadType::GSM, "Payload types don't match! 01");

    const void* contentBuffer = pack.getRTPPackageData();
    TEST_ASSERT_EQUALS_MSG(memcmp(payload.c_str(), contentBuffer, payload.size()), 0, "Payloads don't match! 02");
    
    pack.setActualPayloadSize(payload.length());
    TEST_ASSERT(pack.getMaximumPackageSize() >= pack.getRTPHeaderSize() + pack.getMaximumPayloadSize());
    TEST_ASSERT(pack.getActualPayloadSize() <= pack.getMaximumPayloadSize());
    TEST_ASSERT(RTPPackageHandler::isRTPPackage(pack.getWorkBuffer(), pack.getActualPayloadSize()));
}
Code Example #7
File: MediaProcessor.cpp Project: GaijinKa/licode
  int InputProcessor::unpackageAudio(unsigned char* inBuff, int inBuffLen,
      unsigned char* outBuff) {

    RTPHeader* head = reinterpret_cast<RTPHeader*>(inBuff);
    if (head->getPayloadType() != 0) {
      ELOG_DEBUG("PT AUDIO %d", head->getPayloadType());
      //      return -1;
    }

    //    ELOG_DEBUG("Audio Timestamp %u", head->getTimestamp());
    int l = inBuffLen - RTPHeader::MIN_SIZE;
    if (l < 0) {
      ELOG_ERROR("Error unpackaging audio");
      return 0;
    }
    memcpy(outBuff, &inBuff[RTPHeader::MIN_SIZE], l);

    return l;
  }
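RTPHeader::MIN_SIZE is the 12-byte fixed header, so this unpackager silently assumes the packet carries no CSRC list and no header extension. A hedged sketch of computing the real header length from the raw bytes, following RFC 3550 (the helper is mine, not part of licode):

    // Sketch: actual RTP header length = 12 + 4 * CC + optional extension
    // block (4-byte extension header + length counted in 32-bit words).
    int rtpHeaderLength(const unsigned char* pkt, int len) {
        if (len < 12)
            return -1;
        int cc = pkt[0] & 0x0F;             // CSRC count, low 4 bits of byte 0
        bool hasExt = (pkt[0] & 0x10) != 0; // X bit
        int length = 12 + 4 * cc;
        if (hasExt) {
            if (len < length + 4)
                return -1;
            int extWords = (pkt[length + 2] << 8) | pkt[length + 3];
            length += 4 + 4 * extWords;
        }
        return length <= len ? length : -1;
    }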
Code Example #8
File: MediaProcessor.cpp Project: GaijinKa/licode
  int InputProcessor::unpackageVideo(unsigned char* inBuff, int inBuffLen,
      unsigned char* outBuff, int* gotFrame, int* estimatedFps, double* videoTs, bool* KFrame) {

    if (videoUnpackager == 0) {
      ELOG_DEBUG("Unpackager not correctly initialized");
      return -1;
    }
    bool tempKeyFrame2 = false;

    int inBuffOffset = 0;
    *gotFrame = 0;
    RTPHeader* head = reinterpret_cast<RTPHeader*>(inBuff);

    //head->getMarker());
    //    if ( head->getSSRC() != 55543 /*&& head->payloadtype!=101*/) {
    //      return -1;
    //    }
    if (head->getPayloadType() != 100) {
      return -1;
    }

    int l = inBuffLen - head->getHeaderLength();
    inBuffOffset += head->getHeaderLength();

    erizo::RTPPayloadVP8* parsed = pars.parseVP8(
        (unsigned char*) &inBuff[inBuffOffset], l, &tempKeyFrame2);
    memcpy(outBuff, parsed->data, parsed->dataLength);

    if (tempKeyFrame2)
      tempKeyFrame = true;

//    if (tempKeyFrame) {
//    	ELOG_WARN("GOT KEYFRAME");
//    }

    *KFrame = tempKeyFrame;

    if (head->getMarker()) {
      *estimatedFps = 0;
      if (lastVideoTs_) {
        *estimatedFps = 90000 / (head->getTimestamp() - lastVideoTs_);
      }
      lastVideoTs_ = head->getTimestamp();
      *videoTs = lastVideoTs_;
      *gotFrame = 1;
      tempKeyFrame = false;
    }

    int ret = parsed->dataLength;
    delete parsed;
    return ret;
  }
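The FPS estimate comes straight from the RTP timestamp delta between packets that close a frame (marker bit set): video runs on a 90 kHz clock, so at 30 fps consecutive frames are 90000 / 30 = 3000 ticks apart and 90000 / 3000 recovers 30. Note the division is only guarded against a zero lastVideoTs_, not against two markers carrying the same timestamp, which would divide by zero.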
Code Example #9
File: MediaProcessor.cpp Project: GaijinKa/licode
  int OutputProcessor::packageVideo(unsigned char* inBuff, int buffSize, unsigned char* outBuff) {
    if (videoPackager == 0) {
      ELOG_DEBUG("No se ha inicailizado el codec de output vídeo RTP");
      return -1;
    }

    //    ELOG_DEBUG("To packetize %u", buffSize);
    if (buffSize <= 0)
      return -1;
    RtpVP8Fragmenter frag(inBuff, buffSize, 1100);
    bool lastFrame = false;
    unsigned int outlen = 0;
    timeval time;
    gettimeofday(&time, NULL);
    long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
    //		timestamp_ += 90000 / mediaInfo.videoCodec.frameRate;

    do {
      outlen = 0;
      frag.getPacket(outBuff, &outlen, &lastFrame);
      RTPHeader rtpHeader;
      rtpHeader.setMarker(lastFrame ? 1 : 0); // marker only on the frame's last fragment
      rtpHeader.setSeqNumber(seqnum_++);
      rtpHeader.setTimestamp(millis*90);      // 90 kHz RTP clock for video
      rtpHeader.setSSRC(55543);
      rtpHeader.setPayloadType(100);          // dynamic payload type used for VP8 here
      memcpy(rtpBuffer_, &rtpHeader, rtpHeader.getHeaderLength());
      memcpy(&rtpBuffer_[rtpHeader.getHeaderLength()], outBuff, outlen);

      int l = outlen + rtpHeader.getHeaderLength();
      //			sink_->sendData(rtpBuffer_, l);
      rtpReceiver_->receiveRtpData(rtpBuffer_, l);
    } while (!lastFrame);

    return 0;
  }
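Capping each fragment's payload at 1100 bytes leaves room for the 12-byte RTP header and lower-layer overhead well inside a typical 1500-byte Ethernet MTU. The marker bit is set only on the frame's final fragment, which is exactly the condition unpackageVideo in example #8 uses to report a complete frame.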
Code Example #10
File: ExternalOutput.cpp Project: hfeeki/licode
void ExternalOutput::writeVideoData(char* buf, int len) {
    RTPHeader* head = reinterpret_cast<RTPHeader*>(buf);
    if (head->getPayloadType() == RED_90000_PT) {
        int totalLength = head->getHeaderLength();
        int rtpHeaderLength = totalLength;
        redheader *redhead = (redheader*) (buf + totalLength);
        if (redhead->payloadtype == VP8_90000_PT) {
            while (redhead->follow) {
                totalLength += redhead->getLength() + 4; // RED header
                redhead = (redheader*) (buf + totalLength);
            }
            // Parse RED packet to VP8 packet.
            // Copy RTP header
            memcpy(deliverMediaBuffer_, buf, rtpHeaderLength);
            // Copy payload data
            memcpy(deliverMediaBuffer_ + totalLength, buf + totalLength + 1, len - totalLength - 1);
            // Copy payload type
            RTPHeader *mediahead = reinterpret_cast<RTPHeader*>(deliverMediaBuffer_);
            mediahead->setPayloadType(redhead->payloadtype);
            buf = reinterpret_cast<char*>(deliverMediaBuffer_);
            len = len - 1 - totalLength + rtpHeaderLength;
        }
    }

    if (initTimeVideo_ == -1) {
        initTimeVideo_ = head->getTimestamp();
    }

    int gotUnpackagedFrame = 0;
    int ret = inputProcessor_->unpackageVideo(reinterpret_cast<unsigned char*>(buf), len, unpackagedBufferpart_, &gotUnpackagedFrame);
    if (ret < 0)
        return;

    this->initContext();

    if (video_stream_ == NULL) {
        // could not init our context yet.
        return;
    }

    unpackagedSize_ += ret;
    unpackagedBufferpart_ += ret;

    if (gotUnpackagedFrame) {
        unpackagedBufferpart_ -= unpackagedSize_;

//        ELOG_DEBUG("Writing video frame %d with timestamp %u, length %d, input timebase: %d/%d, target timebase: %d/%d", head->getSeqNumber(),
//                   head->getTimestamp(), unpackagedSize_,
//                   video_stream_->codec->time_base.num, video_stream_->codec->time_base.den,    // timebase we requested
//                   video_stream_->time_base.num, video_stream_->time_base.den);                 // actual timebase

        long long currentTimestamp = head->getTimestamp();
        if (currentTimestamp - initTimeVideo_ < 0) {
            // we wrapped.  add 2^32 to correct this.  We only handle a single wrap around since that's ~13 hours of recording, minimum.
            currentTimestamp += 0x100000000LL; // 2^32
        }

        long long timestampToWrite = (currentTimestamp - initTimeVideo_) / (90000 / video_stream_->time_base.den);  // All of our video offerings are using a 90khz clock.

        AVPacket avpkt;
        av_init_packet(&avpkt);
        avpkt.data = unpackagedBufferpart_;
        avpkt.size = unpackagedSize_;
        avpkt.pts = timestampToWrite;
        avpkt.stream_index = 0;
        av_write_frame(context_, &avpkt);
        av_free_packet(&avpkt);
        unpackagedSize_ = 0;
        unpackagedBufferpart_ = unpackagedBuffer_;
    }
}
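The redheader struct is not shown here, but the arithmetic matches RFC 2198: every non-final block carries a 4-byte header (hence getLength() + 4 while follow is set), and the final block carries only a single byte holding its payload type (the + 1 skipped when the payload is copied out). An illustrative sketch of the 4-byte block header (names and the little-endian bitfield order are mine, not licode's):

    // RFC 2198 RED block header: F(1) | block PT(7) | ts offset(14) | length(10).
    // The final block header is just one byte: F = 0 plus the payload type.
    struct RedBlockHeader {
        uint8_t payloadtype :7; // payload type of this block
        uint8_t follow      :1; // F bit: another block header follows
        uint8_t tsAndLen[3];    // 14-bit timestamp offset + 10-bit block length
        uint16_t getLength() const {
            // block length is the low 10 bits, network byte order
            return ((tsAndLen[1] & 0x03) << 8) | tsAndLen[2];
        }
    };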