void RTPEndpoint::onRTPPacket(RTPPacket &packet)
{
	//Check
	if (!sending)
		//Exit
		return;
	
	//Get type
	MediaFrame::Type packetType = packet.GetMedia();
	//Check types
	if (type!=packetType)
		//Exit
		return;
	//Check codec
	if (packet.GetCodec()!=codec)
	{
		//Store it
		codec = packet.GetCodec();
		//Depending on the type
		switch(packetType)
		{
			case MediaFrame::Audio:
				//Set it
				RTPSession::SetSendingAudioCodec((AudioCodec::Type)codec);
				break;
			case MediaFrame::Video:
				//Set it
				RTPSession::SetSendingVideoCodec((VideoCodec::Type)codec);
				break;
			case MediaFrame::Text:
				//Set it
				RTPSession::SetSendingTextCodec((TextCodec::Type)codec);
				break;
		}
	}

	//Get difference from latest frame
	QWORD dif = getUpdDifTime(&prev);

	//If it was reset
	if (reseted)
	{
		//Get new time
		timestamp += dif*freq/1000;
		//Not reset anymore
		reseted = false;
		
	} else {
		//Get dif from packet timestamp
		timestamp += packet.GetTimestamp()-prevts;
	}

	//Update prev rtp ts
	prevts = packet.GetTimestamp();

	//Send it
	RTPSession::SendPacket(packet,timestamp);
}
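
A note on the timestamp handling above: after a reset there is no usable RTP timestamp delta, so the code falls back to the elapsed wall-clock time scaled by the media clock rate. The helper below is a minimal sketch of that arithmetic only (the standalone function and its name are assumptions, not part of RTPEndpoint); with a 90 kHz video clock and 40 ms elapsed, the timestamp advances by 3600 ticks.

#include <cstdint>

// Minimal sketch (hypothetical helper): rebase an RTP timestamp from elapsed wall-clock time.
// difMs is the elapsed time in milliseconds, freq the RTP clock rate in Hz (e.g. 90000 for video).
static uint32_t rebaseTimestamp(uint32_t timestamp, uint64_t difMs, uint32_t freq)
{
	// Same arithmetic as timestamp += dif*freq/1000 in onRTPPacket
	return timestamp + (uint32_t)(difMs * freq / 1000);
}
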
bool whu_RtpRPicSAg::RecvPic(IplImage* RecvImg)
{
	bool done = false;
	char RecvBuf[1204];
	RTPTime delay(0.020);
	RTPTime starttime = RTPTime::CurrentTime();
	int length;
	uchar* ptr;
	session.BeginDataAccess();
		if (session.GotoFirstSource())
		{
			int line=0;
			do
			{
				RTPPacket *packet;
				
				while ((packet = session.GetNextPacket()) != 0)
				{
					timestamp1 = packet->GetTimestamp();
					length = packet->GetPayloadLength();
					RawData = packet->GetPayloadData();
					//Only copy what a full payload contains: a 4-byte line index followed by 1200 bytes of row data
					if (length >= 1204)
					{
						memcpy(RecvBuf,RawData,1204);
						memcpy(&line,RecvBuf,4);
						if (line>=0&&line<RecvImg->height)
						{
							ptr=(uchar*)(RecvImg->imageData+line*RecvImg->widthStep);
							memcpy(ptr,RecvBuf+4,1200);
						}
						else
						{
							printf("lost packet\n");
						}
					}
					else
					{
						printf("short packet\n");
					}

					session.DeletePacket(packet);
				}
			} while (session.GotoNextSource());
		}
		session.EndDataAccess();

		RTPTime::Wait(delay);

		RTPTime t = RTPTime::CurrentTime();
		t -= starttime;
		if (t > RTPTime(60.0))
			done = true;
		return done;
}
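
RecvPic expects each payload to be exactly 1204 bytes: a 4-byte line index followed by 1200 bytes of row data. The sketch below only illustrates that layout from the sending side; the helper name and the host-byte-order line index are assumptions derived from how RecvPic reads the buffer.

#include <cstdint>
#include <cstring>

// Minimal sketch (hypothetical helper): pack one image row into the 1204-byte payload RecvPic expects.
static void packLine(char out[1204], int32_t line, const unsigned char* rowData)
{
	memcpy(out, &line, 4);          // 4-byte line index, host byte order as read by RecvPic
	memcpy(out + 4, rowData, 1200); // 1200 bytes of row data
}
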
Example #3
// Thread function for RTP session.
static void* rtpSessionThread(void *arg)
{
    VideoStream* video = reinterpret_cast<VideoStream*>(arg);

    u_int8_t bigBuffer[MAX_FRAME_SIZE];

    unsigned int lastPacketTimestamp = 0;
    unsigned int currentIndex = 0;
    double last_time = 0;

    RTPSession session = createRtpSession(video->getMulticastIP(), 
                                          video->getPort());

    while (1) {
        session.Poll();

        // Distribute data from the session to connected clients if we've
        // got anything
        session.BeginDataAccess();

        if (session.GotoFirstSourceWithData()) {
            do {
                RTPPacket *packet = NULL;

                while ((packet = session.GetNextPacket()) != NULL) {
                    if ((packet->GetPayloadLength() > sizeof(bigBuffer)) || 
                        (packet->GetPayloadLength() == 0)) {
                        // Free the packet, we're not going to use it.
                        session.DeletePacket(packet);
                        continue; // Skip this packet and move on to the next one
                    }

                    // Check timestamps for new data.  A new timestamp means
                    // this is from a different time.
                    if (packet->GetTimestamp() != lastPacketTimestamp) {
                        video->decode((uint8_t*)&bigBuffer[0], currentIndex);

                        currentIndex = 0;
                        memset(&bigBuffer[0], 0, sizeof(bigBuffer));
                    } // End new timestamp optimization.


                    // Copy data into buffer
                    if (currentIndex + packet->GetPayloadLength() > sizeof(bigBuffer)) {
                        throw std::runtime_error("Frame buffer overflow");
                    }

                    memcpy(&bigBuffer[currentIndex], packet->GetPayloadData(),
                           packet->GetPayloadLength());
                    currentIndex += packet->GetPayloadLength();

                    // Mark our last timestamp
                    lastPacketTimestamp = packet->GetTimestamp();

                    // Free the packet.
                    session.DeletePacket(packet);
                } 
            } while (session.GotoNextSourceWithData()); 
        }

        session.EndDataAccess();
        RTPTime delay(0, 100); // 100usec

        // Update More Data
        bool moreData;
        session.WaitForIncomingData(delay, &moreData);
    }

    // Leave the session while sending BYE.
    RTPTime timeout(0.75f); //  Wait briefly.
    const char* reason = "Session Destroyed.";
    unsigned int reasonlen = strlen(reason);

    if (session.IsActive())
        session.BYEDestroy(timeout, reason, reasonlen);

    return NULL;
}
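
Since rtpSessionThread follows the pthread start-routine signature, it would typically be launched as in the sketch below; the wrapper function and the way the VideoStream pointer is obtained are assumptions.

#include <pthread.h>

// Usage sketch (assumed context): start the RTP receive loop on its own thread.
static pthread_t startRtpThread(VideoStream* video)
{
    pthread_t tid;
    // rtpSessionThread receives the VideoStream* through the void* argument
    pthread_create(&tid, NULL, rtpSessionThread, video);
    return tid;
}
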
Example #4
RTPPacket* FECDecoder::Recover()
{
	BYTE aux[8];
	QWORD lostMask = 0;

	//Check we have media packets
	if (!medias.size())
		//Exit
		return NULL;
	//Get First packet
	RTPPacket* first = medias.begin()->second;
	//Get the SSRC
	DWORD ssrc = first->GetSSRC();
	//Get first media packet
	DWORD minSeq = first->GetExtSeqNum();
	//Iterator on seq
	DWORD lastSeq = minSeq;

	//Set to 0
	memset(aux,0,8);
	//Create writer
	BitWritter w(aux,8);
	//vector of lost packet seqs
	std::vector<DWORD> losts;

	//For each media packet
	for (RTPOrderedPackets::iterator it=medias.begin();it!=medias.end();++it)
	{
		//Get seq
		DWORD cur = it->first;
		//Insert lost
		for (DWORD i=lastSeq+1;i<cur;++i)
		{
			//set mask bit to not present
			w.Put(1,0);
			//Add to the vector
			losts.push_back(i);
		}
		//Set last seq
		lastSeq = cur;
		//set mask bit to present
		w.Put(1,1);
	}

	//End it
	w.Flush();

	//Get mask
	lostMask = get8(aux,0);

	//Check we have lost packets
	if (!losts.size())
		//Exit
		return NULL;

	//For each lost packet
	for(std::vector<DWORD>::iterator it=losts.begin();it!=losts.end();++it)
	{
		//Get lost packet sequence
		DWORD seq = *it;

		//Search FEC packets associated this media packet
		for (FECOrderedData::iterator it2 = codes.begin();it2!=codes.end();++it2)
		{
			//Get FEC packet
			FECData *fec = it2->second;

			//Check if it is associated with this media packet in level 0
			if (!fec->IsProtectedAtLevel0(seq))
				//Next
				continue;

			//Get the seq difference between fec data and the media
			// (fec seq has to be <= media seq if the fec data protects the media data)
			DWORD diff = seq-fec->GetBaseExtSeq();
			//Shift the mask of the lost packets to check the present ones from the base seq
			QWORD mediaMask = lostMask << (fec->GetBaseExtSeq()-minSeq);
			//Remove lost packet bit from the fec mask
			QWORD fecMask = fec->GetLevel0Mask() & ~(((QWORD)1)<<(64-diff-1));

			//Compare needed packets with available packets, to check if we have all of them except the missing one
			if ((fecMask & mediaMask) == fecMask)
			{
				//Recovered media data
				BYTE	recovered[MTU+SRTP_MAX_TRAILER_LEN] ZEROALIGNEDTO32;
				//Get attributes
				bool  p  = fec->GetRecoveryP();
				bool  x  = fec->GetRecoveryX();
				BYTE  cc = fec->GetRecoveryCC();
				bool  m  = fec->GetRecoveryM();
				BYTE  pt = fec->GetRecoveryType();
				DWORD ts = fec->GetRecoveryTimestamp();
				WORD  l  = fec->GetRecoveryLength();
				//Get protection length
				DWORD level0Size = fec->GetLevel0Size();
				//Ensure there is enough room
				if (level0Size>MTU)
				{
					//Error
					Error("-FEC level 0 data size too big [%d]\n",level0Size);
					//Skip this one
					continue;
				}
				//Copy data
				memcpy(recovered,fec->GetLevel0Data(),level0Size);
				//Set value in temp buffer
				set8(aux,0,fecMask);
				//Get bit reader
				BitReader r(aux,8);
				//Read all media packet
				while(r.Left())
				{
					//If the media packet is used to reconstruct the packet
					if (r.Get(1))
					{
						//Get media packet
						RTPPacket* media = medias[fec->GetBaseExtSeq()+r.GetPos()-1];
						//Calculate recovered attributes
						p  ^= media->GetP();
						x  ^= media->GetX();
						cc ^= media->GetCC();
						m  ^= media->GetMark();
						pt ^= media->GetType();
						ts ^= media->GetTimestamp();
						l  ^= media->GetMediaLength();
						//Get data
						BYTE *payload = media->GetMediaData();
						//Calculate the xor
						for (int i=0;i<fmin(media->GetMediaLength(),level0Size);++i)
							//XOR
							recovered[i] ^= payload[i];
					}
				}
				//Create new video packet
				RTPPacket* packet = new RTPPacket(MediaFrame::Video,pt);
				//Set values
				packet->SetP(p);
				packet->SetX(x);
				packet->SetMark(m);
				packet->SetTimestamp(ts);
				//Set sequence number
				packet->SetSeqNum(seq);
				//Set seq cycles
				packet->SetSeqCycles(fec->GetBaseSeqCylcles());
				//Set ssrc
				packet->SetSSRC(ssrc);
				//Set payload and recovered length
				if (!packet->SetPayloadWithExtensionData(recovered,l))
				{
					//Delete packet
					delete(packet);
					//Error
					Error("-FEC payload of recovered packet to big [%u]\n",(unsigned int)l);
					//Skip
					continue;
				}

				Debug("-recovered packet len:%u ts:%u pts:%u seq:%d\n",l,ts,packet->GetTimestamp() ,packet->GetSeqNum());

				//Append the packet to the media packet list
				if (AddPacket(packet))
					//Return it if contained media
					return packet;
				else
					//Discard and continue
					delete(packet);
			}
		}
	}
	//Nothing found
	return NULL;
}
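
The recovery above relies on the XOR parity property of ULPFEC (RFC 5109): the FEC payload is the XOR of all protected media payloads, so XOR-ing it with every payload that did arrive leaves exactly the missing one. Below is a minimal, self-contained sketch of that core step; the function name and byte-vector interface are assumptions, not part of FECDecoder.

#include <cstdint>
#include <vector>

// Minimal sketch: recover the single missing payload from a FEC payload and the received ones.
static std::vector<uint8_t> recoverMissing(std::vector<uint8_t> fecPayload,
                                           const std::vector<std::vector<uint8_t> >& receivedPayloads)
{
	// XOR out every payload we did receive; what remains is the lost payload
	for (size_t p = 0; p < receivedPayloads.size(); ++p)
		for (size_t i = 0; i < receivedPayloads[p].size() && i < fecPayload.size(); ++i)
			fecPayload[i] ^= receivedPayloads[p][i];
	return fecPayload;
}
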
Example #5
/****************************************
* RecVideo
*	Gets the packets and displays them
*****************************************/
int VideoStream::RecVideo()
{
	VideoDecoder*	videoDecoder = NULL;
	VideoCodec::Type type;
	timeval 	before;
	timeval		lastFPURequest;
	DWORD		lostCount=0;
	DWORD		frameTime = (DWORD)-1;
	DWORD		lastSeq = RTPPacket::MaxExtSeqNum;
	bool		waitIntra = false;
	
	
	Log(">RecVideo\n");
	
	//Get now
	gettimeofday(&before,NULL);

	//Not sent FPU yet
	setZeroTime(&lastFPURequest);

	//While we have to capture
	while(receivingVideo)
	{
		//Get RTP packet
		RTPPacket* packet = rtp.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;
		
		//Get extended sequence number and timestamp
		DWORD seq = packet->GetExtSeqNum();
		DWORD ts = packet->GetTimestamp();

		//Get packet data
		BYTE* buffer = packet->GetMediaData();
		DWORD size = packet->GetMediaLength();

		//Get type
		type = (VideoCodec::Type)packet->GetCodec();

		//Lost packets since last
		DWORD lost = 0;

		//If not first
		if (lastSeq!=RTPPacket::MaxExtSeqNum)
			//Calculate losts
			lost = seq-lastSeq-1;

		//Increase total lost count
		lostCount += lost;

		//Update last sequence number
		lastSeq = seq;

		//If lost some packets or still have not got an iframe
		if(lostCount || waitIntra)
		{
			//Check if we got a listener and more than the minimum FPU period has elapsed since the last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Debug("-Requesting FPU lost %d\n",lostCount);
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is a redundant packet
		if (type==VideoCodec::RED)
		{
			//Get redundant packet
			RTPRedundantPacket* red = (RTPRedundantPacket*)packet;
			//Get primary codec
			type = (VideoCodec::Type)red->GetPrimaryCodec();
			//Check it is not ULPFEC redundant packet
			if (type==VideoCodec::ULPFEC)
			{
				//Delete packet
				delete(packet);
				//Skip
				continue;
			}
			//Update primary redundant payload
			buffer = red->GetPrimaryPayloadData();
			size = red->GetPrimaryPayloadSize();
		}
		
		//Check codecs
		if ((videoDecoder==NULL) || (type!=videoDecoder->type))
		{
			//If we already got one
			if (videoDecoder!=NULL)
				//Delete it
				delete videoDecoder;

			//Create video decorder for codec
			videoDecoder = VideoCodecFactory::CreateDecoder(type);

			//Check
			if (videoDecoder==NULL)
			{
				Error("Error creando nuevo decodificador de video [%d]\n",type);
				//Delete packet
				delete(packet);
				//Next
				continue;
			}
		}

		//Check if we have lost the last packet from the previous frame by comparing both timestamps
		if (ts>frameTime)
		{
			Debug("-lost mark packet ts:%u frameTime:%u\n",ts,frameTime);
			//Try to decode what is in the buffer
			videoDecoder->DecodePacket(NULL,0,1,1);
			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);

				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
		}
		
		//Update frame time
		frameTime = ts;
		
		//Decode packet
		if(!videoDecoder->DecodePacket(buffer,size,lost,packet->GetMark()))
		{
			//Check if we got a listener and more than the minimum FPU period has elapsed since the last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Log("-Requesting FPU decoder error\n");
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is the last packet of a frame
		if(packet->GetMark())
		{
			if (videoDecoder->IsKeyFrame())
				Debug("-Got Intra\n");
			
			//No frame time yet for next frame
			frameTime = (DWORD)-1;

			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);
				
				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
			//Check if we got the waiting refresh
			if (waitIntra && videoDecoder->IsKeyFrame())
				//Do not wait anymore
				waitIntra = false;
		}
		//Delete packet
		delete(packet);
	}

	//Delete decoder
	delete videoDecoder;

	Log("<RecVideo\n");
}
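
Both keyframe-request paths in RecVideo are throttled the same way: a request is only sent if more than minFPUPeriod has passed since the last one, and the timer is then reset. The standalone sketch below illustrates that check; the helper name and the microsecond units are assumptions based on the getDifTime/getUpdDifTime usage.

#include <sys/time.h>

// Minimal sketch (hypothetical helper): rate-limit Full Picture Update (keyframe) requests.
static bool shouldRequestFPU(timeval* lastFPURequest, long long minFPUPeriodUs)
{
	timeval now;
	gettimeofday(&now, NULL);
	long long elapsedUs = (now.tv_sec - lastFPURequest->tv_sec) * 1000000LL
	                    + (now.tv_usec - lastFPURequest->tv_usec);
	if (elapsedUs <= minFPUPeriodUs)
		//Too soon since the last request
		return false;
	//Reset the timer, as getUpdDifTime() does in RecVideo
	*lastFPURequest = now;
	return true;
}
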
Example #6
void ReceiverSession::RunNetwork() {
    log("Sending Hi package");
    const char *hi = "HI";
    int status = SendPacket(hi, sizeof(hi), 0, false,
                            sizeof(hi));// Say Hi, should cause the server to send data
    _checkerror(status);

    while (codec == NULL && isRunning) {
        log("Waiting for codec RTCP package...");
        RTPTime::Wait(RTPTime(1, 0));// Wait 1s
    }
    if (!isRunning) return;

    // Start decoder
    status = AMediaCodec_start(codec);
    if (status != AMEDIA_OK) return;
    log("Started decoder");

    // Extracting format data
    int32_t samples = 44100, channels = 1;
    AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &samples);
    AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &channels);
    audioplayer_initPlayback((uint32_t) samples, (uint32_t) channels);

    bool hasInput = true, hasOutput = true;
    int32_t beginTimestamp = -1, lastTimestamp = 0;
    uint16_t lastSeqNum = 0;
    while (hasInput && isRunning) {
        BeginDataAccess();
        if (GotoFirstSourceWithData()) {
            do {
                RTPPacket *pack;
                while ((pack = GetNextPacket()) != NULL) {
                    // We repurposed the marker flag as end of file
                    hasInput = !pack->HasMarker();

                    // Calculate playback time and do some lost packet corrections
                    uint32_t timestamp = pack->GetTimestamp();
                    if (beginTimestamp == -1) {// record first timestamp and use differences
                        beginTimestamp = timestamp;
                        lastSeqNum = pack->GetSequenceNumber() - (uint16_t) 1;
                    }
                    timestamp -= beginTimestamp;
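                    // If present, the AUDIOSYNC header extension carries an int64 in network byte
                    // order (microseconds, judging by the 'usec' name) used to sync local playback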
                    if (pack->HasExtension()
                        && pack->GetExtensionID() == AUDIOSYNC_EXTENSION_HEADER_ID
                        && pack->GetExtensionLength() == sizeof(int64_t)) {
                        int64_t *usec = (int64_t*) pack->GetExtensionData();
                        audioplayer_syncPlayback(ntohq(*usec), timestamp);
                    }

                    /*if (pack->HasExtension()) {
                        debugLog("Ext: %" PRIu16 " %lld", pack->GetExtensionID(), (long long)pack->GetExtensionLength());
                    }*/

                    // Handle lost packets, TODO How does this work with multiple senders?
                    if (pack->GetSequenceNumber() != lastSeqNum + 1) {
                        // TODO handle multiple packets with same timestamp. (Decode together?)
                        /*if (timestamp == lastTimestamp)*/
                        log("Packets jumped %u => %u | %.2f => %.2fs.", lastSeqNum,
                            pack->GetSequenceNumber(), lastTimestamp / 1E6,
                            timestamp / 1E6);
                        // TODO evaluate the impact of this time gap parameter
                        if (timestamp - lastTimestamp > SECOND_MICRO/20) {//50 ms
                            // According to the docs we need to flush if data is not adjacent.
                            // It is unclear how big these gaps can be and still be tolerable.
                            // During testing this call did cause the codec to throw errors,
                            // most likely in combination with split packets, where one of a
                            // set of packets with the same timestamp got lost.
                            log("Flushing codec");
                            AMediaCodec_flush(codec);
                        }
                    }
                    lastSeqNum = pack->GetSequenceNumber();
                    lastTimestamp = timestamp;

                    if (hasInput) {
                        //log("Received %.2f", timestamp / 1000000.0);
                        uint8_t *payload = pack->GetPayloadData();
                        size_t length = pack->GetPayloadLength();
                        status = decoder_enqueueBuffer(codec, payload, length, (int64_t) timestamp);
                        if (status != AMEDIA_OK) hasInput = false;
                    } else {
                        log("Receiver: End of file");
                        // Tell the codec we are done
                        decoder_enqueueBuffer(codec, NULL, -1, (int64_t) timestamp);
                    }
                    hasOutput = decoder_dequeueBuffer(codec, &audioplayer_enqueuePCMFrames);

                    DeletePacket(pack);
                }
            } while (GotoNextSourceWithData());
        }
        EndDataAccess();

        struct timespec req;
        req.tv_sec = 0;
        req.tv_nsec = 1000*1000;
        audioplayer_monitorPlayback();
        // We should give other threads the opportunity to run
        nanosleep(&req, NULL);// TODO base time on duration of received audio?
        audioplayer_monitorPlayback();
    }
    log("Received all data, ending RTP session.");
    BYEDestroy(RTPTime(1, 0), 0, 0);

    while (hasOutput && status == AMEDIA_OK && isRunning) {
        hasOutput = decoder_dequeueBuffer(codec, &audioplayer_enqueuePCMFrames);
        RTPTime::Wait(RTPTime(0, 5000));
    }
    AMediaCodec_stop(codec);
    log("Finished decoding");

    while(isRunning) {
        audioplayer_monitorPlayback();
        RTPTime::Wait(RTPTime(0, 50000));// 50ms
    }
    audioplayer_stopPlayback();
}
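
The receiver above treats the RTP marker bit as an end-of-stream flag and the "HI" packet as a start request. The template below sketches the matching sender-side call under the assumption that the session exposes jrtplib's SendPacket(data, len, payloadType, marker, timestampInc); the payload type 96 and the helper name are assumptions, not part of the original project.

#include <cstddef>
#include <cstdint>

// Minimal sketch (illustrative only): send one audio chunk, marking the last one as end of stream.
template <typename SessionT>
int sendAudioChunk(SessionT& session, const uint8_t* data, size_t len,
                   bool lastChunk, uint32_t timestampInc)
{
    const uint8_t payloadType = 96; // dynamic payload type, an assumption
    // The marker bit doubles as the end-of-file flag the receiver checks via HasMarker()
    return session.SendPacket(data, len, payloadType, lastChunk, timestampInc);
}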