Example #1
TEST(RTPPacketTest, test_rtp_resizer_pcap_samesize)
{
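    // Pass-through check: each packet read from the capture goes through an
    // RTPResizingQueue configured with identical input and output frame sizes,
    // and the dequeued payload must match the original byte for byte.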
    if (boost::filesystem::exists("resizer_down.pcap"))
    {
        RTPPCAPReader reader;
        ASSERT_TRUE(reader.open("resizer_down.pcap"));
        RTPResizingQueue q(18, 80, 10, 10, 80);
        RTPPacket packet;
        while (reader.read(packet))
        {
            ASSERT_TRUE(q.enqueue(packet));

            RTPPacket packet2;
            ASSERT_TRUE(q.dequeue(packet2));

            unsigned int len = 0;
            u_char payload1[8192];
            u_char payload2[8192];
            packet.getPayload(payload1, len);
            packet2.getPayload(payload2, len);

            for (unsigned int i = 0; i < len; i++)
            {
                ASSERT_EQ(payload1[i], payload2[i]);
            }
        }
    }
}
Example #2
uint8_t * MyRTPSession::GetMyRTPPacket(uint8_t * packet_buf, size_t * size, unsigned long timeout_ms)
{
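	// Poll the session until a packet arrives or the timeout expires, copy the raw
	// packet data into packet_buf, store its length in *size and return packet_buf
	// (NULL is returned only for invalid arguments).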
	if(!packet_buf) {
		fprintf(stderr, "%s: Invalide argument('packet_buf==NULL')", __func__);
		return NULL;
	}

	if(!size) {
		fprintf(stderr, "%s: Invalide argument('size==NULL')", __func__);
		return NULL;
	}

	unsigned long UsleepTimes = (timeout_ms + USLEEP_UNIT - 1) / USLEEP_UNIT; // ceiling of 'timeout_ms / USLEEP_UNIT'

	do {
#ifndef RTP_SUPPORT_THREAD
		int status = Poll();
		if(!IsError(status)) return NULL;
#endif 

		BeginDataAccess();

		// check incoming packets
		if (!GotoFirstSourceWithData()) {
			EndDataAccess();
			usleep(USLEEP_UNIT);
			UsleepTimes--;
			continue;
			// return NULL;
		}
		RTPPacket *pack;

		if(!(pack = GetNextPacket()))
		{
			EndDataAccess();
			usleep(USLEEP_UNIT);
			UsleepTimes--;
			continue;
			// return NULL;
		}

		size_t PacketSize = 0;
		uint8_t * Packet = NULL;
		Packet = pack->GetPacketData();
		PacketSize = pack->GetPacketLength();
		// printf("packet length: %lu\n", PacketSize);

		*size = PacketSize;
		memcpy(packet_buf, Packet, PacketSize);

		// we no longer need the packet, so
		// we'll delete it
		DeletePacket(pack);
		EndDataAccess();
		UsleepTimes = 0;
	} while(UsleepTimes > 0);

	return packet_buf;
}
void RTPEndpoint::onRTPPacket(RTPPacket &packet)
{
	//Check
	if (!sending)
		//Exit
		return;
	
        //Get type
        MediaFrame::Type packetType = packet.GetMedia();
        //Check types
        if (type!=packetType)
                //Exit
                return;
        //Check type
        if (packet.GetCodec()!=codec)
        {
                //Store it
                codec = packet.GetCodec();
                //Depending on the type
                switch(packetType)
                {
                        case MediaFrame::Audio:
                                //Set it
                                RTPSession::SetSendingAudioCodec((AudioCodec::Type)codec);
                                break;
                        case MediaFrame::Video:
                                //Set it
                                RTPSession::SetSendingVideoCodec((VideoCodec::Type)codec);
                                break;
                        case MediaFrame::Text:
                                //Set it
                                RTPSession::SetSendingTextCodec((TextCodec::Type)codec);
                                break;
                }
	}

	//Get difference from latest frame
	QWORD dif = getUpdDifTime(&prev);

	//If it was reset
	if (reseted)
	{
		//Get new time
		timestamp += dif*freq/1000;
		//Clear the reset flag
		reseted = false;
		
	} else {
		//Get dif from packet timestamp
		timestamp += packet.GetTimestamp()-prevts;
	}

	//Update prev rtp ts
	prevts = packet.GetTimestamp();

        //Send it
        RTPSession::SendPacket(packet,timestamp);
}
bool whu_RtpRPicSAg::RecvPic(IplImage* RecvImg)
{
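	// Each payload is expected to carry one image line: the first 4 bytes hold the
	// line index and the following 1200 bytes the pixel data, which is copied into
	// the matching row of RecvImg.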
	bool done = false;
	int status;
	char RecvBuf[1204];
	RTPTime delay(0.020);
	RTPTime starttime = RTPTime::CurrentTime();
	int lengh ,i;
	uchar* ptr;
	session.BeginDataAccess();
		if (session.GotoFirstSource())
		{
			int line=0;
			do
			{
				RTPPacket *packet;
				
				while ((packet = session.GetNextPacket()) != 0)
				{
					timestamp1 = packet->GetTimestamp();
					lengh=packet->GetPayloadLength();
					RawData = packet->GetPayloadData();
					memcpy(RecvBuf,RawData,1204);
					memcpy(&line,RecvBuf,4);
					if (line>=0&&line<RecvImg->height)
					{
						
						ptr=(uchar*)(RecvImg->imageData+line*RecvImg->widthStep);
						memcpy(ptr,RecvBuf+4,1200);
					}
					else{
						printf("loss packet\n");
					}
					
					session.DeletePacket(packet);
				}
			} while (session.GotoNextSource());
		}
		session.EndDataAccess();

		RTPTime::Wait(delay);

		RTPTime t = RTPTime::CurrentTime();
		t -= starttime;
		if (t > RTPTime(60.0))
			done = true;
		return true;
}
Example #5
// Thread function for RTP session.
static void* rtpSessionThread(void *arg)
{
    VideoStream* video = reinterpret_cast<VideoStream*>(arg);

    u_int8_t bigBuffer[MAX_FRAME_SIZE];

    unsigned int lastPacketTimestamp = 0;
    unsigned int currentIndex = 0;
    double last_time = 0;

    RTPSession session = createRtpSession(video->getMulticastIP(), 
                                          video->getPort());

    while (1) {
        session.Poll();

        // Distribute data from the session to connected clients if we've
        // got anything
        session.BeginDataAccess();

        if (session.GotoFirstSourceWithData()) {
            do {
                RTPPacket *packet = NULL;

                while ((packet = session.GetNextPacket()) != NULL) {
                    if ((packet->GetPayloadLength() > sizeof(bigBuffer)) || 
                        (packet->GetPayloadLength() == 0)) {
                        // Free the packet, we're not going to use it.
                        session.DeletePacket(packet);
                        continue; // Exit this level of the loop and drop it
                    }

                    // Check timestamps for new data.  A new timestamp means
                    // this is from a different time.
                    if (packet->GetTimestamp() != lastPacketTimestamp) {
                        video->decode((uint8_t*)&bigBuffer[0], currentIndex);

                        currentIndex = 0;
                        memset(&bigBuffer[0], 0, sizeof(bigBuffer));
                    } // End new timestamp optimization.


                    // Copy data into buffer
                    if (currentIndex + packet->GetPayloadLength() > sizeof(bigBuffer)) {
                        throw std::runtime_error("Frame buffer overflow");
                    }

                    memcpy(&bigBuffer[currentIndex], packet->GetPayloadData(),
                           packet->GetPayloadLength());
                    currentIndex += packet->GetPayloadLength();

                    // Mark our last timestamp
                    lastPacketTimestamp = packet->GetTimestamp();

                    // Free the packet.
                    session.DeletePacket(packet);
                } 
            } while (session.GotoNextSourceWithData()); 
        }

        session.EndDataAccess();
        RTPTime delay(0, 100); // 100usec

        // Update More Data
        bool moreData;
        session.WaitForIncomingData(delay, &moreData);
    }

    // Leave the session while sending BYE.
    RTPTime timeout(0.75f); //  Wait briefly.
    const char* reason = "Session Destroyed.";
    unsigned int reasonlen = strlen(reason);

    if (session.IsActive())
        session.BYEDestroy(timeout, reason, reasonlen);
}  
Example #6
/****************************************
* RecText
*	Gets the packets and displays them
*****************************************/
int MediaBridgeSession::RecText()
{
	DWORD		timeStamp=0;
	DWORD		lastSeq = RTPPacket::MaxExtSeqNum;

	Log(">RecText\n");

	//While we have to keep capturing
	while(receivingText)
	{
		//Get packet
		RTPPacket *packet = rtpText.GetPacket();

		//Check packet
		if (!packet)
			continue;

		//Get data
		BYTE* data = packet->GetMediaData();
		//And length
		DWORD size = packet->GetMediaLength();

		//Get extended sequence number
		DWORD seq = packet->GetExtSeqNum();

		//Lost packets since last one
		DWORD lost = 0;

		//If not first
		if (lastSeq!=RTPPacket::MaxExtSeqNum)
			//Calculate lost packets
			lost = seq-lastSeq-1;

		//Update last sequence number
		lastSeq = seq;

		//Get type
		TextCodec::Type type = (TextCodec::Type)packet->GetCodec();

		//Check the type of data
		if (type==TextCodec::T140RED)
		{
			//Get redundant packet
			RTPRedundantPacket* red = (RTPRedundantPacket*)packet;

			//Check lost packets count
			if (lost == 0)
			{
				//Create text frame
				TextFrame frame(timeStamp ,red->GetPrimaryPayloadData(),red->GetPrimaryPayloadSize());
				//Create new timestamp associated to latest media time
				RTMPMetaData meta(getDifTime(&first)/1000);

				//Add text name
				meta.AddParam(new AMFString(L"onText"));
				//Set data
				meta.AddParam(new AMFString(frame.GetWChar()));

				//Send data
				SendMetaData(&meta);
			} else {
				//Timestamp of first packet (either recovered or not)
				DWORD ts = timeStamp;

				//Check if we have any red packet
				if (red->GetRedundantCount()>0)
					//Get the timestamp of first redundant packet
					ts = red->GetRedundantTimestamp(0);

				//If we have lost too many
				if (lost>red->GetRedundantCount())
					//Get what we have available only
					lost = red->GetRedundantCount();

				//For each not-recovered packet send a mark
				for (int i=red->GetRedundantCount();i<lost;i++)
				{
					//Create frame of lost replacement
					TextFrame frame(ts,LOSTREPLACEMENT,sizeof(LOSTREPLACEMENT));
					//Create new timestamp associated to latest media time
					RTMPMetaData meta(getDifTime(&first)/1000);

					//Add text name
					meta.AddParam(new AMFString(L"onText"));
					//Set data
					meta.AddParam(new AMFString(frame.GetWChar()));

					//Send data
					SendMetaData(&meta);
				}

				//For each recovered packet
				for (int i=red->GetRedundantCount()-lost;i<red->GetRedundantCount();i++)
				{
					//Create frame from recovered data
					TextFrame frame(red->GetRedundantTimestamp(i),red->GetRedundantPayloadData(i),red->GetRedundantPayloadSize(i));
					//Create new timestamp associated to latest media time
					RTMPMetaData meta(getDifTime(&first)/1000);

					//Add text name
					meta.AddParam(new AMFString(L"onText"));
					//Set data
					meta.AddParam(new AMFString(frame.GetWChar()));

					//Send data
					SendMetaData(&meta);
				}
			}
		} else {
			//Create frame
			TextFrame frame(timeStamp,data,size);
			//Create new timestamp associated to latest media time
			RTMPMetaData meta(getDifTime(&first)/1000);

			//Add text name
			meta.AddParam(new AMFString(L"onText"));
			//Set data
			meta.AddParam(new AMFString(frame.GetWChar()));

			//Send data
			SendMetaData(&meta);
		}
	}

	Log("<RecText\n");
}
Example #7
/****************************************
* RecAudio
*	Gets the packets and displays them
*****************************************/
int MediaBridgeSession::RecAudio()
{
	DWORD		firstAudio = 0;
	DWORD		timeStamp=0;
	DWORD		firstTS = 0;
	SWORD		raw[512];
	DWORD		rawSize = 512;
	DWORD		rawLen;

	//Create new audio frame
	RTMPAudioFrame  *audio = new RTMPAudioFrame(0,RTPPAYLOADSIZE);

	Log(">RecAudio\n");

	//While we have to keep capturing
	while(receivingAudio)
	{
		//Get the packet
		RTPPacket *packet = rtpAudio.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;

		//Get type
		AudioCodec::Type codec = (AudioCodec::Type)packet->GetCodec();

		//Check rtp type
		if (codec==AudioCodec::SPEEX16)
		{
			//TODO!!!!
		}

		//Check if we have a decoder
		if (!rtpAudioDecoder || rtpAudioDecoder->type!=codec)
		{
			//Check
			if (rtpAudioDecoder)
				//Delete old one
				delete(rtpAudioDecoder);
			//Create new one
			rtpAudioDecoder = AudioCodecFactory::CreateDecoder(codec);
		}

		//Decode it
		rawLen = rtpAudioDecoder->Decode(packet->GetMediaData(),packet->GetMediaLength(),raw,rawSize);

		//Delete packet
		delete(packet);

		//Re-encode it
		DWORD len;

		while((len=rtmpAudioEncoder->Encode(raw,rawLen,audio->GetMediaData(),audio->GetMaxMediaSize()))>0)
		{
			//Reset
			rawLen = 0;

			//Set length
			audio->SetMediaSize(len);

			switch(rtmpAudioEncoder->type)
			{
				case AudioCodec::SPEEX16:
					//Set RTMP data
					audio->SetAudioCodec(RTMPAudioFrame::SPEEX);
					audio->SetSoundRate(RTMPAudioFrame::RATE11khz);
					audio->SetSamples16Bits(1);
					audio->SetStereo(0);
					break;
				case AudioCodec::NELLY8:
					//Set RTMP data
					audio->SetAudioCodec(RTMPAudioFrame::NELLY8khz);
					audio->SetSoundRate(RTMPAudioFrame::RATE11khz);
					audio->SetSamples16Bits(1);
					audio->SetStereo(0);
					break;
				case AudioCodec::NELLY11:
					//Set RTMP data
					audio->SetAudioCodec(RTMPAudioFrame::NELLY);
					audio->SetSoundRate(RTMPAudioFrame::RATE11khz);
					audio->SetSamples16Bits(1);
					audio->SetStereo(0);
					break;
			}

			//If it is first
			if (!firstTS)
			{
				//Get first audio time
				firstAudio = getDifTime(&first)/1000;
				//It is first
				firstTS = timeStamp;
			}

			DWORD ts = firstAudio +(timeStamp-firstTS)/8;
			//Set timestamp
			audio->SetTimestamp(ts);

			//Send packet
			SendMediaFrame(audio);
		}
	}

	//Check
	if (audio)
		//Delete it
		delete(audio);

	Log("<RecAudio\n");
}
Example #8
/****************************************
* RecVideo
*	Gets the packets and displays them
*****************************************/
int MediaBridgeSession::RecVideo()
{
	//Coders
	VideoDecoder* decoder = NULL;
	VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON);
	//Create new video frame
	RTMPVideoFrame  frame(0,262143);
	//Set codec
	frame.SetVideoCodec(RTMPVideoFrame::FLV1);

	int 	width=0;
	int 	height=0;
	DWORD	numpixels=0;

	Log(">RecVideo\n");

	//While we have to keep capturing
	while(receivingVideo)
	{
		//Get the packet
		RTPPacket* packet = rtpVideo.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;

		//Get type
		VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec();


		if ((decoder==NULL) || (type!=decoder->type))
		{
			//If there was already one, delete it
			if (decoder!=NULL)
				delete decoder;

			//Create one depending on the type
			decoder = VideoCodecFactory::CreateDecoder(type);

			//Check
			if (!decoder)
			{
				delete(packet);
				continue;
			}
		}

		//Decode it
		if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark()))
		{
			delete(packet);
			continue;
		}
		//Get mark
		bool mark = packet->GetMark();

		//Delete packet
		delete(packet);

		//Check if it is last one
		if(!mark)
			continue;

		//Check size
		if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
		{
			//Get dimension
			width = decoder->GetWidth();
			height = decoder->GetHeight();

			//Set size
			numpixels = width*height*3/2;

			//Set also frame rate and bps
			encoder->SetFrameRate(25,300,500);

			//Set them in the encoder
			encoder->SetSize(width,height);
		}

		//Encode next frame
		VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels);

		//Check
		if (!encoded)
			break;

		//Check size
		if (frame.GetMaxMediaSize()<encoded->GetLength())
			//Not enough space
			return Error("Not enough space to copy FLV encoded frame [frame:%d,encoded:%d]\n",frame.GetMaxMediaSize(),encoded->GetLength());

		//Get full frame
		frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());

		//Set buffer size
		frame.SetMediaSize(encoded->GetLength());

		//Check type
		if (encoded->IsIntra())
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTRA);
		else
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTER);

		//Let the connection set the timestamp
		frame.SetTimestamp(getDifTime(&first)/1000);

		//Send it
		SendMediaFrame(&frame);
	}

	//Check
	if (decoder)
		//Delete
		delete(decoder);
	//Check
	if (encoder)
		//Delete
		delete(encoder);

	Log("<RecVideo\n");
}
void Java_cn_nickwar_MainActivity_nativeWorker(JNIEnv* env, jobject obj) {
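	// JNI worker: creates a jrtplib session on portbase, adds destport at the given
	// IP as destination, then loops reading incoming packets and reports each one
	// back to the Java side via the rtpresultFromJNI callback until m_bExitApp is set.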
	uint16_t portbase=8000,destport=9000;
	std::string ipstr="192.168.1.102";
	uint32_t destip=inet_addr(ipstr.c_str());
	int status,i,num;

	RTPSession session;
	RTPSessionParams sessionparams;
	RTPUDPv4TransmissionParams transparams;
	RTPIPv4Address addr;

	if (destip == INADDR_NONE) {
		__android_log_print(ANDROID_LOG_DEBUG, "pspm.native", "Bad IP address specified");
	}

	destip = ntohl(destip);

	num = 40;

	sessionparams.SetOwnTimestampUnit(1.0/10.0);
	sessionparams.SetAcceptOwnPackets(true);

	transparams.SetPortbase(portbase);

	addr.SetIP(destip);
	addr.SetPort(destport);

	status = session.Create(sessionparams,&transparams);

	if (status<0) {
		std::string tmp = "Create:";
		__android_log_print(ANDROID_LOG_DEBUG, "pspm.native", (tmp+RTPGetErrorString(status)).c_str());
	}
	status = session.AddDestination(addr);
	if (status<0) {
		std::string tmp = "AddDestination:";
		__android_log_print(ANDROID_LOG_DEBUG, "pspm.native", (tmp+RTPGetErrorString(status)).c_str());
	}

	while(!m_bExitApp)
	{
		session.BeginDataAccess();

		unsigned char *buff = NULL;
		if (session.GotoFirstSourceWithData())
		{
			do
			{
				RTPPacket *pack;
				while((pack = session.GetNextPacket()) !=NULL)
				{
					__android_log_print(ANDROID_LOG_DEBUG, "pspm.native", "got packet!\n");
					char message[26];
					sprintf(message, "got packet");
					jstring messageString = env->NewStringUTF(message);
					env->CallVoidMethod(obj, rtpresultFromJNI, messageString);

					if (NULL != env->ExceptionOccurred()) {
						//						break;
						continue;
					}
					if (pack->GetPayloadLength()>0) {
						buff = pack->GetPayloadData();
						__android_log_print(ANDROID_LOG_DEBUG, "pspm.native", "packt data:%s",buff);
					}
					session.DeletePacket(pack);
				}
			}
			while(session.GotoNextSourceWithData());
		}

		session.EndDataAccess();
		//
#ifndef RTP_SUPPORT_THREAD
		status = session.Poll();
		if (status<0) {
			session.Destroy();
			return;
		}
#endif

		RTPTime::Wait(RTPTime(0,5000));
	}

	session.Destroy();
	return;
}
// The following function should delete rtppack if necessary
int RTPInternalSourceData::ProcessRTPPacket(RTPPacket *rtppack,const RTPTime &receivetime,bool *stored)
{
	bool accept,onprobation,applyprobation;
	double tsunit;
	
	*stored = false;
	
	if (timestampunit < 0) 
		tsunit = INF_GetEstimatedTimestampUnit();
	else
		tsunit = timestampunit;

#ifdef RTP_SUPPORT_PROBATION
	if (validated) 				// If the source is our own process, we can already be validated. No 
		applyprobation = false;		// probation should be applied in that case.
	else
	{
		if (probationtype == RTPSources::NoProbation)
			applyprobation = false;
		else
			applyprobation = true;
	}
#else
	applyprobation = false;
#endif // RTP_SUPPORT_PROBATION

	stats.ProcessPacket(rtppack,receivetime,tsunit,ownssrc,&accept,applyprobation,&onprobation);

#ifdef RTP_SUPPORT_PROBATION
	switch (probationtype)
	{
		case RTPSources::ProbationStore:
			if (!(onprobation || accept))
				return 0;
			if (accept)
				validated = true;
			break;
		case RTPSources::ProbationDiscard:
		case RTPSources::NoProbation:
			if (!accept)
				return 0;
			validated = true;
			break;
		default:
			return ERR_RTP_INTERNALSOURCEDATA_INVALIDPROBATIONTYPE;
	}
#else
	if (!accept)
		return 0;
	validated = true;
#endif // RTP_SUPPORT_PROBATION;
	
	if (validated && !ownssrc) // for own ssrc these variables depend on the outgoing packets, not on the incoming
		issender = true;
	
	// Now, we can place the packet in the queue
	
	if (packetlist.empty())
	{
		*stored = true;
		packetlist.push_back(rtppack);
		return 0;
	}
	
	if (!validated) // still on probation
	{
		// Make sure that we don't buffer too many packets to avoid wasting memory
		// on a bad source. Delete the packet in the queue with the lowest sequence
		// number.
		if (packetlist.size() == RTPINTERNALSOURCEDATA_MAXPROBATIONPACKETS)
		{
			RTPPacket *p = *(packetlist.begin());
			packetlist.pop_front();
			RTPDelete(p,GetMemoryManager());
		}
	}

	// find the right position to insert the packet
	
	std::list<RTPPacket*>::iterator it,start;
	bool done = false;
	uint32_t newseqnr = rtppack->GetExtendedSequenceNumber();
	
	it = packetlist.end();
	--it;
	start = packetlist.begin();
	
	while (!done)
	{
		RTPPacket *p;
		uint32_t seqnr;
		
		p = *it;
		seqnr = p->GetExtendedSequenceNumber();
		if (seqnr > newseqnr)
		{
			if (it != start)
				--it;
			else // we're at the start of the list
			{
				*stored = true;
				done = true;
				packetlist.push_front(rtppack);
			}
		}
		else if (seqnr < newseqnr) // insert after this packet
		{
			++it;
			packetlist.insert(it,rtppack);
			done = true;
			*stored = true;
		}
		else // they're equal !! Drop packet
		{
			done = true;
		}
	}

	return 0;
}
Example #11
int RTPSources::ProcessRawPacket(RTPRawPacket *rawpack, RTPTransmitter *rtptrans[], int numtrans, bool acceptownpackets)
{
    int status;

    if (rawpack->IsRTP()) // RTP packet
    {
        RTPPacket *rtppack;

        // First, we'll see if the packet can be parsed
        rtppack = new RTPPacket(*rawpack);

        if ((status = rtppack->GetCreationError()) < 0)
        {
            if (status == ERR_RTP_PACKET_INVALIDPACKET)
            {
                delete rtppack;
                rtppack = 0;
            }
            else
            {
                delete rtppack;
                return status;
            }
        }

        // Check if the packet was valid
        if (rtppack != 0)
        {
            bool stored = false;
            bool ownpacket = false;
            int i;
            const RTPAddress& senderaddress = rawpack->GetSenderAddress();

            for (i = 0; !ownpacket && i < numtrans; i++)
            {
                if (rtptrans[i]->ComesFromThisTransmitter(senderaddress))
                    ownpacket = true;
            }

            // Check if the packet is our own.
            if (ownpacket)
            {
                // Now it depends on the user's preference
                // what to do with this packet:
                if (acceptownpackets)
                {
                    // sender address for own packets has to be NULL!
                    if ((status = ProcessRTPPacket(rtppack, rawpack->GetReceiveTime(), 0, &stored)) < 0)
                    {
                        if (!stored)
                            delete rtppack;
                        return status;
                    }
                }
            }
            else
            {
                if ((status = ProcessRTPPacket(rtppack, rawpack->GetReceiveTime(), &senderaddress, &stored)) < 0)
                {
                    if (!stored)
                        delete rtppack;
                    return status;
                }
            }
            if (!stored)
                delete rtppack;
        }
    }
    else // RTCP packet
    {
        RTCPCompoundPacket rtcpcomppack(*rawpack);
        bool valid = false;

        if ((status = rtcpcomppack.GetCreationError()) < 0)
        {
            if (status != ERR_RTP_RTCPCOMPOUND_INVALIDPACKET)
                return status;
        }
        else
            valid = true;

        if (valid)
        {
            bool ownpacket = false;
            int i;
            const RTPAddress& senderaddress = rawpack->GetSenderAddress();

            for (i = 0; !ownpacket && i < numtrans; i++)
            {
                if (rtptrans[i]->ComesFromThisTransmitter(senderaddress))
                    ownpacket = true;
            }

            // First check if it's a packet of this session.
            if (ownpacket)
            {
                if (acceptownpackets)
                {
                    // sender address for own packets has to be NULL
                    status = ProcessRTCPCompoundPacket(&rtcpcomppack, rawpack->GetReceiveTime(), 0);
                    if (status < 0)
                        return status;
                }
            }
            else // not our own packet
            {
                status = ProcessRTCPCompoundPacket(&rtcpcomppack, rawpack->GetReceiveTime(), &rawpack->GetSenderAddress());
                if (status < 0)
                    return status;
            }
        }
    }

    return 0;
}
Example #12
RTPPacket* FECDecoder::Recover()
{
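	// Build a bitmask of which media packets are present, then for each missing
	// sequence number look for a FEC packet whose level-0 mask is fully covered by
	// the received packets and XOR header fields and payloads to rebuild the lost one.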
	BYTE aux[8];
	QWORD lostMask = 0;

	//Check we have media packets
	if (!medias.size())
		//Exit
		return NULL;
	//Get First packet
	RTPPacket* first = medias.begin()->second;
	//Get the SSRC
	DWORD ssrc = first->GetSSRC();
	//Get first media packet
	DWORD minSeq = first->GetExtSeqNum();
	//Iterator on seq
	DWORD lastSeq = minSeq;

	//Set to 0
	memset(aux,0,8);
	//Create writer
	BitWritter w(aux,8);
	//vector of lost packet seqs
	std::vector<DWORD> losts;

	//For each media packet
	for (RTPOrderedPackets::iterator it=medias.begin();it!=medias.end();++it)
	{
		//Get seq
		DWORD cur = it->first;
		//Insert lost
		for (DWORD i=lastSeq+1;i<cur;++i)
		{
			//set mask bit to not present
			w.Put(1,0);
			//Add to the vector
			losts.push_back(i);
		}
		//Set last seq
		lastSeq = cur;
		//set mask bit to present
		w.Put(1,1);
	}

	//End it
	w.Flush();

	//Get mask
	lostMask = get8(aux,0);

	//Check we have lost packets
	if (!losts.size())
		//Exit
		return NULL;

	//For each lost packet
	for(std::vector<DWORD>::iterator it=losts.begin();it!=losts.end();++it)
	{
		//Get lost packet sequence
		DWORD seq = *it;

		//Search FEC packets associated this media packet
		for (FECOrderedData::iterator it2 = codes.begin();it2!=codes.end();++it2)
		{
			//Get FEC packet
			FECData *fec = it2->second;

			//Check if it is associated with this media packet in level 0
			if (!fec->IsProtectedAtLevel0(seq))
				//Next
				continue;

			//Get the seq difference between the fec data and the media
			// (fec seq has to be <= media seq if the fec data protects the media data)
			DWORD diff = seq-fec->GetBaseExtSeq();
			//Shift mask of the lost packets to check the present ones from the base seq
			QWORD mediaMask = lostMask << (fec->GetBaseExtSeq()-minSeq);
			//Remove lost packet bit from the fec mask
			QWORD fecMask = fec->GetLevel0Mask() & ~(((QWORD)1)<<(64-diff-1));

			//Compare needed packets with actual packets, to check if we have all of them except the missing one
			if ((fecMask & mediaMask) == fecMask)
			{
				//Recovered media data
				BYTE	recovered[MTU+SRTP_MAX_TRAILER_LEN] ZEROALIGNEDTO32;
				//Get attributes
				bool  p  = fec->GetRecoveryP();
				bool  x  = fec->GetRecoveryX();
				BYTE  cc = fec->GetRecoveryCC();
				bool  m  = fec->GetRecoveryM();
				BYTE  pt = fec->GetRecoveryType();
				DWORD ts = fec->GetRecoveryTimestamp();
				WORD  l  = fec->GetRecoveryLength();
				//Get protection length
				DWORD level0Size = fec->GetLevel0Size();
				//Ensure there is enough space
				if (level0Size>MTU)
				{
					//Error
					Error("-FEC level 0 data size too big [%d]\n",level0Size);
					//Skip this one
					continue;
				}
				//Copy data
				memcpy(recovered,fec->GetLevel0Data(),level0Size);
				//Set value in temp buffer
				set8(aux,0,fecMask);
				//Get bit reader
				BitReader r(aux,8);
				//Read all media packet
				while(r.Left())
				{
					//If the media packet is used to reconstruct the packet
					if (r.Get(1))
					{
						//Get media packet
						RTPPacket* media = medias[fec->GetBaseExtSeq()+r.GetPos()-1];
						//Calculate recovered attributes
						p  ^= media->GetP();
						x  ^= media->GetX();
						cc ^= media->GetCC();
						m  ^= media->GetMark();
						pt ^= media->GetType();
						ts ^= media->GetTimestamp();
						l  ^= media->GetMediaLength();
						//Get data
						BYTE *payload = media->GetMediaData();
						//Calculate the xor
						for (int i=0;i<fmin(media->GetMediaLength(),level0Size);++i)
							//XOR
							recovered[i] ^= payload[i];
					}
				}
				//Create new video packet
				RTPPacket* packet = new RTPPacket(MediaFrame::Video,pt);
				//Set values
				packet->SetP(p);
				packet->SetX(x);
				packet->SetMark(m);
				packet->SetTimestamp(ts);
				//Set sequence number
				packet->SetSeqNum(seq);
				//Set seq cycles
				packet->SetSeqCycles(fec->GetBaseSeqCylcles());
				//Set ssrc
				packet->SetSSRC(ssrc);
				//Set payload and recovered length
				if (!packet->SetPayloadWithExtensionData(recovered,l))
				{
					//Delete packet
					delete(packet);
					//Error
					Error("-FEC payload of recovered packet to big [%u]\n",(unsigned int)l);
					//Skip
					continue;
				}

				Debug("-recovered packet len:%u ts:%u pts:%u seq:%d\n",l,ts,packet->GetTimestamp() ,packet->GetSeqNum());

				//Append the packet to the media packet list
				if (AddPacket(packet))
					//Return it if contained media
					return packet;
				else
					//Discard and continue
					delete(packet);
			}
		}
	}
	//Nothing found
	return NULL;
}
Example #13
/****************************************
* RecVideo
*	Gets the packets and displays them
*****************************************/
int VideoStream::RecVideo()
{
	VideoDecoder*	videoDecoder = NULL;
	VideoCodec::Type type;
	timeval 	before;
	timeval		lastFPURequest;
	DWORD		lostCount=0;
	DWORD		frameTime = (DWORD)-1;
	DWORD		lastSeq = RTPPacket::MaxExtSeqNum;
	bool		waitIntra = false;
	
	
	Log(">RecVideo\n");
	
	//Get now
	gettimeofday(&before,NULL);

	//Not sent FPU yet
	setZeroTime(&lastFPURequest);

	//While we have to keep capturing
	while(receivingVideo)
	{
		//Get RTP packet
		RTPPacket* packet = rtp.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;
		
		//Get extended sequence number and timestamp
		DWORD seq = packet->GetExtSeqNum();
		DWORD ts = packet->GetTimestamp();

		//Get packet data
		BYTE* buffer = packet->GetMediaData();
		DWORD size = packet->GetMediaLength();

		//Get type
		type = (VideoCodec::Type)packet->GetCodec();

		//Lost packets since last
		DWORD lost = 0;

		//If not first
		if (lastSeq!=RTPPacket::MaxExtSeqNum)
			//Calculate lost packets
			lost = seq-lastSeq-1;

		//Increase total lost count
		lostCount += lost;

		//Update last sequence number
		lastSeq = seq;

		//If lost some packets or still have not got an iframe
		if(lostCount || waitIntra)
		{
			//Check if we got a listener and more than 1/2 second has elapsed since the last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Debug("-Requesting FPU lost %d\n",lostCount);
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is a redundant packet
		if (type==VideoCodec::RED)
		{
			//Get redundant packet
			RTPRedundantPacket* red = (RTPRedundantPacket*)packet;
			//Get primary codec
			type = (VideoCodec::Type)red->GetPrimaryCodec();
			//Check it is not ULPFEC redundant packet
			if (type==VideoCodec::ULPFEC)
			{
				//Delete packet
				delete(packet);
				//Skip
				continue;
			}
			//Update primary redundant payload
			buffer = red->GetPrimaryPayloadData();
			size = red->GetPrimaryPayloadSize();
		}
		
		//Check codecs
		if ((videoDecoder==NULL) || (type!=videoDecoder->type))
		{
			//If we already got one
			if (videoDecoder!=NULL)
				//Delete it
				delete videoDecoder;

			//Create video decoder for codec
			videoDecoder = VideoCodecFactory::CreateDecoder(type);

			//Check
			if (videoDecoder==NULL)
			{
				Error("Error creando nuevo decodificador de video [%d]\n",type);
				//Delete packet
				delete(packet);
				//Next
				continue;
			}
		}

		//Check if we have lost the last packet from the previous frame by comparing both timestamps
		if (ts>frameTime)
		{
			Debug("-lost mark packet ts:%u frameTime:%u\n",ts,frameTime);
			//Try to decode what is in the buffer
			videoDecoder->DecodePacket(NULL,0,1,1);
			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);

				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
		}
		
		//Update frame time
		frameTime = ts;
		
		//Decode packet
		if(!videoDecoder->DecodePacket(buffer,size,lost,packet->GetMark()))
		{
			//Check if we got a listener and more than 1/2 second has elapsed since the last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Log("-Requesting FPU decoder error\n");
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is the last packet of a frame
		if(packet->GetMark())
		{
			if (videoDecoder->IsKeyFrame())
				Debug("-Got Intra\n");
			
			//No frame time yet for next frame
			frameTime = (DWORD)-1;

			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);
				
				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
			//Check if we got the waiting refresh
			if (waitIntra && videoDecoder->IsKeyFrame())
				//Do not wait anymore
				waitIntra = false;
		}
		//Delete packet
		delete(packet);
	}

	//Delete decoder
	delete videoDecoder;

	Log("<RecVideo\n");
}
Example #14
TEST(RTPPacketTest, test_rtp_resizer_downsize)
{
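    // Downsizing check: one packet with an 80-byte payload enters a queue targeting
    // 20-byte frames and must come out as four packets with consecutive sequence
    // numbers and timestamps 160 apart.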
    u_char pkt1[] = {
        0x80, 0x92, 0x17, 0xb0, 0x00, 0x00,
        0x02, 0x80, 0x75, 0xae, 0xb1, 0x14, 0x78, 0x52,
        0x80, 0xa0, 0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6,
        0xf9, 0x5b, 0x05, 0xe0, 0x00, 0xfa, 0xdd, 0x0b,
        0xe0, 0xf2, 0x11, 0x3b, 0x4b, 0x12, 0x81, 0xfa,
        0xd1, 0xb5, 0x00, 0x5e, 0xf8, 0x13, 0x80, 0xa0,
        0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6, 0xf0, 0x2a,
        0xc0, 0xa0, 0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6,
        0x70, 0x46, 0x80, 0xa0, 0x00, 0xfa, 0xc2, 0x00,
        0x07, 0xd6, 0x79, 0xa4, 0x40, 0xa0, 0x00, 0xfa,
        0xc2, 0x00, 0x07, 0xd6, 0x78, 0x4b, 0xc0, 0xa0,
        0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6
    };

    u_char pld1[] = {0x78, 0x52,
                     0x80, 0xa0, 0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6,
                     0xf9, 0x5b, 0x05, 0xe0, 0x00, 0xfa, 0xdd, 0x0b,
                     0xe0, 0xf2, 0x11, 0x3b, 0x4b, 0x12, 0x81, 0xfa,
                     0xd1, 0xb5, 0x00, 0x5e, 0xf8, 0x13, 0x80, 0xa0,
                     0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6, 0xf0, 0x2a,
                     0xc0, 0xa0, 0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6,
                     0x70, 0x46, 0x80, 0xa0, 0x00, 0xfa, 0xc2, 0x00,
                     0x07, 0xd6, 0x79, 0xa4, 0x40, 0xa0, 0x00, 0xfa,
                     0xc2, 0x00, 0x07, 0xd6, 0x78, 0x4b, 0xc0, 0xa0,
                     0x00, 0xfa, 0xc2, 0x00, 0x07, 0xd6
                    };

    RTPPacket packet;

    ASSERT_TRUE(packet.parse(pkt1, 92));
    RTPResizingQueue q(18, 80, 10, 10, 20);
    ASSERT_EQ(q.getTargetSize(), 20);
    ASSERT_EQ(q.getTargetClockRate(), 160);

    ASSERT_TRUE(q.enqueue(packet));



    RTPPacket p1, p2, p3, p4, p5;
    ASSERT_TRUE(q.dequeue(p1));
    ASSERT_TRUE(q.dequeue(p2));
    ASSERT_TRUE(q.dequeue(p3));
    ASSERT_TRUE(q.dequeue(p4));
    ASSERT_FALSE(q.dequeue(p5));

    //
    // We are expecting 4 samples
    //
    unsigned int len = 0;
    u_char d0[80];
    u_char d1[80];
    u_char d2[80];
    u_char d3[80];
    u_char d4[80];

    packet.getPayload(d0, len);
    for (unsigned int i = 0; i < len; i++)
    {
        ASSERT_EQ(d0[i], pld1[i]);
    }



    p1.getPayload(d1, len);
    p2.getPayload(d2, len);
    p3.getPayload(d3, len);
    p4.getPayload(d4, len);

    ASSERT_EQ(packet.getPayloadSize(), 80);
    ASSERT_EQ(p1.getPayloadSize(), 20);
    ASSERT_EQ(p2.getPayloadSize(), 20);
    ASSERT_EQ(p3.getPayloadSize(), 20);
    ASSERT_EQ(p4.getPayloadSize(), 20);

    //
    // We are expecting incrementing sequence
    //
    ASSERT_EQ(p1.getSequenceNumber(), p2.getSequenceNumber() - 1);
    ASSERT_EQ(p2.getSequenceNumber(), p3.getSequenceNumber() - 1);
    ASSERT_EQ(p3.getSequenceNumber(), p4.getSequenceNumber() - 1);

    //
    // We are expecting incrementing timestamps by 160
    //
    ASSERT_EQ(p1.getTimeStamp(), p2.getTimeStamp() - 160);
    ASSERT_EQ(p2.getTimeStamp(), p3.getTimeStamp() - 160);
    ASSERT_EQ(p3.getTimeStamp(), p4.getTimeStamp() - 160);


    int j = 0;
    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d1[i]);
        j++;
    }

    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d2[i]);
        j++;
    }

    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d3[i]);
        j++;
    }

    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d4[i]);
        j++;
    }
}
//called on timer
//all data is processed here
void MediaStream::timerClick()
{
//d->mutex.lock();

#ifndef TEST_AUDIO    
    int status = d->session.Poll();
    if ( status<0 ) {
        qDebug("Poll: %s", RTPGetErrorString(status).c_str() );
    }
//    printf("JStat2 %d : %d\n", d->micBuffer->size(),d->dspBuffer->size());

    //checkRtpError( status );
    // check incoming packets
    d->session.BeginDataAccess();
    if ( d->session.GotoFirstSourceWithData() ) {

        qDebug("have rtp data");
        do {
            RTPSourceData *sourceData = d->session.GetCurrentSourceInfo();

            RTPPacket *pack;
            if ((pack = d->session.GetNextPacket()) != NULL) {
                qDebug("Get packet N %ld", pack->GetExtendedSequenceNumber());


                // debug("Got  packet with payload type %d, size %d", pack->GetPayloadType(), pack->GetPayloadLength() );

                // TODO initialise decoder here using pack payload type, maybe use QIntDict of decoders
                if ( d->decoder ) {
                    short* decodedData = 0;

                    int size = d->decoder->decode((char*)pack->GetPayloadData(), pack->GetPayloadLength(), &decodedData );

                    if ( size > 0 ) {

                        // adjust the volume
                        for ( int i=0; i<size; i++ ) {
                            double val = double(decodedData[i]) * call_dlg_dsp_level / 50.0;
                            if ( val > 32700.0 )
                                val = 32700.0;
                            if ( val < -32700.0 )
                                val = -32700.0;

                            decodedData[i] = short(val);
                        }
                        
                        // write to DSP buffer
                        d->dspBuffer->lock();
                        d->dspBuffer->put( (char*)decodedData, size*2 );
                        d->dspBuffer->unlock();
                        
                        processDspData(decodedData,size);

                        delete[] decodedData;
                    }
                    qDebug("decoded data (%d byes) with payload type %d",  size*2, pack->GetPayloadType() );


                } else {
                    qDebug("can't decode data with payload type %d", pack->GetPayloadType() );
                }

                // we no longer need the packet, so
                // we'll delete it
                delete pack;
            }
        } while ( d->session.GotoNextSourceWithData());
    }
    d->session.EndDataAccess();

    // send the packet
    // check for in data


    short *data = 0;
    int micDataSize = 0; // size of read mic data in samples

    d->micBuffer->lock();
    micDataSize = d->micBuffer->size()/2;
    if ( micDataSize ) {
        data = new short[micDataSize];
        memcpy( data, d->micBuffer->data(), micDataSize*2 );
        d->micBuffer->fetch( micDataSize*2 );
    }
    d->micBuffer->unlock();

    // adjust mic volume
    for ( int i=0; i<micDataSize; i++ ) {
        double val = double(data[i]) * call_dlg_mic_level / 50.0;
        if ( val > 32700.0 )
            val = 32700.0;
        if ( val < -32700.0 )
            val = -32700.0;

        data[i] = short(val);
    }

    if(micDataSize == 0) {
	micDataSize = 160;
	data = new short[160];
    }

    // examine the data here, to calculate levels
    processMicData(data, micDataSize);


    if ( data ) {
        char * encodedData = 0;
        int readed = micDataSize;
        int size = 0;

        qDebug("have mic data %d", micDataSize );
        
        
        do {
            int readed = 0;
            size = d->encoder->encode( data, micDataSize, &encodedData, &readed );

            int localPayload = d->codecPayload; // TODO get local payload here

            qDebug("readed %d  encoded %d", readed, size );

            delete[] data;
            data = 0;
            micDataSize = 0;

            // TODO: for pcmu packet (payload==0) send packets of certain size
            if ( size > 0 ) {
                memcpy( d->outBuffer+d->outBufferPos, encodedData, size );
                d->outBufferPos += size;
                d->outBufferTime += readed;
                if ( d->outBufferPos ) {
                    //checkRtpError( 

                    if ( d->session.IsActive() && d->sendPacketsFlag ) {
                        int status = d->session.SendPacket( (void *)d->outBuffer, (int)d->outBufferPos, (unsigned char)localPayload , false, (long)d->outBufferTime );
                        if ( status<0 ) {
                             qDebug("can't SendPacket, %s", RTPGetErrorString(status).c_str() );
                        }
                    }
                    qDebug("sent packet");
                }

                    

                d->outBufferPos = 0;
                d->outBufferTime = 0;
            }

            if ( encodedData ) {
                delete[] encodedData;
                encodedData = 0;
            }

        } while (size > 0);
    }
    
    status = d->session.Poll();
    if ( status<0 ) {
         qDebug("Poll: %s", RTPGetErrorString(status).c_str() );
    }
#else // TEST_AUDIO

    short *data = 0;
    int micDataSize = 0; // size of read mic data in samples

    d->micBuffer->lock();
    micDataSize = d->micBuffer->size()/2;
    if ( micDataSize ) {
        data = new short[micDataSize];
        memcpy( data, d->micBuffer->data(), micDataSize*2 );
        d->micBuffer->fetch( micDataSize*2 );
    }
    d->micBuffer->unlock();

    if (data) {
        // write to DSP buffer
        d->dspBuffer->lock();
        d->dspBuffer->put( (char*)data,micDataSize*2 );
        d->dspBuffer->unlock();

    }

    static int totalSamples = 0;
    totalSamples += micDataSize;


    if ( micDataSize )
        printf("total audio samples: %d  %d   \r", micDataSize, totalSamples);
    

#endif // TEST_AUDIO
    
//    d->mutex->unlock();
}
int RTPPacketProcessor::ProcessRTPBlock(unsigned char *data,int len,unsigned long ip,int port,bool *collis,bool acceptlocalpackets,double localtsunit)
{
	int status;
	RTPPacket *packet;

	*collis = false;
	if (!initialized)
	{
		delete [] data;
		return ERR_RTP_PACKETPROCESSORNOTINITIALIZED;
	}

	if ((status = GetRTPData(data,len,&packet)) < 0)
	{
		delete [] data;
		return status;
	}
	if (packet != NULL) // valid packet
	{
		// check for collision with local ssrc and process further
		if (contribsrcs->GetLocalSSRC() == htonl(packet->GetSSRC()))
		{
			// collision with local ssrc

			if (ip == conn->GetLocalIP() && port == conn->GetSendPort())
			{
				if (!acceptlocalpackets)
				{
					delete packet;
					return 0;
				}
			}
			else
			{
				if (handlers->handlers[RTP_EXCEPTION_LOCALSSRCCOLLISION].handler != NULL)
					CallLocalSSRCCollHandler(packet->GetSSRC(),ip,true,port);
			
				delete packet;
				*collis = true;
				return 0;
			}
		}
		
		status = sources->ProcessPacket(packet,ip,port,localtsunit);
		if (status < 0)
		{
			delete packet;

			/* On a collision we still want other data to be processed,
			   so we will not count this as an error */
			if (status == ERR_RTP_COLLISIONBETWEENSSRCS)
				return 0;

			return status;
		}
	}
	else // invalid packet
		delete [] data;
	
	return 0;
}
Example #17
TEST(RTPPacketTest, test_basic_packet_parser)
{
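    // Parses a raw RTP packet and verifies the header accessors, copy assignment,
    // re-parsing of the serialized data, and payload round-trip via
    // setPayload()/getPayload().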
    u_char pkt1[] =
    {
        0x80, 0x12, 0x00, 0xb5, 0x00, 0x2c, 0xcb, 0x6c,
        0x00, 0x00, 0x3a, 0x87, 0x22, 0xb3, 0x40, 0x77,
        0x02, 0x6d, 0x21, 0x37, 0xc3, 0x82, 0x26, 0xda,
        0x7f, 0xe4, 0xe8, 0x58, 0xd6, 0xa2, 0x3c, 0x5a
    };

    RTPPacket packet1;
    ASSERT_TRUE(packet1.parse(pkt1, 32));
    ASSERT_EQ(packet1.getSynchronizationSource(), 0x3A87);
    ASSERT_EQ(packet1.getTimeStamp(), 2935660);
    ASSERT_EQ(packet1.getPayloadType(), 18);
    ASSERT_EQ(packet1.getVersion(), 2);
    ASSERT_EQ(packet1.getSequenceNumber(), 181);
    ASSERT_EQ(packet1.getPayloadSize(), 20);


    const u_char* pkt2 = packet1.data();
    for (int i = 0; i < 32; i++)
    {
        ASSERT_EQ(pkt1[i], pkt2[i]);
    }

    RTPPacket packet2;
    packet2 = packet1;

    const u_char* pkt3 = packet2.data();
    for (int i = 0; i < 32; i++)
    {
        ASSERT_EQ(pkt1[i], pkt3[i]);
    }


    ASSERT_EQ(packet1.getSynchronizationSource(), packet2.getSynchronizationSource());
    ASSERT_EQ(packet1.getTimeStamp(), packet2.getTimeStamp());
    ASSERT_EQ(packet1.getPayloadType(), packet2.getPayloadType());
    ASSERT_EQ(packet1.getVersion(), packet2.getVersion());
    ASSERT_EQ(packet1.getSequenceNumber(), packet2.getSequenceNumber());

    unsigned int len;
    u_char payload1[8192];
    u_char payload2[8192];
    packet1.getPayload(payload1, len);
    packet2.getPayload(payload2, len);

    ASSERT_EQ(len, packet1.getPayloadSize());
    for (unsigned int i = 0; i < len; i++)
    {
        ASSERT_EQ(payload1[i], payload2[i]);
    }

    RTPPacket packet3;
    packet3.parse(packet2.data(), packet2.getPacketSize());

    ASSERT_EQ(packet3.getSynchronizationSource(), packet2.getSynchronizationSource());
    ASSERT_EQ(packet3.getTimeStamp(), packet2.getTimeStamp());
    ASSERT_EQ(packet3.getPayloadType(), packet2.getPayloadType());
    ASSERT_EQ(packet3.getVersion(), packet2.getVersion());
    ASSERT_EQ(packet3.getSequenceNumber(), packet2.getSequenceNumber());

    u_char payload3[8192];
    packet3.getPayload(payload3, len);
    packet2.getPayload(payload2, len);

    ASSERT_EQ(len, packet3.getPayloadSize());
    for (unsigned int i = 0; i < len; i++)
    {
        ASSERT_EQ(payload3[i], payload2[i]);
    }

    //
    // Check payload integrity
    //

    u_char pl[] = { 0x22, 0xb3, 0x40, 0x77,
                    0x02, 0x6d, 0x21, 0x37, 0xc3, 0x82, 0x26, 0xda,
                    0x7f, 0xe4, 0xe8, 0x58, 0xd6, 0xa2, 0x3c, 0x5a
                  };

    RTPPacket plpacket;
    plpacket.setPayload(pl, 20);

    ASSERT_EQ(plpacket.getPayloadSize(), 20);
    u_char pl1[8192];
    plpacket.getPayload(pl1, len);

    for (int i = 0; i < 20; i++)
        ASSERT_EQ(pl1[i], pl[i]);
}
Example #18
void ReceiverSession::RunNetwork() {
    log("Sending Hi package");
    const char *hi = "HI";
    int status = SendPacket(hi, sizeof(hi), 0, false,
                            sizeof(hi));// Say Hi, should cause the server to send data
    _checkerror(status);

    while (codec == NULL && isRunning) {
        log("Waiting for codec RTCP package...");
        RTPTime::Wait(RTPTime(1, 0));// Wait 1s
    }
    if (!isRunning) return;

    // Start decoder
    status = AMediaCodec_start(codec);
    if (status != AMEDIA_OK) return;
    log("Started decoder");

    // Extracting format data
    int32_t samples = 44100, channels = 1;
    AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &samples);
    AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &channels);
    audioplayer_initPlayback((uint32_t) samples, (uint32_t) channels);

    bool hasInput = true, hasOutput = true;
    int32_t beginTimestamp = -1, lastTimestamp = 0;
    uint16_t lastSeqNum = 0;
    while (hasInput && isRunning) {
        BeginDataAccess();
        if (GotoFirstSourceWithData()) {
            do {
                RTPPacket *pack;
                while ((pack = GetNextPacket()) != NULL) {
                    // We repurposed the marker flag as end of file
                    hasInput = !pack->HasMarker();

                    // Calculate playback time and do some lost packet corrections
                    uint32_t timestamp = pack->GetTimestamp();
                    if (beginTimestamp == -1) {// record first timestamp and use differences
                        beginTimestamp = timestamp;
                        lastSeqNum = pack->GetSequenceNumber() - (uint16_t) 1;
                    }
                    timestamp -= beginTimestamp;
                    if (pack->HasExtension()
                        && pack->GetExtensionID() == AUDIOSYNC_EXTENSION_HEADER_ID
                        && pack->GetExtensionLength() == sizeof(int64_t)) {
                        int64_t *usec = (int64_t*) pack->GetExtensionData();
                        audioplayer_syncPlayback(ntohq(*usec), timestamp);
                    }

                    /*if (pack->HasExtension()) {
                        debugLog("Ext: %" PRIu16 " %lld", pack->GetExtensionID(), (long long)pack->GetExtensionLength());
                    }*/

                    // Handle lost packets, TODO How does this work with multiple senders?
                    if (pack->GetSequenceNumber() != lastSeqNum + 1) {
                        // TODO handle multiple packets with the same timestamp. (Decode together?)
                        /*if (timestamp == lastTimestamp)*/
                        log("Packets jumped %u => %u | %.2f => %.2fs.", lastSeqNum,
                            pack->GetSequenceNumber(), lastTimestamp / 1E6,
                            timestamp / 1E6);
                        // TODO evaluate the impact of this time gap parameter
                        if (timestamp - lastTimestamp > SECOND_MICRO/20) {// 50 ms
                            // According to the docs we need to flush if data is not adjacent.
                            // It is unclear how big these gaps can be and still be tolerable.
                            // During testing this call did cause the codec
                            // to throw errors, most likely in combination with split packets,
                            // where one of a set of packets with the same timestamp got lost
                            log("Flushing codec");
                            AMediaCodec_flush(codec);
                        }
                    }
                    lastSeqNum = pack->GetSequenceNumber();
                    lastTimestamp = timestamp;

                    if (hasInput) {
                        //log("Received %.2f", timestamp / 1000000.0);
                        uint8_t *payload = pack->GetPayloadData();
                        size_t length = pack->GetPayloadLength();
                        status = decoder_enqueueBuffer(codec, payload, length, (int64_t) timestamp);
                        if (status != AMEDIA_OK) hasInput = false;
                    } else {
                        log("Receiver: End of file");
                        // Tell the codec we are done
                        decoder_enqueueBuffer(codec, NULL, -1, (int64_t) timestamp);
                    }
                    hasOutput = decoder_dequeueBuffer(codec, &audioplayer_enqueuePCMFrames);

                    DeletePacket(pack);
                }
            } while (GotoNextSourceWithData());
        }
        EndDataAccess();

        struct timespec req;
        req.tv_sec = 0;
        req.tv_nsec = 1000*1000;
        audioplayer_monitorPlayback();
        // We should give other threads the opportunity to run
        nanosleep(&req, NULL);// TODO base time on duration of received audio?
        audioplayer_monitorPlayback();
    }
    log("Received all data, ending RTP session.");
    BYEDestroy(RTPTime(1, 0), 0, 0);

    while (hasOutput && status == AMEDIA_OK && isRunning) {
        hasOutput = decoder_dequeueBuffer(codec, &audioplayer_enqueuePCMFrames);
        RTPTime::Wait(RTPTime(0, 5000));
    }
    AMediaCodec_stop(codec);
    log("Finished decoding");

    while(isRunning) {
        audioplayer_monitorPlayback();
        RTPTime::Wait(RTPTime(0, 50000));// 50ms
    }
    audioplayer_stopPlayback();
}
Example #19
TEST(RTPPacketTest, test_rtp_resizer_upsize)
{
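    // Upsizing check: four packets with 20-byte payloads are fed into a queue
    // targeting 80-byte frames; nothing can be dequeued until all four arrive, and
    // the combined packet carries the payloads concatenated in order.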
    u_char pkt1[] = {
        0x80, 0x12, 0x00, 0xb5, 0x00, 0x2c,
        0xcb, 0x6c, 0x00, 0x00, 0x3a, 0x87, 0x22, 0xb3,
        0x40, 0x77, 0x02, 0x6d, 0x21, 0x37, 0xc3, 0x82,
        0x26, 0xda, 0x7f, 0xe4, 0xe8, 0x58, 0xd6, 0xa2,
        0x3c, 0x5a
    };

    u_char pkt2[] = {
        0x80, 0x12, 0x00, 0xb6, 0x00, 0x2c,
        0xcc, 0x0c, 0x00, 0x00, 0x3a, 0x87, 0x22, 0xb3,
        0x40, 0x6d, 0x2a, 0xcc, 0xa1, 0x36, 0xfb, 0xba,
        0x8d, 0xb2, 0x7f, 0xed, 0x41, 0xab, 0x1b, 0x1b,
        0xca, 0x5a
    };

    u_char pkt3[] = {
        0x80, 0x12, 0x00, 0xb7, 0x00, 0x2c,
        0xcc, 0xac, 0x00, 0x00, 0x3a, 0x87, 0x0d, 0x64,
        0x40, 0x76, 0x22, 0x64, 0x61, 0x27, 0xc3, 0x8c,
        0x49, 0x72, 0xfb, 0x84, 0xc0, 0x7a, 0x48, 0xb5,
        0xce, 0x58
    };

    u_char pkt4[] = {
        0x80, 0x12, 0x00, 0xb8, 0x00, 0x2c,
        0xcd, 0x4c, 0x00, 0x00, 0x3a, 0x87, 0x22, 0xdb,
        0xc0, 0x76, 0x14, 0x5d, 0x61, 0x63, 0xc5, 0xcc,
        0x05, 0xba, 0xff, 0xe4, 0xc6, 0x6f, 0xd7, 0xda,
        0x0b, 0x7e
    };

    RTPPacket p1, p2, p3, p4;
    ASSERT_TRUE(p1.parse(pkt1, 32));
    ASSERT_TRUE(p2.parse(pkt2, 32));
    ASSERT_TRUE(p3.parse(pkt3, 32));
    ASSERT_TRUE(p4.parse(pkt4, 32));

    RTPResizingQueue q(18, 80, 10, 10, 80);
    ASSERT_EQ(q.getTargetSize(), 80);
    ASSERT_EQ(q.getTargetClockRate(), 160 * 4);

    RTPPacket resized;
    ASSERT_TRUE(q.enqueue(p1));
    ASSERT_FALSE(q.dequeue(resized));
    ASSERT_TRUE(q.enqueue(p2));
    ASSERT_FALSE(q.dequeue(resized));
    ASSERT_TRUE(q.enqueue(p3));
    ASSERT_FALSE(q.dequeue(resized));
    ASSERT_TRUE(q.enqueue(p4));
    ASSERT_TRUE(q.dequeue(resized));

    ASSERT_EQ(resized.getPayloadSize(), 80);

    u_char d0[80];
    u_char d1[20];
    u_char d2[20];
    u_char d3[20];
    u_char d4[20];

    unsigned int len;
    resized.getPayload(d0, len);
    p1.getPayload(d1, len);
    p2.getPayload(d2, len);
    p3.getPayload(d3, len);
    p4.getPayload(d4, len);


    int j = 0;
    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d1[i]);
        j++;
    }

    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d2[i]);
        j++;
    }

    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d3[i]);
        j++;
    }

    for (int i = 0; i < 20; i++)
    {
        ASSERT_EQ(d0[j], d4[i]);
        j++;
    }
}