コード例 #1
0
ファイル: mediabridgesession.cpp プロジェクト: crubia/wt
void MediaBridgeSession::onMetaData(DWORD id,RTMPMetaData *publishedMetaData)
{
	//If we are not sending text there is nothing to do
	if (!sendingText)
		//Exit
		return;

	//Get command name (first AMF parameter)
	AMFString* name = (AMFString*)publishedMetaData->GetParams(0);

	//Fix: guard against malformed meta data with a missing first param
	if (!name)
		//Exit
		return;

	//Check if it is the send text command
	if (name->GetWString().compare(L"onText")==0)
	{
		RTPPacket packet(MediaFrame::Text,TextCodec::T140);

		//Get the text payload (second AMF parameter)
		AMFString *str = (AMFString*)publishedMetaData->GetParams(1);

		//Fix: guard against a missing payload param
		if (!str)
			//Exit
			return;

		//Log
		Log("Sending t140 data [\"%ls\"]\n",str->GetWChar());

		//Set data
		//NOTE(review): the pointer is the wide-char buffer but the length is
		//the UTF-8 size — confirm GetWChar() returns UTF-8 encoded bytes here.
		packet.SetPayload((BYTE*)str->GetWChar(),str->GetUTF8Size());

		//Timestamp is ms elapsed since the session started
		packet.SetTimestamp(getDifTime(&first)/1000);

		//No mark
		packet.SetMark(false);

		//Send it over the text RTP session
		rtpText.SendPacket(packet);
	}
}
コード例 #2
0
ファイル: tools.cpp プロジェクト: abishekk92/voipmonitor
int getUpdDifTime(struct timeval *before)
{
	// Return the time elapsed since *before and reset *before to "now",
	// so consecutive calls measure back-to-back intervals.
	const int elapsed = getDifTime(before);

	gettimeofday(before, 0);

	return elapsed;
}
コード例 #3
0
void RTMPConnection::SendControlMessage(RTMPMessage::Type type,RTMPObject* msg)
{
	//Trace the control message type
	Log("-SendControlMessage [%d]\n",(BYTE)type);
	//Milliseconds elapsed since the connection was opened
	QWORD timestamp = getDifTime(&startTime)/1000;
	//Queue the message on the control chunk stream (id 2), message stream 0
	chunkOutputStreams[2]->SendMessage(new RTMPMessage(0,timestamp,type,msg));
	//Wake up the writer so the queued data gets flushed
	SignalWriteNeeded();
}
コード例 #4
0
void RTMPConnection::SendCommandResponse(DWORD streamId,const wchar_t* name,QWORD transId,AMFData* params,AMFData *extra)
{
	Log("-SendCommandResponse [streamId:%d,name:%ls,transId:%ld]\n",streamId,name,transId);

	//Build the response command and dump it for debugging
	RTMPCommandMessage *response = new RTMPCommandMessage(name,transId,params,extra);
	response->Dump();

	//Milliseconds elapsed since the connection was opened
	QWORD timestamp = getDifTime(&startTime)/1000;

	//Queue it on the command chunk stream (id 3)
	chunkOutputStreams[3]->SendMessage(new RTMPMessage(streamId,timestamp,response));

	//Wake up the writer so the queued data gets flushed
	SignalWriteNeeded();
}
コード例 #5
0
void RTMPConnection::OnPlayedMetaData(DWORD streamId,RTMPMetaData *meta)
{
	//Use the meta data timestamp, falling back to the elapsed connection
	//time in ms when none was set (-1 means "no timestamp")
	QWORD ts = meta->GetTimestamp();
	if (ts==-1)
		ts = getDifTime(&startTime)/1000;

	//Queue a clone of the meta data on the command chunk stream (id 3)
	chunkOutputStreams[3]->SendMessage(new RTMPMessage(streamId,ts,meta->Clone()));

	//Wake up the writer so the queued data gets flushed
	SignalWriteNeeded();
}
コード例 #6
0
int PipeTextInput::WriteText(const wchar_t *data,DWORD size)
{
	//Serialize access to the frame queue
	pthread_mutex_lock(&mutex);

	if (inited)
	{
		//Queue a new text frame stamped with ms elapsed since start
		frames.push_back(new TextFrame(getDifTime(&first)/1000,data,size));

		//Wake up any reader waiting for text
		pthread_cond_signal(&cond);
	} else
		Log("Not inited\n");

	//Release the queue lock
	pthread_mutex_unlock(&mutex);

	//Always report success
	return true;
}
コード例 #7
0
void RTMPConnection::OnPlayedFrame(DWORD streamId,RTMPMediaFrame *frame)
{
	//Use the frame timestamp, falling back to the elapsed connection
	//time in ms when none was set (-1 means "no timestamp")
	QWORD ts = frame->GetTimestamp();
	if (ts==-1)
		ts = getDifTime(&startTime)/1000;

	//Route a clone of the frame to the chunk stream for its media type:
	//audio rides on chunk stream 4, video on chunk stream 5
	switch(frame->GetType())
	{
		case RTMPMediaFrame::Audio:
			chunkOutputStreams[4]->SendMessage(new RTMPMessage(streamId,ts,frame->Clone()));
			break;
		case RTMPMediaFrame::Video:
			chunkOutputStreams[5]->SendMessage(new RTMPMessage(streamId,ts,frame->Clone()));
			break;
	}
	//Wake up the writer so the queued data gets flushed
	SignalWriteNeeded();
}
コード例 #8
0
ファイル: mediabridgesession.cpp プロジェクト: crubia/wt
/****************************************
* RecVideo
*	Obtiene los packetes y los muestra
*****************************************/
int MediaBridgeSession::RecVideo()
{
	//Coders
	VideoDecoder* decoder = NULL;
	VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON);
	//Create new video frame
	RTMPVideoFrame  frame(0,262143);
	//Set codec
	frame.SetVideoCodec(RTMPVideoFrame::FLV1);

	int 	width=0;
	int 	height=0;
	DWORD	numpixels=0;

	Log(">RecVideo\n");

	//Mientras tengamos que capturar
	while(receivingVideo)
	{
		///Obtenemos el paquete
		RTPPacket* packet = rtpVideo.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;

		//Get type
		VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec();


		if ((decoder==NULL) || (type!=decoder->type))
		{
			//Si habia uno nos lo cargamos
			if (decoder!=NULL)
				delete decoder;

			//Creamos uno dependiendo del tipo
			decoder = VideoCodecFactory::CreateDecoder(type);

			//Check
			if (!decoder)
			{
				delete(packet);
				continue;
			}
		}

		//Lo decodificamos
		if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark()))
		{
			delete(packet);
			continue;
		}
		//Get mark
		bool mark = packet->GetMark();

		//Delete packet
		delete(packet);

		//Check if it is last one
		if(!mark)
			continue;

		//Check size
		if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
		{
			//Get dimension
			width = decoder->GetWidth();
			height = decoder->GetHeight();

			//Set size
			numpixels = width*height*3/2;

			//Set also frame rate and bps
			encoder->SetFrameRate(25,300,500);

			//Set them in the encoder
			encoder->SetSize(width,height);
		}

		//Encode next frame
		VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels);

		//Check
		if (!encoded)
			break;

		//Check size
		if (frame.GetMaxMediaSize()<encoded->GetLength())
			//Not enougth space
			return Error("Not enought space to copy FLV encodec frame [frame:%d,encoded:%d",frame.GetMaxMediaSize(),encoded->GetLength());

		//Get full frame
		frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());

		//Set buffer size
		frame.SetMediaSize(encoded->GetLength());

		//Check type
		if (encoded->IsIntra())
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTRA);
		else
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTER);

		//Let the connection set the timestamp
		frame.SetTimestamp(getDifTime(&first)/1000);

		//Send it
		SendMediaFrame(&frame);
	}

	//Check
	if (decoder)
		//Delete
		delete(decoder);
	//Check
	if (encoder)
		//Delete
		delete(encoder);

	Log("<RecVideo\n");
}
コード例 #9
0
ファイル: flvencoder.cpp プロジェクト: tidehc/media-server-1
/****************************************
* EncodeVideo
*	Video capture/encode loop: grabs pictures from videoInput at the target
*	fps, encodes them (FLV1/Sorenson or H.264 depending on videoCodec) and
*	publishes the resulting RTMP video frames to the connection and to all
*	registered media listeners. For H.264 it also builds and sends the AVC
*	decoder configuration record from the first intra frame.
*	Runs until encodingVideo is cleared. Returns 1 (0 via Error on bad codec).
*****************************************/
int FLVEncoder::EncodeVideo()
{
	timeval prev;

	//Start
	Log(">FLVEncoder  encode video\n");

	//Allocate media frame
	RTMPVideoFrame frame(0,262143);

	//Check codec
	switch(videoCodec)
	{
		case VideoCodec::SORENSON:
			//Set video codec
			frame.SetVideoCodec(RTMPVideoFrame::FLV1);
			break;
		case VideoCodec::H264:
			//Set video codec
			frame.SetVideoCodec(RTMPVideoFrame::AVC);
			//Set NAL type
			frame.SetAVCType(RTMPVideoFrame::AVCNALU);
			//No delay
			frame.SetAVCTS(0);
			break;
		default:
			return Error("-Wrong codec type %d\n",videoCodec);
	}
	
	//Create the encoder
	//NOTE(review): the result is not checked for NULL — a failing
	//CreateEncoder would crash on the SetFrameRate call below.
	VideoEncoder *encoder = VideoCodecFactory::CreateEncoder(videoCodec,videoProperties);

	///Set frame rate
	encoder->SetFrameRate(fps,bitrate,intra);

	//Set dimensions
	encoder->SetSize(width,height);

	//Start capturing
	videoInput->StartVideoCapture(width,height,fps);

	//The time of the first one
	gettimeofday(&prev,NULL);

	//No wait for first frame
	DWORD frameTime = 0;

	Log(">FLVEncoder encode vide\n");

	//While we have to keep encoding
	while(encodingVideo)
	{
		//Grab the picture pointer before it gets swapped by the capturer
		BYTE* pic=videoInput->GrabFrame(frameTime);
		
		//Ensure we are still encoding
		if (!encodingVideo)
			break;

		//Check pic
		if (!pic)
			continue;

		//Check if we need to send intra
		if (sendFPU)
		{
			//Set it
			encoder->FastPictureUpdate();
			//Do not send anymore
			sendFPU = false;
		}

		//Encode next frame
		VideoFrame *encoded = encoder->EncodeFrame(pic,videoInput->GetBufferSize());

		//Check
		if (!encoded)
			break;

		//Check size
		if (frame.GetMaxMediaSize()<encoded->GetLength())
		{
			//Not enough space in the RTMP frame buffer
			Error("Not enought space to copy FLV encodec frame [frame:%d,encoded:%d",frame.GetMaxMediaSize(),encoded->GetLength());
			//Drop this frame and continue
			continue;
		}

		//Pace output: wait out the remainder of the previous frame period
		if (frameTime)
		{
			timespec ts;
			//Lock
			pthread_mutex_lock(&mutex);
			//Absolute deadline = prev + frameTime
			calcAbsTimeout(&ts,&prev,frameTime);
			//pthread_cond_timedwait returns 0 when signalled and ETIMEDOUT
			//when the frame period elapsed, so "canceled" is true only when
			//someone explicitly woke us (i.e. we are being stopped)
			int canceled  = !pthread_cond_timedwait(&cond,&mutex,&ts);
			//Unlock
			pthread_mutex_unlock(&mutex);
			//Check if we have been canceled
			if (canceled)
				//Exit
				break;
		}
		//Set sending time of previous frame
		getUpdDifTime(&prev);

		//Timestamp is ms elapsed since the encoder started
		encoded->SetTimestamp(getDifTime(&first)/1000);

		//Set next frame period (ms)
		frameTime = 1000/fps;

		//Set duration
		encoded->SetDuration(frameTime);
		
		//Get full frame
		frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());

		//Set buffer size
		frame.SetMediaSize(encoded->GetLength());

		//Check type
		if (encoded->IsIntra())
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTRA);
		else
			//Set type
			frame.SetFrameType(RTMPVideoFrame::INTER);

	
		//If we need the AVC description but do not have it yet
		if (!frameDesc && encoded->IsIntra() && videoCodec==VideoCodec::H264)
		{
			//Create new description
			AVCDescriptor desc;
			//Set values (baseline profile, 3-byte NALU length prefix)
			desc.SetConfigurationVersion(1);
			desc.SetAVCProfileIndication(0x42);
			desc.SetProfileCompatibility(0x80);
			desc.SetAVCLevelIndication(0x0C);
			desc.SetNALUnitLength(3);
			//Get encoded data
			BYTE *data = encoded->GetData();
			//Get size
			DWORD size = encoded->GetLength();
			//Extract SPS/PPS parameter sets from the intra frame
			desc.AddParametersFromFrame(data,size);
			//Create desc frame stamped with elapsed ms
			frameDesc = new RTMPVideoFrame(getDifTime(&first)/1000,desc);
			//Lock
			pthread_mutex_lock(&mutex);
			//Send it
			SendMediaFrame(frameDesc);
			//unlock
			pthread_mutex_unlock(&mutex);
		}
		
		//Lock
		pthread_mutex_lock(&mutex);
		//Set timestamp
		frame.SetTimestamp(encoded->GetTimeStamp());
		//Publish it
		SendMediaFrame(&frame);
		//For each listener
		for(MediaFrameListeners::iterator it = mediaListeners.begin(); it!=mediaListeners.end(); ++it)
			//Send it
			(*it)->onMediaFrame(RTMPMediaStream::id,*encoded);
		//unlock
		pthread_mutex_unlock(&mutex);
	}
	Log("-FLVEncoder encode video end of loop\n");

	//Stop the capture
	videoInput->StopVideoCapture();

	//Check
	if (encoder)
		//Delete it
		delete(encoder);
	Log("<FLVEncoder encode vide\n");
	
	//Exit
	return 1;
}
コード例 #10
0
ファイル: flvencoder.cpp プロジェクト: tidehc/media-server-1
/*******************************************
* EncodeAudio
*	Audio capture/encode loop: records PCM from audioInput, encodes it with
*	the configured codec and publishes the resulting RTMP audio frames to
*	the connection and to every registered media listener. For AAC it first
*	sends the AudioSpecificConfig. Runs until encodingAudio is cleared.
*	Returns 1 on normal shutdown, 0 (via Error) on setup failure.
*******************************************/
int FLVEncoder::EncodeAudio()
{
	RTMPAudioFrame	audio(0,4096);

	//Start
	Log(">Encode Audio\n");

	//Fill frame properties
	switch(audioCodec)
	{
		case AudioCodec::SPEEX16:
			//Set RTMP data
			audio.SetAudioCodec(RTMPAudioFrame::SPEEX);
			audio.SetSoundRate(RTMPAudioFrame::RATE11khz);
			audio.SetSamples16Bits(1);
			audio.SetStereo(0);
			break;
		case AudioCodec::NELLY8:
			//Set RTMP data
			audio.SetAudioCodec(RTMPAudioFrame::NELLY8khz);
			audio.SetSoundRate(RTMPAudioFrame::RATE11khz);
			audio.SetSamples16Bits(1);
			audio.SetStereo(0);
			break;
		case AudioCodec::NELLY11:
			//Set RTMP data
			audio.SetAudioCodec(RTMPAudioFrame::NELLY);
			audio.SetSoundRate(RTMPAudioFrame::RATE11khz);
			audio.SetSamples16Bits(1);
			audio.SetStereo(0);
			break;
		case AudioCodec::AAC:
			//Set RTMP data
			// If the SoundFormat indicates AAC, the SoundType should be 1 (stereo) and the SoundRate should be 3 (44 kHz).
			// However, this does not mean that AAC audio in FLV is always stereo, 44 kHz data.
			// Instead, the Flash Player ignores these values and extracts the channel and sample rate data is encoded in the AAC bit stream.
			audio.SetAudioCodec(RTMPAudioFrame::AAC);
			audio.SetSoundRate(RTMPAudioFrame::RATE44khz);
			audio.SetSamples16Bits(1);
			audio.SetStereo(1);
			audio.SetAACPacketType(RTMPAudioFrame::AACRaw);
			break;
		default:
			return Error("-Codec %s not supported\n",AudioCodec::GetNameFor(audioCodec));
	}
	
	//Create encoder
	AudioEncoder *encoder = AudioCodecFactory::CreateEncoder(audioCodec,audioProperties);

	//Check
	if (!encoder)
		//Error
		return Error("Error opening encoder");
	
	//Try to set native rate
	DWORD rate = encoder->TrySetRate(audioInput->GetNativeRate());

	//Create audio frame
	AudioFrame frame(audioCodec,rate);

	//Start recording
	audioInput->StartRecording(rate);

	//Timestamp (ms) of the first encoded sample; 0 means "not set yet"
	QWORD ini = 0;

	//Num of samples since ini
	QWORD samples = 0;

	//Allocate samples buffer
	SWORD* recBuffer = (SWORD*) malloc(encoder->numFrameSamples*sizeof(SWORD));

	//Check codec
	if (audioCodec==AudioCodec::AAC)
	{
		//Create AAC config frame
		aacSpecificConfig = new RTMPAudioFrame(0,AACSpecificConfig(rate,1));

		//Lock
		pthread_mutex_lock(&mutex);
		//Send audio desc
		SendMediaFrame(aacSpecificConfig);
		//unlock
		pthread_mutex_unlock(&mutex);
	}

	//While we have to keep capturing
	while(encodingAudio)
	{
		//Check clock drift, do not allow to exceed 4 frames
		if (ini+(samples+encoder->numFrameSamples*4)*1000/encoder->GetClockRate()<getDifTime(&first)/1000)
		{
			Log("-RTMPParticipant clock drift, dropping audio and reseting init time\n");
			//Clear buffer
			audioInput->ClearBuffer();
			//Reset timestamp
			ini = getDifTime(&first)/1000;
			//And samples
			samples = 0;
		}
		
		//Capture one frame worth of PCM samples
		DWORD  recLen = audioInput->RecBuffer(recBuffer,encoder->numFrameSamples);
		//Check len
		if (!recLen)
		{
			//Log
			Debug("-cont\n");
			//Reset timestamp
			ini = getDifTime(&first)/1000;
			//And samples
			samples = 0;
			//Skip
			continue;
		}
		
		//Encode it
		DWORD len;

		while((len=encoder->Encode(recBuffer,recLen,audio.GetMediaData(),audio.GetMaxMediaSize()))>0)
		{
			//Reset so further iterations only flush the encoder's buffered data
			recLen = 0;

			//Set length
			audio.SetMediaSize(len);

			//Check if it is first frame
			if (!ini)
				//Get initial timestamp
				ini = getDifTime(&first)/1000;

			//Set timestamp
			audio.SetTimestamp(ini+samples*1000/encoder->GetClockRate());

			//Increase samples
			samples += encoder->numFrameSamples;

			//Copy to rtp frame
			frame.SetMedia(audio.GetMediaData(),audio.GetMediaSize());
			//Set frame time
			frame.SetTimestamp(audio.GetTimestamp());
			//Set frame duration
			frame.SetDuration(encoder->numFrameSamples);
			//Clear rtp
			frame.ClearRTPPacketizationInfo();
			//Add rtp packet
			frame.AddRtpPacket(0,len,NULL,0);
			
			//Lock
			pthread_mutex_lock(&mutex);
			//Send audio
			SendMediaFrame(&audio);
			//For each listener
			for(MediaFrameListeners::iterator it = mediaListeners.begin(); it!=mediaListeners.end(); ++it)
				//Send it
				(*it)->onMediaFrame(RTMPMediaStream::id,frame);
			//unlock
			pthread_mutex_unlock(&mutex);
		}
	}

	//Stop recording
	audioInput->StopRecording();

	//Delete buffer
	if (recBuffer)
		//Delete
		free(recBuffer);

	//Check codec
	if (encoder)
		//Delete the codec
		delete(encoder);
	
	//Done
	Log("<Encode Audio\n");

	//Fix: the function is declared int but fell off the end (UB); report ok
	return 1;
}
コード例 #11
0
ファイル: videostream.cpp プロジェクト: tidehc/media-server-1
/****************************************
* RecVideo
*	Obtiene los packetes y los muestra
*****************************************/
int VideoStream::RecVideo()
{
	VideoDecoder*	videoDecoder = NULL;
	VideoCodec::Type type;
	timeval 	before;
	timeval		lastFPURequest;
	DWORD		lostCount=0;
	DWORD		frameTime = (DWORD)-1;
	DWORD		lastSeq = RTPPacket::MaxExtSeqNum;
	bool		waitIntra = false;
	
	
	Log(">RecVideo\n");
	
	//Get now
	gettimeofday(&before,NULL);

	//Not sent FPU yet
	setZeroTime(&lastFPURequest);

	//Mientras tengamos que capturar
	while(receivingVideo)
	{
		//Get RTP packet
		RTPPacket* packet = rtp.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;
		
		//Get extended sequence number and timestamp
		DWORD seq = packet->GetExtSeqNum();
		DWORD ts = packet->GetTimestamp();

		//Get packet data
		BYTE* buffer = packet->GetMediaData();
		DWORD size = packet->GetMediaLength();

		//Get type
		type = (VideoCodec::Type)packet->GetCodec();

		//Lost packets since last
		DWORD lost = 0;

		//If not first
		if (lastSeq!=RTPPacket::MaxExtSeqNum)
			//Calculate losts
			lost = seq-lastSeq-1;

		//Increase total lost count
		lostCount += lost;

		//Update last sequence number
		lastSeq = seq;

		//If lost some packets or still have not got an iframe
		if(lostCount || waitIntra)
		{
			//Check if we got listener and more than 1/2 second have elapsed from last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Debug("-Requesting FPU lost %d\n",lostCount);
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is a redundant packet
		if (type==VideoCodec::RED)
		{
			//Get redundant packet
			RTPRedundantPacket* red = (RTPRedundantPacket*)packet;
			//Get primary codec
			type = (VideoCodec::Type)red->GetPrimaryCodec();
			//Check it is not ULPFEC redundant packet
			if (type==VideoCodec::ULPFEC)
			{
				//Delete packet
				delete(packet);
				//Skip
				continue;
			}
			//Update primary redundant payload
			buffer = red->GetPrimaryPayloadData();
			size = red->GetPrimaryPayloadSize();
		}
		
		//Check codecs
		if ((videoDecoder==NULL) || (type!=videoDecoder->type))
		{
			//If we already got one
			if (videoDecoder!=NULL)
				//Delete it
				delete videoDecoder;

			//Create video decorder for codec
			videoDecoder = VideoCodecFactory::CreateDecoder(type);

			//Check
			if (videoDecoder==NULL)
			{
				Error("Error creando nuevo decodificador de video [%d]\n",type);
				//Delete packet
				delete(packet);
				//Next
				continue;
			}
		}

		//Check if we have lost the last packet from the previous frame by comparing both timestamps
		if (ts>frameTime)
		{
			Debug("-lost mark packet ts:%u frameTime:%u\n",ts,frameTime);
			//Try to decode what is in the buffer
			videoDecoder->DecodePacket(NULL,0,1,1);
			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);

				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
		}
		
		//Update frame time
		frameTime = ts;
		
		//Decode packet
		if(!videoDecoder->DecodePacket(buffer,size,lost,packet->GetMark()))
		{
			//Check if we got listener and more than 1/2 seconds have elapsed from last request
			if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
			{
				//Debug
				Log("-Requesting FPU decoder error\n");
				//Reset count
				lostCount = 0;
				//Request it
				listener->onRequestFPU();
				//Request also over rtp
				rtp.RequestFPU();
				//Update time
				getUpdDifTime(&lastFPURequest);
				//Waiting for refresh
				waitIntra = true;
			}
		}

		//Check if it is the last packet of a frame
		if(packet->GetMark())
		{
			if (videoDecoder->IsKeyFrame())
				Debug("-Got Intra\n");
			
			//No frame time yet for next frame
			frameTime = (DWORD)-1;

			//Get picture
			BYTE *frame = videoDecoder->GetFrame();
			DWORD width = videoDecoder->GetWidth();
			DWORD height = videoDecoder->GetHeight();
			//Check values
			if (frame && width && height)
			{
				//Set frame size
				videoOutput->SetVideoSize(width,height);
				
				//Check if muted
				if (!muted)
					//Send it
					videoOutput->NextFrame(frame);
			}
			//Check if we got the waiting refresh
			if (waitIntra && videoDecoder->IsKeyFrame())
				//Do not wait anymore
				waitIntra = false;
		}
		//Delete packet
		delete(packet);
	}

	//Delete encoder
	delete videoDecoder;

	Log("<RecVideo\n");
}
コード例 #12
0
void H264FrameSource::doGetNextFrame()
{
	// Reschedule period derived from the target fps, in microseconds
	double delay = 1000.0 / videoFPS;
	int to_delay = delay * 1000;    // us

	if(!m_videoInput)
		return;

	//Grab the next raw picture from the capture device
	BYTE *pic = m_videoInput->GrabFrame();

	//Check picture
	if (!pic) {
		fFrameSize = 0;
		m_started = 0;
		return;
	}

	//Ask the encoder for an intra frame when a fast picture update is pending
	if (sendFPU)
	{
		videoEncoder->FastPictureUpdate();
	}

	//Encode the grabbed picture
	VideoFrame *videoFrame = videoEncoder->EncodeFrame(pic,m_videoInput->GetBufferSize());

	//If encoding failed, reschedule ourselves and bail out
	if (!videoFrame){
		fFrameSize = 0;
		m_started = 0;
		Log("-----Error encoding video\n");
		nextTask() = envir().taskScheduler().scheduleDelayedTask(to_delay,
				(TaskFunc*)FramedSource::afterGetting, this);
		return;
	}

	//The FPU request has been honoured now
	if(sendFPU)
		sendFPU = false;

	//Frame timestamp is ms elapsed since the source started
	videoFrame->SetTimestamp(getDifTime(&first)/1000);

	fFrameSize = videoFrame->GetLength();

	//Fix: clamp to the sink buffer size BEFORE copying. The original code
	//memmove'd fFrameSize bytes into fTo and only truncated afterwards,
	//writing past the fMaxSize-byte buffer live555 handed us whenever the
	//encoded frame was larger than the buffer.
	if (fFrameSize > fMaxSize) {
		fNumTruncatedBytes = fFrameSize - fMaxSize;
		fFrameSize = fMaxSize;
	}
	else {
		fNumTruncatedBytes = 0;
	}

	memmove(fTo, videoFrame->GetData(), fFrameSize);

	gettimeofday(&fPresentationTime, NULL);

	//Schedule delivery of the next frame
	nextTask() = envir().taskScheduler().scheduleDelayedTask(to_delay,
				(TaskFunc*)FramedSource::afterGetting, this);
	
}
コード例 #13
0
/***********************
 * ParseData
 *	Parse a buffer of incoming RTMP data. Drives the connection state
 *	machine: first the handshake (C0/C1/C2 in, S0/S1/S2 out, with optional
 *	digest verification), then chunk basic headers, chunk message headers
 *	(types 0-3, plus extended timestamps) and chunk payload, dispatching
 *	every completed RTMP message to the matching Process* handler.
 *	Also accounts received bytes and sends window Acknowledgements.
 **********************/
void RTMPConnection::ParseData(BYTE *data,const DWORD size)
{
	RTMPChunkInputStreams::iterator it;
	int len = 0;
	int digesOffsetMethod = 0;

	//Get pointer and data size
	BYTE *buffer = data;
	DWORD bufferSize = size;
	//NOTE(review): digestPosServer is written below but never read afterwards
	DWORD digestPosServer = 0;

	//Increase current window
	curWindowSize += size;
	//And total size
	recvSize += size;
	
	//Check current window
	if (windowSize && curWindowSize>windowSize)
	{
		//Acknowledge the total number of bytes received so far
		SendControlMessage(RTMPMessage::Acknowledgement,RTMPAcknowledgement::Create(recvSize));
		//Reset window
		curWindowSize = 0;
	}

	//While there is data
	while(bufferSize>0)
	{
		//Check connection state
		switch(state)
		{
			case HEADER_C0_WAIT:
				//Parse c0
				len = c0.Parse(buffer,bufferSize);
				//Move
				buffer+=len;
				bufferSize-=len;
				//If it is parsed
				if (c0.IsParsed())
				{
					//Move to next state
					state = HEADER_C1_WAIT;
					//Debug
					Log("Received c0 version: %d\n",c0.GetRTMPVersion());
				}
				break;
			case HEADER_C1_WAIT:
				//Parse c1
				len = c1.Parse(buffer,bufferSize);
				//Move
				buffer+=len;
				bufferSize-=len;
				//If it is parsed
				if (c1.IsParsed())
				{
					Log("-Received C1 client version [%d,%d,%d,%d]\n",c1.GetVersion()[0],c1.GetVersion()[1],c1.GetVersion()[2],c1.GetVersion()[3]);
					//Set s0 data
					s01.SetRTMPVersion(3);
					//Set current timestamp
					s01.SetTime(getDifTime(&startTime)/1000);
					//Check which handshake flavour the client uses
					if (c1.GetVersion()[3])
					{
						//Verify client
						digesOffsetMethod = VerifyC1Data(c1.GetData(),c1.GetSize());
						//Set version
						s01.SetVersion(3,5,1,1);
						//Check if we found the digest offset
						digest = (digesOffsetMethod>0);
					} else {
						//Set version to zero
						s01.SetVersion(0,0,0,0);
						//Do not calculate digest
						digest = false;
					}
					//Set random data from memory
					BYTE* random = s01.GetRandom();
					//Fill it
					for (int i=0;i<s01.GetRandomSize();i++)
						//With random
						random[i] = rand();
					//If we have to calculate digest
					if (digest)
						//calculate digest for s1 only, skipping s0
						digestPosServer = GenerateS1Data(digesOffsetMethod,s01.GetData()+1,s01.GetSize()-1);
					//Send S01 data
					WriteData(s01.GetData(),s01.GetSize());	
					//Move to next state
					state = HEADER_C2_WAIT;
					//Debug
					Log("Sending s0 and s1 with digest %s offset method %d\n",digest?"on":"off",digesOffsetMethod);
				}
				break;
			case HEADER_C2_WAIT:
				//Parse c2
				len = c2.Parse(buffer,bufferSize);
				//Move
				buffer+=len;
				bufferSize-=len;
				//If it is parsed
				if (c2.IsParsed())
				{
					//Set s2 data
					s2.SetTime(c1.GetTime());
					//Set current timestamp
					s2.SetTime2(getDifTime(&startTime)/1000);
					//Echo c1 data
					s2.SetRandom(c1.GetRandom(),c1.GetRandomSize());
					//If we have to calculate digest
					if (digest)
						//calculate digest for s2
						GenerateS2Data(digesOffsetMethod,s2.GetData(),s2.GetSize());
					//Send S2 data
					WriteData(s2.GetData(),s2.GetSize());	
					//Move to next state
					state = CHUNK_HEADER_WAIT;
					//Debug
					Log("Received c2, sending c2. CONNECTED.\n");
				}
				break;
			case CHUNK_HEADER_WAIT:
				//Parse chunk basic header
				len = header.Parse(buffer,bufferSize);
				//Move
				buffer+=len;
				bufferSize-=len;
				//If it is parsed
				if (header.IsParsed())
				{
					//Clean all buffers
					type0.Reset();
					type1.Reset();
					type2.Reset();
					extts.Reset();
					//Move to next state
					state = CHUNK_TYPE_WAIT;
					//Debug
					//Log("Received header [fmt:%d,stream:%d]\n",header.GetFmt(),header.GetStreamId());
					//header.Dump();
				}
				break;
			case CHUNK_TYPE_WAIT:
				//Get stream id
				chunkStreamId = header.GetStreamId();
				//Find chunk stream
				it = chunkInputStreams.find(chunkStreamId);
				//Check if we have a new chunk stream or already got one
				if (it==chunkInputStreams.end())
				{
					//Log
					//Log("Creating new chunk stream [id:%d]\n",chunkStreamId);
					//Create it
					chunkInputStream = new RTMPChunkInputStream();
					//Append it
					chunkInputStreams[chunkStreamId] = chunkInputStream;	
				} else 
					//Set the stream
					chunkInputStream = it->second;
				//Switch on chunk message header format (0=full .. 3=none)
				switch(header.GetFmt())
				{
					case 0:
						//Check if the buffer type has been parsed
						len = type0.Parse(buffer,bufferSize);
						//Check if it is parsed
						if (type0.IsParsed())
						{
							//Debug
							//Debug("Got type 0 header [timestamp:%lu,messagelength:%d,type:%d,streamId:%d]\n",type0.GetTimestamp(),type0.GetMessageLength(),type0.GetMessageTypeId(),type0.GetMessageStreamId());
							//type0.Dump();
							//Set data for stream
							chunkInputStream->SetMessageLength	(type0.GetMessageLength());
							chunkInputStream->SetMessageTypeId	(type0.GetMessageTypeId());
							chunkInputStream->SetMessageStreamId	(type0.GetMessageStreamId());
							//Check if we have extended timestamp
							if (type0.GetTimestamp()!=0xFFFFFF)
							{
								//Set timestamp (absolute for type 0)
								chunkInputStream->SetTimestamp(type0.GetTimestamp());
								//No timestamp delta
								chunkInputStream->SetTimestampDelta(0);
								//Move to next state
								state = CHUNK_DATA_WAIT;
							} else
								//We have to read 4 more bytes
								state = CHUNK_EXT_TIMESTAMP_WAIT;
							//Start data reception
							chunkInputStream->StartChunkData();
							//Reset sent bytes in buffer
							chunkLen = 0;
						}	
						break;
					case 1:
						//Check if the buffer type has been parsed
						len = type1.Parse(buffer,bufferSize);
						//Check if it is parsed
						if (type1.IsParsed())
						{
							//Debug
							//Debug("Got type 1 header [timestampDelta:%u,messagelength:%d,type:%d]\n",type1.GetTimestampDelta(),type1.GetMessageLength(),type1.GetMessageTypeId());
							//type1.Dump();
							//Set data for stream (stream id inherited from previous chunk)
							chunkInputStream->SetMessageLength(type1.GetMessageLength());
							chunkInputStream->SetMessageTypeId(type1.GetMessageTypeId());
							//Check if we have extended timestamp
							if (type1.GetTimestampDelta()!=0xFFFFFF)
							{
								//Set timestamp delta
								chunkInputStream->SetTimestampDelta(type1.GetTimestampDelta());
								//Set timestamp
								chunkInputStream->IncreaseTimestampWithDelta();
								//Move to next state
								state = CHUNK_DATA_WAIT;
							} else
								//We have to read 4 more bytes
								state = CHUNK_EXT_TIMESTAMP_WAIT;
							//Start data reception
							chunkInputStream->StartChunkData();
							//Reset sent bytes in buffer
							chunkLen = 0;
						}	
						break;
					case 2:
						//Check if the buffer type has been parsed
						len = type2.Parse(buffer,bufferSize);
						//Check if it is parsed
						if (type2.IsParsed())
						{
							//Debug
							//Debug("Got type 2 header [timestampDelta:%lu]\n",type2.GetTimestampDelta());
							//type2.Dump();
							//Check if we have extended timestamp
							if (type2.GetTimestampDelta()!=0xFFFFFF)
							{
								//Set timestamp delta
								chunkInputStream->SetTimestampDelta(type2.GetTimestampDelta());
								//Increase timestamp
								chunkInputStream->IncreaseTimestampWithDelta();
								//Move to next state
								state = CHUNK_DATA_WAIT;
							} else
								//We have to read 4 more bytes
								state = CHUNK_EXT_TIMESTAMP_WAIT;
							//Start data reception
							chunkInputStream->StartChunkData();
							//Reset sent bytes in buffer
							chunkLen = 0;
						}	
						break;
					case 3:
						//Debug("Got type 3 header\n");
						//No header chunk: everything inherited from previous chunk
						len = 0;
						//If it is the first chunk of a new message
						if (chunkInputStream->IsFirstChunk())
							//Increase timestamp with previous delta
							chunkInputStream->IncreaseTimestampWithDelta();
						//Start data reception
						chunkInputStream->StartChunkData();
						//Move to next state
						state = CHUNK_DATA_WAIT;
						//Reset sent bytes in buffer
						chunkLen = 0;
						break;
				}
				//Move pointer
				buffer += len;
				bufferSize -= len;
				break;
			case CHUNK_EXT_TIMESTAMP_WAIT:
				//Parse extended timestamp
				len = extts.Parse(buffer,bufferSize);
				//Move
				buffer+=len;
				bufferSize-=len;
				//If it is parsed
				if (extts.IsParsed())
				{
					//Check header type
					//NOTE(review): type 0 headers carry an absolute extended
					//timestamp too, yet only fmt==1 takes the absolute branch
					//here — confirm fmt==0 is intended to be treated as delta.
					if (header.GetFmt()==1)
					{
						//Set the timestamp
						chunkInputStream->SetTimestamp(extts.GetTimestamp());
						//No timestamp delta
						chunkInputStream->SetTimestampDelta(0);
					} else {
						//Set timestamp delta
						chunkInputStream->SetTimestampDelta(extts.GetTimestamp());
						//Increase timestamp
						chunkInputStream->IncreaseTimestampWithDelta();
					}
					//Move to next state
					state = CHUNK_DATA_WAIT;
				}
				break;
			case CHUNK_DATA_WAIT:
				//Check max buffer size
				if (maxChunkSize && chunkLen+bufferSize>maxChunkSize)
					//Parse only max chunk size
					len = maxChunkSize-chunkLen;
				else
					//parse all data
					len = bufferSize;
				//Check size
				if (!len)
				{
					//Debug
					//NOTE(review): the format string has two %d specifiers but
					//no arguments are passed — the logged values are garbage
					//(undefined behavior for a printf-style vararg call).
					Error("Chunk data of size zero  [maxChunkSize:%d,chunkLen:%d]\n");
					//Skip
					break;
				}
				//Parse data
				len = chunkInputStream->Parse(buffer,len);
				//Check if it has parsed a msg
				if (chunkInputStream->IsParsed())
				{
					//Log("Got message [timestamp:%lu]\n",chunkInputStream->GetTimestamp());
					//Get message
					RTMPMessage* msg = chunkInputStream->GetMessage();
					//Get message stream
					DWORD messageStreamId = msg->GetStreamId();
					//Dispatch on message type
					if (msg->IsControlProtocolMessage())
					{
						//Get message type
						BYTE type = msg->GetType();
						//Get control protocol message
						RTMPObject* ctrl = msg->GetControlProtocolMessage();
						//Process msg
						ProcessControlMessage(messageStreamId,type,ctrl);
					} else if (msg->IsCommandMessage()) {
						//Get Command message
						RTMPCommandMessage* cmd = msg->GetCommandMessage();
						//Dump msg
						cmd->Dump();
						//Process msg
						ProcessCommandMessage(messageStreamId,cmd);
					} else if (msg->IsMedia()) {
						//Get media frame
						RTMPMediaFrame* frame = msg->GetMediaFrame();
						//Check if we have it
						if (frame)
							//Process message
							ProcessMediaData(messageStreamId,frame);
					} else if (msg->IsMetaData() || msg->IsSharedObject()) {
						//Get object
						RTMPMetaData *meta = msg->GetMetaData();
						//Debug it
						meta->Dump();
						//Process meta data
						ProcessMetaData(messageStreamId,meta);
					} else {
						//Unknown type: should never happen
						Error("Unknown rtmp message, should never happen\n");
					}
					//Delete msg
					delete(msg);
					//Move to next chunk
					state = CHUNK_HEADER_WAIT;	
					//Clean header
					header.Reset();
				}
				//Increase buffer length
				chunkLen += len;
				//Move pointer
				buffer += len;
				bufferSize -= len;
				//Check max chunk size
				if (maxChunkSize && chunkLen>=maxChunkSize)
				{
					//Wait for next buffer header
					state = CHUNK_HEADER_WAIT;
					//Clean header
					header.Reset();
				}

				break;
		}
	}
}
コード例 #14
0
/***********************************
* MixAudio
*	Mezcla los audios
************************************/
int AudioMixer::MixAudio()
{
	timeval  tv;
	Audios::iterator	it;
	DWORD step = 10;
	QWORD curr = 0;
	QWORD prev = 0;

	//Logeamos
	Log(">MixAudio\n");

	//Init ts
	getUpdDifTime(&tv);

	//Mientras estemos mezclando
	while(mixingAudio)
	{
		//zero the mixer buffer
		memset(mixer_buffer, 0, MIXER_BUFFER_SIZE*sizeof(WORD));

		//Wait until next to process again minus process time
		msleep(step*1000-(getDifTime(&tv)-curr*1000));

		//Get new time
		curr = getDifTime(&tv)/1000;
		
		//Get time elapsed
		QWORD diff = curr-prev;

		//Update
		prev = curr;

		//Get num samples at 8Khz
		DWORD numSamples = diff*8;

		//Block list
		lstAudiosUse.WaitUnusedAndLock();

		//At most the maximum
		if (numSamples>MIXER_BUFFER_SIZE)
			//Set it at most (shoudl never happen)
			numSamples = MIXER_BUFFER_SIZE;

		//First pass: Iterate through the audio inputs and calculate the sum of all streams
		for(it = lstAudios.begin(); it != lstAudios.end(); it++)
		{
			//Get the source
			AudioSource *audio = it->second;

			//And the audio buffer
			WORD *buffer = audio->buffer;

			//Get the samples from the fifo
			audio->len = audio->output->GetSamples(buffer,numSamples);

			//Mix the audio
			for(int i = 0; i < audio->len; i++)
				//MIX
				mixer_buffer[i] += buffer[i];
		}

		// Second pass: Calculate this stream's output
		for(it = lstAudios.begin(); it != lstAudios.end(); it++)
		{
			//Get the source
			AudioSource *audio = it->second;

			//Check audio
			if (!audio)
				//Next
				continue;
			//And the audio buffer
			WORD *buffer = audio->buffer;

			//Calculate the result
			for(int i=0; i<audio->len; i++)
				//We don't want to hear our own signal
				buffer[i] = mixer_buffer[i] - buffer[i];

			//Check length
			if (audio->len<numSamples)
				//Copy the rest
				memcpy(((BYTE*)buffer)+audio->len*sizeof(WORD),((BYTE*)mixer_buffer)+audio->len*sizeof(WORD),(numSamples-audio->len)*sizeof(WORD));

			//PUt the output
			audio->input->PutSamples(buffer,numSamples);
		}

		//Unblock list
		lstAudiosUse.Unlock();
	}

	//Logeamos
	Log("<MixAudio\n");

	return 1;
}
コード例 #15
0
ファイル: mediabridgesession.cpp プロジェクト: crubia/wt
/****************************************
* RecAudio
*	Obtiene los packetes y los muestra
*****************************************/
int MediaBridgeSession::RecAudio()
{
	DWORD		firstAudio = 0;
	DWORD		timeStamp=0;
	DWORD		firstTS = 0;
	SWORD		raw[512];
	DWORD		rawSize = 512;
	DWORD		rawLen;

	//Create new audio frame
	RTMPAudioFrame  *audio = new RTMPAudioFrame(0,RTPPAYLOADSIZE);

	Log(">RecAudio\n");

	//Mientras tengamos que capturar
	while(receivingAudio)
	{
		//Obtenemos el paquete
		RTPPacket *packet = rtpAudio.GetPacket();

		//Check
		if (!packet)
			//Next
			continue;

		//Get type
		AudioCodec::Type codec = (AudioCodec::Type)packet->GetCodec();

		//Check rtp type
		if (codec==AudioCodec::SPEEX16)
		{
			//TODO!!!!
		}

		//Check if we have a decoder
		if (!rtpAudioDecoder || rtpAudioDecoder->type!=codec)
		{
			//Check
			if (rtpAudioDecoder)
				//Delete old one
				delete(rtpAudioDecoder);
			//Create new one
			rtpAudioDecoder = AudioCodecFactory::CreateDecoder(codec);
		}

		//Decode it
		rawLen = rtpAudioDecoder->Decode(packet->GetMediaData(),packet->GetMediaLength(),raw,rawSize);

		//Delete packet
		delete(packet);

		//Rencode it
		DWORD len;

		while((len=rtmpAudioEncoder->Encode(raw,rawLen,audio->GetMediaData(),audio->GetMaxMediaSize()))>0)
		{
			//REset
			rawLen = 0;

			//Set length
			audio->SetMediaSize(len);

			switch(rtmpAudioEncoder->type)
			{
				case AudioCodec::SPEEX16:
					//Set RTMP data
					audio->SetAudioCodec(RTMPAudioFrame::SPEEX);
					audio->SetSoundRate(RTMPAudioFrame::RATE11khz);
					audio->SetSamples16Bits(1);
					audio->SetStereo(0);
					break;
				case AudioCodec::NELLY8:
					//Set RTMP data
					audio->SetAudioCodec(RTMPAudioFrame::NELLY8khz);
					audio->SetSoundRate(RTMPAudioFrame::RATE11khz);
					audio->SetSamples16Bits(1);
					audio->SetStereo(0);
					break;
				case AudioCodec::NELLY11:
					//Set RTMP data
					audio->SetAudioCodec(RTMPAudioFrame::NELLY);
					audio->SetSoundRate(RTMPAudioFrame::RATE11khz);
					audio->SetSamples16Bits(1);
					audio->SetStereo(0);
					break;
			}

			//If it is first
			if (!firstTS)
			{
				//Get first audio time
				firstAudio = getDifTime(&first)/1000;
				//It is first
				firstTS = timeStamp;
			}

			DWORD ts = firstAudio +(timeStamp-firstTS)/8;
			//Set timestamp
			audio->SetTimestamp(ts);

			//Send packet
			SendMediaFrame(audio);
		}
	}

	//Check
	if (audio)
		//Delete it
		delete(audio);

	Log("<RecAudio\n");
}
コード例 #16
0
ファイル: videostream.cpp プロジェクト: tidehc/media-server-1
/*******************************************
* SendVideo
*	Capturamos el video y lo mandamos
*******************************************/
int VideoStream::SendVideo()
{
	timeval prev;
	timeval lastFPU;
	
	DWORD num = 0;
	QWORD overslept = 0;

	Acumulator bitrateAcu(1000);
	Acumulator fpsAcu(1000);
	
	Log(">SendVideo [width:%d,size:%d,bitrate:%d,fps:%d,intra:%d]\n",videoGrabWidth,videoGrabHeight,videoBitrate,videoFPS,videoIntraPeriod);

	//Creamos el encoder
	VideoEncoder* videoEncoder = VideoCodecFactory::CreateEncoder(videoCodec,videoProperties);

	//Comprobamos que se haya creado correctamente
	if (videoEncoder == NULL)
		//error
		return Error("Can't create video encoder\n");

	//Comrpobamos que tengamos video de entrada
	if (videoInput == NULL)
		return Error("No video input");

	//Iniciamos el tama�o del video
	if (!videoInput->StartVideoCapture(videoGrabWidth,videoGrabHeight,videoFPS))
		return Error("Couldn't set video capture\n");

	//Start at 80%
	int current = videoBitrate*0.8;

	//Send at higher bitrate first frame, but skip frames after that so sending bitrate is kept
	videoEncoder->SetFrameRate(videoFPS,current*5,videoIntraPeriod);

	//No wait for first
	QWORD frameTime = 0;

	//Iniciamos el tamama�o del encoder
 	videoEncoder->SetSize(videoGrabWidth,videoGrabHeight);

	//The time of the previos one
	gettimeofday(&prev,NULL);

	//Fist FPU
	gettimeofday(&lastFPU,NULL);
	
	//Started
	Log("-Sending video\n");

	//Mientras tengamos que capturar
	while(sendingVideo)
	{
		//Nos quedamos con el puntero antes de que lo cambien
		BYTE *pic = videoInput->GrabFrame(frameTime/1000);

		//Check picture
		if (!pic)
			//Exit
			continue;

		//Check if we need to send intra
		if (sendFPU)
		{
			//Do not send anymore
			sendFPU = false;
			//Do not send if we just send one (100ms)
			if (getDifTime(&lastFPU)/1000>minFPUPeriod)
			{
				//Send at higher bitrate first frame, but skip frames after that so sending bitrate is kept
				videoEncoder->SetFrameRate(videoFPS,current*5,videoIntraPeriod);
				//Reste frametime so it is calcualted afterwards
				frameTime = 0;
				//Set it
				videoEncoder->FastPictureUpdate();
				//Update last FPU
				getUpdDifTime(&lastFPU);
			}
		}

		//Calculate target bitrate
		int target = current;

		//Check temporal limits for estimations
		if (bitrateAcu.IsInWindow())
		{
			//Get real sent bitrate during last second and convert to kbits 
			DWORD instant = bitrateAcu.GetInstantAvg()/1000;
			//If we are in quarentine
			if (videoBitrateLimitCount)
				//Limit sending bitrate
				target = videoBitrateLimit;
			//Check if sending below limits
			else if (instant<videoBitrate)
				//Increase a 8% each second or fps kbps
				target += (DWORD)(target*0.08/videoFPS)+1;
		}

		//Check target bitrate agains max conf bitrate
		if (target>videoBitrate*1.2)
			//Set limit to max bitrate allowing a 20% overflow so instant bitrate can get closer to target
			target = videoBitrate*1.2;

		//Check limits counter
		if (videoBitrateLimitCount>0)
			//One frame less of limit
			videoBitrateLimitCount--;

		//Check if we have a new bitrate
		if (target && target!=current)
		{
			//Reset bitrate
			videoEncoder->SetFrameRate(videoFPS,target,videoIntraPeriod);
			//Upate current
			current = target;
		}
		
		//Procesamos el frame
		VideoFrame *videoFrame = videoEncoder->EncodeFrame(pic,videoInput->GetBufferSize());

		//If was failed
		if (!videoFrame)
			//Next
			continue;
		
		//Increase frame counter
		fpsAcu.Update(getTime()/1000,1);
		
		//Check
		if (frameTime)
		{
			timespec ts;
			//Lock
			pthread_mutex_lock(&mutex);
			//Calculate slept time
			QWORD sleep = frameTime;
			//Remove extra sleep from prev
			if (overslept<sleep)
				//Remove it
				sleep -= overslept;
			else
				//Do not overflow
				sleep = 1;

			//Calculate timeout
			calcAbsTimeoutNS(&ts,&prev,sleep);
			//Wait next or stopped
			int canceled  = !pthread_cond_timedwait(&cond,&mutex,&ts);
			//Unlock
			pthread_mutex_unlock(&mutex);
			//Check if we have been canceled
			if (canceled)
				//Exit
				break;
			//Get differencence
			QWORD diff = getDifTime(&prev);
			//If it is biffer
			if (diff>frameTime)
				//Get what we have slept more
				overslept = diff-frameTime;
			else
				//No oversletp (shoulddn't be possible)
				overslept = 0;
		}

		//Increase frame counter
		fpsAcu.Update(getTime()/1000,1);
		
		//If first
		if (!frameTime)
		{
			//Set frame time, slower
			frameTime = 5*1000000/videoFPS;
			//Restore bitrate
			videoEncoder->SetFrameRate(videoFPS,current,videoIntraPeriod);
		} else {
			//Set frame time
			frameTime = 1000000/videoFPS;
		}
		
		//Add frame size in bits to bitrate calculator
		bitrateAcu.Update(getDifTime(&ini)/1000,videoFrame->GetLength()*8);

		//Set frame timestamp
		videoFrame->SetTimestamp(getDifTime(&ini)/1000);

		//Check if we have mediaListener
		if (mediaListener)
			//Call it
			mediaListener->onMediaFrame(*videoFrame);

		//Set sending time of previous frame
		getUpdDifTime(&prev);

		//Calculate sending times based on bitrate
		DWORD sendingTime = videoFrame->GetLength()*8/current;

		//Adjust to maximum time
		if (sendingTime>frameTime/1000)
			//Cap it
			sendingTime = frameTime/1000;

		//If it was a I frame
		if (videoFrame->IsIntra())
			//Clean rtp rtx buffer
			rtp.FlushRTXPackets();

		//Send it smoothly
		smoother.SendFrame(videoFrame,sendingTime);

		//Dump statistics
		if (num && ((num%videoFPS*10)==0))
		{
			Debug("-Send bitrate target=%d current=%d avg=%llf rate=[%llf,%llf] fps=[%llf,%llf] limit=%d\n",target,current,bitrateAcu.GetInstantAvg()/1000,bitrateAcu.GetMinAvg()/1000,bitrateAcu.GetMaxAvg()/1000,fpsAcu.GetMinAvg(),fpsAcu.GetMaxAvg(),videoBitrateLimit);
			bitrateAcu.ResetMinMax();
			fpsAcu.ResetMinMax();
		}
		num++;
	}

	Log("-SendVideo out of loop\n");

	//Terminamos de capturar
	videoInput->StopVideoCapture();

	//Check
	if (videoEncoder)
		//Borramos el encoder
		delete videoEncoder;

	//Salimos
	Log("<SendVideo [%d]\n",sendingVideo);

	return 0;
}
コード例 #17
0
ファイル: mediabridgesession.cpp プロジェクト: crubia/wt
/****************************************
* RecText
*	Obtiene los packetes y los muestra
*****************************************/
int MediaBridgeSession::RecText()
{
	DWORD		timeStamp=0;
	DWORD		lastSeq = RTPPacket::MaxExtSeqNum;

	Log(">RecText\n");

	//Mientras tengamos que capturar
	while(receivingText)
	{
		//Get packet
		RTPPacket *packet = rtpText.GetPacket();

		//Check packet
		if (!packet)
			continue;

		//Get data
		BYTE* data = packet->GetMediaData();
		//And length
		DWORD size = packet->GetMediaLength();

		//Get extended sequence number
		DWORD seq = packet->GetExtSeqNum();

		//Lost packets since last one
		DWORD lost = 0;

		//If not first
		if (lastSeq!=RTPPacket::MaxExtSeqNum)
			//Calculate losts
			lost = seq-lastSeq-1;

		//Update last sequence number
		lastSeq = seq;

		//Get type
		TextCodec::Type type = (TextCodec::Type)packet->GetCodec();

		//Check the type of data
		if (type==TextCodec::T140RED)
		{
			//Get redundant packet
			RTPRedundantPacket* red = (RTPRedundantPacket*)packet;

			//Check lost packets count
			if (lost == 0)
			{
				//Create text frame
				TextFrame frame(timeStamp ,red->GetPrimaryPayloadData(),red->GetPrimaryPayloadSize());
				//Create new timestamp associated to latest media time
				RTMPMetaData meta(getDifTime(&first)/1000);

				//Add text name
				meta.AddParam(new AMFString(L"onText"));
				//Set data
				meta.AddParam(new AMFString(frame.GetWChar()));

				//Send data
				SendMetaData(&meta);
			} else {
				//Timestamp of first packet (either receovered or not)
				DWORD ts = timeStamp;

				//Check if we have any red pacekt
				if (red->GetRedundantCount()>0)
					//Get the timestamp of first redundant packet
					ts = red->GetRedundantTimestamp(0);

				//If we have lost too many
				if (lost>red->GetRedundantCount())
					//Get what we have available only
					lost = red->GetRedundantCount();

				//For each lonot recoveredt packet send a mark
				for (int i=red->GetRedundantCount();i<lost;i++)
				{
					//Create frame of lost replacement
					TextFrame frame(ts,LOSTREPLACEMENT,sizeof(LOSTREPLACEMENT));
					//Create new timestamp associated to latest media time
					RTMPMetaData meta(getDifTime(&first)/1000);

					//Add text name
					meta.AddParam(new AMFString(L"onText"));
					//Set data
					meta.AddParam(new AMFString(frame.GetWChar()));

					//Send data
					SendMetaData(&meta);
				}

				//Fore each recovered packet
				for (int i=red->GetRedundantCount()-lost;i<red->GetRedundantCount();i++)
				{
					//Create frame from recovered data
					TextFrame frame(red->GetRedundantTimestamp(i),red->GetRedundantPayloadData(i),red->GetRedundantPayloadSize(i));
					//Create new timestamp associated to latest media time
					RTMPMetaData meta(getDifTime(&first)/1000);

					//Add text name
					meta.AddParam(new AMFString(L"onText"));
					//Set data
					meta.AddParam(new AMFString(frame.GetWChar()));

					//Send data
					SendMetaData(&meta);
				}
			}
		} else {
			//Create frame
			TextFrame frame(timeStamp,data,size);
			//Create new timestamp associated to latest media time
			RTMPMetaData meta(getDifTime(&first)/1000);

			//Add text name
			meta.AddParam(new AMFString(L"onText"));
			//Set data
			meta.AddParam(new AMFString(frame.GetWChar()));

			//Send data
			SendMetaData(&meta);
		}
	}

	Log("<RecText\n");
}
コード例 #18
0
int MP4Streamer::PlayLoop()
{
	QWORD audioNext = MP4_INVALID_TIMESTAMP;
	QWORD videoNext = MP4_INVALID_TIMESTAMP;
	QWORD textNext  = MP4_INVALID_TIMESTAMP;
	timeval tv ;
	timespec ts;

	Log(">MP4Streamer::PlayLoop()\n");

	//If it is from the begining
	if (!seeked)
	{
		// If we have audio
		if (audio)
		{
			//Reset
			audio->Reset();
			// Send audio
			audioNext = audio->GetNextFrameTime();
		}

		// If we have video
		if (video)
		{
			//Reset
			video->Reset();
			// Send video
			videoNext = video->GetNextFrameTime();
		}

		// If we have text
		if (text)
		{
			//Reset
			text->Reset();
			//Get the next frame time
			textNext = text->GetNextFrameTime();
		}
	} else {
		//If we have video
		if (video)
			//Get nearest i frame
			videoNext = video->SeekNearestSyncFrame(seeked);
		//If we have audio
		if (audio)
			//Get nearest frame
			audioNext = audio->Seek(seeked);
		//If we have text
		if (text)
			//Get nearest frame
			textNext = text->Seek(seeked);
	}

	//If first text frame is not sent inmediatelly
	if (text && textNext!=seeked)
		//send previous text subtitle
		text->ReadPrevious(seeked,listener);

	// Calculate start time
	getUpdDifTime(&tv);

	//Lock
	pthread_mutex_lock(&mutex);

	//Time counter
	QWORD t = 0;

	// Wait control messages or finish of both streams
	while ( opened && playing && (!(audioNext==MP4_INVALID_TIMESTAMP && videoNext==MP4_INVALID_TIMESTAMP && textNext==MP4_INVALID_TIMESTAMP)))
	{
		// Get next time
		if (audioNext<videoNext)
		{
			if (audioNext<textNext)
				t = audioNext;
			else
				t = textNext;
		} else {
			if (videoNext<textNext)
				t = videoNext;
			else
				t = textNext;
		}

		// Wait time diff
		QWORD now = (QWORD)getDifTime(&tv)/1000+seeked;

		if (t>now)
		{
			//Calculate timeout
			calcAbsTimeout(&ts,&tv,t-seeked);

			//Wait next or stopped
			pthread_cond_timedwait(&cond,&mutex,&ts);
			
			//loop
			continue;
		}

		// if we have to send audio
		if (audioNext<=t)
			audioNext = audio->Read(listener);

		// or video
		if (videoNext<=t)
			videoNext = video->Read(listener);

		// or text
		if (textNext<=t)
			textNext = text->Read(listener);

	}

	Log("-MP4Streamer::PlayLoop()\n");

	//Check if we were stoped
	bool stoped = !opened || !playing;

	//Not playing anymore
	playing = 0;

	//Unlock
	pthread_mutex_unlock(&mutex);

	//Check end of file
	if (!stoped && listener)
		//End of file
		listener->onEnd();

	Log("<MP4Streamer::PlayLoop()\n");

	return 1;
}