Example #1
status_t
StreamBase::_NextPacket(bool reuse)
{
	TRACE_PACKET("StreamBase::_NextPacket(%d)\n", reuse);

	if (fReusePacket) {
		// The last packet was marked for reuse, so we keep using it.
		TRACE_PACKET("  re-using last packet\n");
		fReusePacket = reuse;
		return B_OK;
	}

	av_free_packet(&fPacket);

	while (true) {
		if (av_read_frame(fContext, &fPacket) < 0) {
			// NOTE: Even though the error may be for a different stream,
			// av_read_frame() is not going to succeed from here on, so it
			// doesn't matter which stream it was for.
			fReusePacket = false;
			return B_LAST_BUFFER_ERROR;
		}

		if (fPacket.stream_index == Index())
			break;

		// This is a packet from another stream, ignore it.
		av_free_packet(&fPacket);
	}

	// Mark this packet with the new reuse flag.
	fReusePacket = reuse;
	return B_OK;
}
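A minimal usage sketch (the caller below is hypothetical and not part of the original sources): passing reuse == true makes the following _NextPacket() call hand back the very same packet, which allows peeking at the upcoming packet without consuming it.

status_t
StreamBase::_PeekNextPacketStartTime(bigtime_t* _startTime)
{
	// Read the next packet, but mark it for reuse so that the next
	// _NextPacket() call returns this same packet again.
	status_t ret = _NextPacket(true);
	if (ret != B_OK)
		return ret;

	// fPacket is still loaded; convert its time stamp to microseconds,
	// falling back to the stream DTS when the PTS is unknown.
	if (fPacket.pts != kNoPTSValue)
		*_startTime = _ConvertFromStreamTimeBase(fPacket.pts);
	else
		*_startTime = _ConvertFromStreamTimeBase(fStream->cur_dts);
	return B_OK;
}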
Example #2
status_t
AVFormatWriter::StreamCookie::WriteChunk(const void* chunkBuffer,
	size_t chunkSize, media_encode_info* encodeInfo)
{
	TRACE_PACKET("AVFormatWriter::StreamCookie::WriteChunk(%p, %ld, "
		"start_time: %lld)\n", chunkBuffer, chunkSize, encodeInfo->start_time);

	BAutolock _(fStreamLock);

	// TODO: Probably the AVCodecEncoder needs to pass packet data
	// in encodeInfo...

	fPacket.data = const_cast<uint8_t*>((const uint8_t*)chunkBuffer);
	fPacket.size = chunkSize;

	if (fCalculatePTS) {
		fPacket.pts = (encodeInfo->start_time
			* fStream->time_base.den / fStream->time_base.num) / 1000000;
		TRACE_PACKET("  PTS: %lld  (stream->time_base: (%d/%d), "
			"codec->time_base: (%d/%d))\n", fPacket.pts,
			fStream->time_base.num, fStream->time_base.den,
			fStream->codec->time_base.num, fStream->codec->time_base.den);
	}

// From ffmpeg.c::do_audio_out():
// TODO:
//	if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
//		fPacket.pts = av_rescale_q(enc->coded_frame->pts,
//		enc->time_base, ost->st->time_base);


#if 0
	// TODO: Eventually, we need to write interleaved packets, but
	// maybe we are only supposed to use this if we actually have
	// more than one stream. For the moment, this crashes in AVPacket
	// shuffling inside libavformat. Maybe if we want to use this, we
	// need to allocate a separate AVPacket and copy the chunk buffer.
	int result = av_interleaved_write_frame(fContext, &fPacket);
	if (result < 0)
		TRACE("  av_interleaved_write_frame(): %d\n", result);
#else
	int result = av_write_frame(fContext, &fPacket);
	if (result < 0)
		TRACE("  av_write_frame(): %d\n", result);
#endif

	return result == 0 ? B_OK : B_ERROR;
}
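The PTS computation above inlines the rescaling arithmetic; the commented-out TODO hints at FFmpeg's av_rescale_q(), which performs the same conversion. A sketch of the equivalent call (assuming encodeInfo->start_time is in microseconds, as elsewhere in this code):

	// Rescale the microsecond start_time into the stream's time base.
	// Note that av_rescale_q() rounds to the nearest value, whereas the
	// inline arithmetic above truncates.
	AVRational microseconds = { 1, 1000000 };
	fPacket.pts = av_rescale_q(encodeInfo->start_time, microseconds,
		fStream->time_base);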
Example #3
void CGetEvent::Execute(const ProtoPacketPtr packet)
{
	SCOPED_LOG(m_Log);

	TRY 
	{
		LOG_TRACE("Execute subscribe, packet: [%s].") % packet->ShortDebugString(); 

		// getting event params
		const std::string& name = packet->job().params(0).rows(0).data(0);
		const std::string& caller = packet->job().params(0).rows(0).data(1);

		CHECK(!name.empty());

		// subscribe to event
		m_EventHash = CEventDispatcher::Instance().Subscribe(name, packet->from(), caller, boost::bind(&CGetEvent::EventCallBack, this, _1));

		// saving request packet params
		m_RequestPacketGuid = packet->guid();
		m_RequestPacketHost = packet->from();

		TRACE_PACKET(packet, m_EventHash, m_RequestPacketHost, m_RequestPacketGuid);

		// add this job to the list of waiting jobs
		m_TimeOut = std::numeric_limits<std::size_t>::max();
		m_Kernel.AddToWaiting(shared_from_this(), m_RequestPacketHost);
	}
	CATCH_PASS_EXCEPTIONS("CGetEvent::Execute failed.")
}
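For context, a plausible shape of the CEventDispatcher::Subscribe() call used above (an assumption; the real header is not part of this listing):

// Hypothetical declaration matching the call site above. The return
// type of Subscribe() (stored in m_EventHash) is a guess.
class CEventDispatcher
{
public:
	static CEventDispatcher& Instance();

	// Register a callback for the named event; the returned hash
	// identifies the subscription so it can be cancelled later.
	std::string Subscribe(const std::string& name,
		const std::string& from, const std::string& caller,
		const boost::function<void (const ProtoPacketPtr&)>& callback);
};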
Example #4
bool CClientState::ProcessPacketEntities( SVC_PacketEntities *msg )
{
	CL_PreprocessEntities(); // setup client prediction

	if ( !msg->m_bIsDelta )
	{
		// Delta too old, or this is the initial message
#ifndef _XBOX			
		// we can start recording now that we've received an uncompressed packet
		demorecorder->SetSignonState( SIGNONSTATE_FULL );
#endif
		// Tell prediction that we're recreating entities due to an uncompressed packet arriving
		if ( g_pClientSidePrediction  )
		{
			g_pClientSidePrediction->OnReceivedUncompressedPacket();
		}
	}
	else
	{
		if ( m_nDeltaTick == -1  )
		{
			// we requested a full update but still got a delta compressed packet. ignore it.
			return true;
		}
	}
	
	TRACE_PACKET(( "CL Receive (%d <-%d)\n", m_nCurrentSequence, msg->m_nDeltaFrom ));
	TRACE_PACKET(( "CL Num Ents (%d)\n", msg->m_nUpdatedEntries ));

	if ( g_pLocalNetworkBackdoor )
	{
		if ( m_nSignonState == SIGNONSTATE_SPAWN  )
		{	
			// We are done with signon sequence.
			SetSignonState( SIGNONSTATE_FULL, m_nServerCount );
		}

		// ignore message, all entities are transmitted using fast local memcopy routines
		m_nDeltaTick = GetServerTickCount();
		return true;
	}
	
	if ( !CL_ProcessPacketEntities ( msg ) )
		return false;

	return CBaseClientState::ProcessPacketEntities( msg );
}
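Note the double parentheses in the TRACE_PACKET(( ... )) calls above: this is the classic pre-variadic-macro idiom for passing a whole printf-style argument list through a single macro parameter. A sketch of how such a macro is typically defined (an assumption; the engine's actual definition may differ):

#if defined( DEBUG_NET_PACKETS )
	// "args" receives the entire parenthesized list, so that
	// TRACE_PACKET(( "CL Num Ents (%d)\n", n )) expands to
	// Msg( "CL Num Ents (%d)\n", n ).
	#define TRACE_PACKET( args )	Msg args
#else
	#define TRACE_PACKET( args )
#endif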
Example #5
status_t
AVFormatWriter::WriteChunk(void* _cookie, const void* chunkBuffer,
	size_t chunkSize, media_encode_info* encodeInfo)
{
	TRACE_PACKET("AVFormatWriter::WriteChunk(%p, %ld, %p)\n", chunkBuffer,
		chunkSize, encodeInfo);

	StreamCookie* cookie = reinterpret_cast<StreamCookie*>(_cookie);
	return cookie->WriteChunk(chunkBuffer, chunkSize, encodeInfo);
}
Example #6
status_t
AVFormatWriter::StreamCookie::WriteChunk(const void* chunkBuffer,
	size_t chunkSize, media_encode_info* encodeInfo)
{
	TRACE_PACKET("AVFormatWriter::StreamCookie[%d]::WriteChunk(%p, %ld, "
		"start_time: %lld)\n", fStream->index, chunkBuffer, chunkSize,
		encodeInfo->start_time);

	BAutolock _(fStreamLock);

	fPacket.data = const_cast<uint8_t*>((const uint8_t*)chunkBuffer);
	fPacket.size = chunkSize;

	fPacket.pts = int64_t((double)encodeInfo->start_time
		* fStream->time_base.den / (1000000.0 * fStream->time_base.num)
		+ 0.5);

	fPacket.flags = 0;
	if ((encodeInfo->flags & B_MEDIA_KEY_FRAME) != 0)
		fPacket.flags |= AV_PKT_FLAG_KEY;

	TRACE_PACKET("  PTS: %lld (stream->time_base: (%d/%d), "
		"codec->time_base: (%d/%d))\n", fPacket.pts,
		fStream->time_base.num, fStream->time_base.den,
		fStream->codec->time_base.num, fStream->codec->time_base.den);

#if 0
	// TODO: Eventually, we need to write interleaved packets, but
	// maybe we are only supposed to use this if we actually have
	// more than one stream. For the moment, this crashes in AVPacket
	// shuffling inside libavformat. Maybe if we want to use this, we
	// need to allocate a separate AVPacket and copy the chunk buffer.
	int result = av_interleaved_write_frame(fContext, &fPacket);
	if (result < 0)
		TRACE("  av_interleaved_write_frame(): %d\n", result);
#else
	int result = av_write_frame(fContext, &fPacket);
	if (result < 0)
		TRACE("  av_write_frame(): %d\n", result);
#endif

	return result == 0 ? B_OK : B_ERROR;
}
Example #7
void CGetEvent::EventCallBack(const ProtoPacketPtr packet)
{
	SCOPED_LOG(m_Log);

	TRY 
	{
		TRACE_PACKET(packet);

		LOG_TRACE("Event callback, packet: [%s].") % packet->ShortDebugString(); 

		// received packet with job result, send it to the subscriber
		packet->set_type(packets::Packet_PacketType_REPLY);	

		// packet GUID
		packet->set_guid(m_RequestPacketGuid);
		packet->set_timeout(0);

		m_Kernel.Send(m_RequestPacketHost, packet);
	}
	CATCH_PASS_EXCEPTIONS("CGetEvent::EventCallBack failed.")
}
Example #8
void CGetEvent::HandleReply(const ProtoPacketPtr packet)
{
	SCOPED_LOG(m_Log);

	TRACE_PACKET(packet, m_RequestPacketHost);

	if (packet)
	{
		LOG_TRACE("Handling event reply, packet: [%s].") % packet->ShortDebugString(); 
		m_TimeOut = std::numeric_limits<std::size_t>::max();
		m_Kernel.AddToWaiting(shared_from_this(), m_RequestPacketHost);
	}

	if (!m_CallBackFn)
		return;

	TRY 
	{
		m_CallBackFn(packet);
	}
	CATCH_IGNORE_EXCEPTIONS((m_Log << packet), "Event callback failed.")
}
Example #9
status_t
StreamBase::GetNextChunk(const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	BAutolock _(fStreamLock);

	TRACE_PACKET("StreamBase::GetNextChunk()\n");

	// Get the last stream DTS before reading the next packet, since
	// afterwards cur_dts will point to the new one.
	int64 lastStreamDTS = fStream->cur_dts;

	status_t ret = _NextPacket(false);
	if (ret != B_OK) {
		*chunkBuffer = NULL;
		*chunkSize = 0;
		return ret;
	}

	// NOTE: AVPacket has a field called "convergence_duration", for which
	// the documentation is quite interesting. It sounds like it could be
	// used to know the time until the next I-Frame in streams that don't
	// let you know the position of keyframes in another way (like through
	// the index).

	// According to libavformat documentation, fPacket is valid until the
	// next call to av_read_frame(). This is what we want and we can share
	// the memory with the least overhead.
	*chunkBuffer = fPacket.data;
	*chunkSize = fPacket.size;

	if (mediaHeader != NULL) {
		mediaHeader->type = fFormat.type;
		mediaHeader->buffer = 0;
		mediaHeader->destination = -1;
		mediaHeader->time_source = -1;
		mediaHeader->size_used = fPacket.size;
		if (fPacket.pts != kNoPTSValue) {
//TRACE("  PTS: %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//fPacket.pts, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time = _ConvertFromStreamTimeBase(fPacket.pts);
		} else {
//TRACE("  PTS (stream): %lld (time_base.num: %d, .den: %d), stream DTS: %lld\n",
//lastStreamDTS, fStream->time_base.num, fStream->time_base.den,
//fStream->cur_dts);
			mediaHeader->start_time
				= _ConvertFromStreamTimeBase(lastStreamDTS);
		}
		mediaHeader->file_pos = fPacket.pos;
		mediaHeader->data_offset = 0;
		switch (mediaHeader->type) {
			case B_MEDIA_RAW_AUDIO:
				break;
			case B_MEDIA_ENCODED_AUDIO:
				mediaHeader->u.encoded_audio.buffer_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				break;
			case B_MEDIA_RAW_VIDEO:
				mediaHeader->u.raw_video.line_count
					= fFormat.u.raw_video.display.line_count;
				break;
			case B_MEDIA_ENCODED_VIDEO:
				mediaHeader->u.encoded_video.field_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				mediaHeader->u.encoded_video.line_count
					= fFormat.u.encoded_video.output.display.line_count;
				break;
			default:
				break;
		}
	}

//	static bigtime_t pts[2];
//	static bigtime_t lastPrintTime = system_time();
//	static BLocker printLock;
//	if (fStream->index < 2) {
//		if (fPacket.pts != kNoPTSValue)
//			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
//		printLock.Lock();
//		bigtime_t now = system_time();
//		if (now - lastPrintTime > 1000000) {
//			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
//				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
//			fflush(stdout);
//			lastPrintTime = now;
//		}
//		printLock.Unlock();
//	}

	return B_OK;
}
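_ConvertFromStreamTimeBase() is not shown in this listing; presumably it rescales time_base ticks into microseconds, roughly like the sketch below (an assumption, based on bigtime_t being a microsecond count in the Haiku media kit):

bigtime_t
StreamBase::_ConvertFromStreamTimeBase(int64_t time) const
{
	// Multiply before dividing to keep precision. For very large time
	// stamps, av_rescale_q(time, fStream->time_base, microsecondBase)
	// would avoid the overflow risk of the naive arithmetic.
	return (bigtime_t)(1000000LL * time * fStream->time_base.num
		/ fStream->time_base.den);
}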
Example #10
void SendTable_WritePropList(
	const SendTable *pTable,
	const void *pState,
	const int nBits,
	bf_write *pOut,
	const int objectID,
	const int *pCheckProps,
	const int nCheckProps )
{
	if ( nCheckProps == 0 )
	{
		// Write a single final zero bit, signifying that there are no changed properties
		pOut->WriteOneBit( 0 );
		return;
	}

	bool bDebugWatch = Sendprop_UsingDebugWatch();

	s_debug_info_shown = false;
	s_debug_bits_start = pOut->GetNumBitsWritten();
	
	CSendTablePrecalc *pPrecalc = pTable->m_pPrecalc;
	CDeltaBitsWriter deltaBitsWriter( pOut );
	
	bf_read inputBuffer( "SendTable_WritePropList->inputBuffer", pState, BitByte( nBits ), nBits );
	CDeltaBitsReader inputBitsReader( &inputBuffer );

	// Ok, they want to specify a small list of properties to check.
	int iToProp = NextProp( &inputBitsReader );
	int i = 0;
	while ( i < nCheckProps )
	{
		// Seek the 'to' state to the current property we want to check.
		while ( iToProp < pCheckProps[i] )
		{
			SkipPropData( &inputBuffer, pPrecalc->GetProp( iToProp ) );
			iToProp = NextProp( &inputBitsReader );
		}

		if ( iToProp == PROP_SENTINEL )
		{
			break;
		}
		else if ( iToProp == pCheckProps[i] )
		{
			const SendProp *pProp = pPrecalc->GetProp( iToProp );

			// Show debug stuff.
			if ( bDebugWatch )
			{
				ShowEncodeDeltaWatchInfo( pTable, pProp, inputBuffer, objectID, iToProp );
			}

			// See how many bits the data for this property takes up.
			int iStartBit = inputBuffer.GetNumBitsRead();
			SkipPropData( &inputBuffer, pProp );
			int nToStateBits = inputBuffer.GetNumBitsRead() - iStartBit;

			TRACE_PACKET( ( "    Send Field (%s) = %d (%d bytes)\n", pProp->GetName(), nToStateBits, ( nToStateBits + 7 ) / 8 ) );

			// Write the data into the output.
			deltaBitsWriter.WritePropIndex( iToProp );
			inputBuffer.Seek( iStartBit );
			pOut->WriteBitsFromBuffer( &inputBuffer, nToStateBits );

			// Seek to the next prop.
			iToProp = NextProp( &inputBitsReader );
		}

		++i;
	}

	if ( s_debug_info_shown )
	{
		int  bits = pOut->GetNumBitsWritten() - s_debug_bits_start;
		ConDMsg( "= %i bits (%i bytes)\n", bits, Bits2Bytes(bits) );
	}

	inputBitsReader.ForceFinished(); // avoid a benign assert
}
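The loop above is a merge over two sorted index sequences: the props present in the delta-encoded pState stream and the caller-supplied pCheckProps list. Stripped of the bit-level plumbing, the control flow looks like this (a simplified sketch with hypothetical helpers, not engine code):

	// Both sequences are sorted, so advance the stream cursor until it
	// catches up with the current check prop, then copy on exact match.
	while ( i < nCheckProps )
	{
		while ( iToProp < pCheckProps[i] )
			iToProp = nextPropInStream();	// skip props we don't check
		if ( iToProp == PROP_SENTINEL )
			break;							// stream exhausted
		if ( iToProp == pCheckProps[i] )
		{
			copyPropData( iToProp );		// changed and checked: copy
			iToProp = nextPropInStream();
		}
		++i;								// next prop to check
	}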
Example #11
status_t
StreamBase::GetNextChunk(const void** chunkBuffer,
	size_t* chunkSize, media_header* mediaHeader)
{
	BAutolock _(fStreamLock);

	TRACE_PACKET("StreamBase::GetNextChunk()\n");

	// Get the last stream DTS before reading the next packet, since
	// afterwards cur_dts will point to the new one.
	int64 lastStreamDTS = fStream->cur_dts;

	status_t ret = _NextPacket(false);
	if (ret != B_OK) {
		*chunkBuffer = NULL;
		*chunkSize = 0;
		return ret;
	}

	// NOTE: AVPacket has a field called "convergence_duration", for which
	// the documentation is quite interesting. It sounds like it could be
	// used to know the time until the next I-Frame in streams that don't
	// let you know the position of keyframes in another way (like through
	// the index).

	// According to libavformat documentation, fPacket is valid until the
	// next call to av_read_frame(). This is what we want and we can share
	// the memory with the least overhead.
	*chunkBuffer = fPacket.data;
	*chunkSize = fPacket.size;

	if (mediaHeader != NULL) {
		mediaHeader->type = fFormat.type;
		mediaHeader->buffer = 0;
		mediaHeader->destination = -1;
		mediaHeader->time_source = -1;
		mediaHeader->size_used = fPacket.size;

		// FFmpeg recommends using the decoding time stamps as the primary
		// source for presentation time stamps, especially for video formats
		// that use frame reordering. Moreover, this way it is ensured that
		// the returned start times form a monotonically increasing time
		// series (even for videos that contain B-frames).
		// \see http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavformat/avformat.h;h=1e8a6294890d580cd9ebc684eaf4ce57c8413bd8;hb=9153b33a742c4e2a85ff6230aea0e75f5a8b26c2#l1623
		bigtime_t presentationTimeStamp;
		if (fPacket.dts != kNoPTSValue)
			presentationTimeStamp = fPacket.dts;
		else if (fPacket.pts != kNoPTSValue)
			presentationTimeStamp = fPacket.pts;
		else
			presentationTimeStamp = lastStreamDTS;

		mediaHeader->start_time	= _ConvertFromStreamTimeBase(presentationTimeStamp);
		mediaHeader->file_pos = fPacket.pos;
		mediaHeader->data_offset = 0;
		switch (mediaHeader->type) {
			case B_MEDIA_RAW_AUDIO:
				break;
			case B_MEDIA_ENCODED_AUDIO:
				mediaHeader->u.encoded_audio.buffer_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				break;
			case B_MEDIA_RAW_VIDEO:
				mediaHeader->u.raw_video.line_count
					= fFormat.u.raw_video.display.line_count;
				break;
			case B_MEDIA_ENCODED_VIDEO:
				mediaHeader->u.encoded_video.field_flags
					= (fPacket.flags & AV_PKT_FLAG_KEY) ? B_MEDIA_KEY_FRAME : 0;
				mediaHeader->u.encoded_video.line_count
					= fFormat.u.encoded_video.output.display.line_count;
				break;
			default:
				break;
		}
	}

//	static bigtime_t pts[2];
//	static bigtime_t lastPrintTime = system_time();
//	static BLocker printLock;
//	if (fStream->index < 2) {
//		if (fPacket.pts != kNoPTSValue)
//			pts[fStream->index] = _ConvertFromStreamTimeBase(fPacket.pts);
//		printLock.Lock();
//		bigtime_t now = system_time();
//		if (now - lastPrintTime > 1000000) {
//			printf("PTS: %.4f/%.4f, diff: %.4f\r", pts[0] / 1000000.0,
//				pts[1] / 1000000.0, (pts[0] - pts[1]) / 1000000.0);
//			fflush(stdout);
//			lastPrintTime = now;
//		}
//		printLock.Unlock();
//	}

	return B_OK;
}