Example No. 1
CRtmpdSession::CRtmpdSession()
{_STT();

	// Initialize structures
	oexZero( m_session );
	oexZero( m_packet );
	m_nPacketReady = 0;
	m_nNonBlockingMode = 0;
	m_nTs = 0;
}
Example No. 2
void CRtmpdSession::Destroy()
{_STT();

	// Release session if any
	if ( m_session.m_sb.sb_socket )
		RTMP_Close( &m_session );

	oexZero( m_session );
	oexZero( m_packet );
	m_nPacketReady = 0;
	m_nNonBlockingMode = 0;
	m_nTs = 0;
}
Example No. 3
CFfContainer::CFfContainer()
{_STT();

	m_pFormatContext = oexNULL;
	m_pCodecContext = oexNULL;
	m_pFrame = oexNULL;
	m_nVideoStream = -1;

	m_nAudioStream = -1;

	m_nWrite = 0;
	m_nRead = 0;
	m_bKeyRxd = 0;
	m_nFrames = 0;
	m_nLastFrameFlags = 0;
	m_nLastFrameEncodedSize = 0;
	m_nKeyFrameInterval = 0;
	oexZero( m_pkt );

	m_time_base = 0;
	m_video_scale = 0;
	m_audio_scale = 0;

	m_vts_offset = 0;
	m_ats_offset = 0;

}
Example No. 4
void CFfAudioDecoder::Destroy()
{_STT();

	oexAutoLock ll( _g_ffmpeg_lock );
	if ( !ll.IsLocked() ) return;

	if ( m_pFrame )
	{	av_free( m_pFrame );
		m_pFrame = oexNULL;
	} // end if

	if ( m_pCodecContext )
	{	avcodec_close( m_pCodecContext );
		m_pCodecContext = oexNULL;
	} // end if

	if ( m_pFormatContext )
	{	av_free( m_pFormatContext );
		m_pFormatContext = oexNULL;
	} // end if

	m_pCodec = oexNULL;
	oexZero( m_pkt );
	m_lFrames = 0;
}
Example No. 5
CFfAudioDecoder::CFfAudioDecoder()
{_STT();

	m_pCodec = oexNULL;
	m_pCodecContext = oexNULL;
	m_pFormatContext = oexNULL;
	m_pFrame = oexNULL;
	m_lFrames = 0;
	oexZero( m_pkt );
}
Example No. 6
int CRtmpdSession::SendPacketBin( int format, int csi, int type, int ext, sqbind::CSqBinary *b, int nQueue )
{
	// Sanity checks
	if ( !b )
		return 0;

	// Must have a session
	if ( !m_session.m_sb.sb_socket )
		return 0;

	// Initialize packet
	oexZero( m_packet );

	// Initialize packet headers
	m_packet.m_nChannel = csi;
	m_packet.m_headerType = format;
	m_packet.m_packetType = type;
	m_packet.m_nInfoField2 = ext;
/*
	// Audio
	if ( 8 == type )
		m_packet.m_nTimeStamp = m_nTs++;
	// Video
	else if ( 9 == type )
		m_packet.m_nTimeStamp = m_nTs;
	else
*/		m_packet.m_nTimeStamp = 0;

	// Timestamps?
	m_packet.m_hasAbsTimestamp = ( 0 < m_nTs ) ? 1 : 0;

	sqbind::CSqBinary body;
	if ( !body.Allocate( RTMP_MAX_HEADER_SIZE + b->getUsed() + 128 ) )
		return 0;

	// Add padding byte for AMF3
	if ( 0x11 == type )
		body.setUsed( RTMP_MAX_HEADER_SIZE + 1 ),
		body.setUCHAR( RTMP_MAX_HEADER_SIZE, 0 );
	else
		body.setUsed( RTMP_MAX_HEADER_SIZE );

	// Append data
	body.Append( b );

	// Point the packet at the encoded variables
	m_packet.m_body = (char*)body.Ptr( RTMP_MAX_HEADER_SIZE );

	// Set body size
	m_packet.m_nBodySize = body.getUsed() - RTMP_MAX_HEADER_SIZE;

	// Send the packet
	return RTMP_SendPacket( &m_session, &m_packet, nQueue );
}
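A note on the buffer layout above: librtmp's RTMP_SendPacket() writes the chunk header into the bytes immediately in front of m_body, which is why RTMP_MAX_HEADER_SIZE bytes are reserved ahead of the payload before the data is appended (SendPacket2 further down carries the same observation in its comments). A minimal, hypothetical call sketch, assuming a session that is already connected and a CSqBinary already holding an audio tag body; all of the argument values are illustrative only:

	// Hypothetical usage:
	//   format 0 = full chunk header, chunk stream id 4,
	//   packet type 8 = audio, m_nInfoField2 (stream id) 1, nQueue 0 = do not queue the call.
	// if ( !session.SendPacketBin( 0, 4, 8, 1, &audioTag, 0 ) )
	// 	;	// send failed - check the connection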
Example No. 7
OEX_USING_NAMESPACE

CDataLog::CDataLog()
{   _STT();

    SetLogParams( eLogBase, eIndexStep );

    m_bChangesOnly = oexTRUE;

    oexZero( m_pLogs );

    m_uLimit = eDefaultFetchLimit;
}
Example No. 8
int CFfConvert::ConvertFB( AVFrame* src, sqbind::CSqBinary *dst )
{_STT();

	if ( !m_psc )
		return 0;
	
_STT_SET_CHECKPOINT( 10 );

	// Ensure source buffer is large enough
	if ( !src )
		return 0;

	// Allocate memory for destination image
	if ( dst->Size() < ( m_dst_size + FF_INPUT_BUFFER_PADDING_SIZE * 2 )
		 && !dst->Allocate( m_dst_size + FF_INPUT_BUFFER_PADDING_SIZE * 2 ) )
		return 0;
				
_STT_SET_CHECKPOINT( 11 );

	// Fill in picture data
	AVPicture apSrc, apDst;
	
	// Copy source information
	oexZero( apSrc );
	for ( int i = 0; i < (int)oexSizeOfArray( apSrc.linesize ); i++ )
		apSrc.data[ i ] = src->data[ i ],
		apSrc.linesize[ i ] = src->linesize[ i ];
	
	if ( !CFfFmt::FillAVPicture( &apDst, m_dst_fmt, m_dst_width, m_dst_height, dst->_Ptr() ) )
		return 0;

_STT_SET_CHECKPOINT( 12 );

	if ( m_flip )
		Flip( m_src_fmt, m_src_height, &apSrc );

_STT_SET_CHECKPOINT( 13 );

	int nRet = sws_scale(	m_psc, apSrc.data, apSrc.linesize, 0, m_src_height,
							apDst.data, apDst.linesize );

_STT_SET_CHECKPOINT( 14 );

	if ( 0 >= nRet )
		return 0;

	dst->setUsed( m_dst_size );

	return 1;
}
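ConvertFB() scales and converts an AVFrame into a caller-supplied CSqBinary using the sws context prepared elsewhere in the class (m_psc, together with the source/destination geometry and pixel formats). A hypothetical call sketch, assuming the converter was already initialized with those parameters; the variable names are placeholders, since the initialization call is not part of this listing:

	// sqbind::CSqBinary img;
	// if ( cnv.ConvertFB( pDecodedFrame, &img ) )
	// 	;	// img now holds m_dst_size bytes in the destination pixel format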
Example No. 9
int CRtmpdSession::ReadPacket()
{_STT();

	// Ensure we have a session
	if ( !isSession() )
		return -1;

	// Ensure we still have a connection
	if ( !RTMP_IsConnected( &m_session ) )
	{	Destroy();
		return -2;
	} // end if

	// Free previous packet if needed
	if ( m_nPacketReady )
		oexZero( m_packet ),
//		RTMPPacket_Free( &m_packet ), // +++ No, don't do that
		m_nPacketReady = 0;

	// Non-blocking?
	if ( m_nNonBlockingMode )
	{
		fd_set rset;
		FD_ZERO( &rset );
		FD_SET( m_session.m_sb.sb_socket, &rset );

		struct timeval tv;
		tv.tv_sec = 0;
		tv.tv_usec = 0;

		int ret = select( m_session.m_sb.sb_socket + 1, &rset, NULL, NULL, &tv );
		if ( 0 > ret )
			return -3;

		if ( !FD_ISSET( m_session.m_sb.sb_socket, &rset ) )
			return 0;

	} // end if

	// See if we can get a packet
	if ( !RTMP_ReadPacket( &m_session, &m_packet ) || !RTMPPacket_IsReady( &m_packet ) )
		return 0;

	// We have a packet
	m_nPacketReady = 1;

	return m_packet.m_packetType;
}
Example No. 10
int CFfConvert::ConvertFI( AVFrame* src, sqbind::CSqImage *dst )
{_STT();

	if ( !m_psc )
		return 0;
	
_STT_SET_CHECKPOINT( 10 );

	// Ensure source buffer is large enough
	if ( !src )
		return 0;

	// Do we need to allocate destination image?
	if ( dst->getWidth() != m_dst_width || dst->getHeight() != m_dst_height )
		if ( !dst->Create( m_dst_width, m_dst_height ) )
			return 0;
		
_STT_SET_CHECKPOINT( 11 );

	// Fill in picture data
	AVPicture apSrc, apDst;

	// Copy source information
	oexZero( apSrc );
	for ( int i = 0; i < (int)oexSizeOfArray( apSrc.linesize ); i++ )
		apSrc.data[ i ] = src->data[ i ],
		apSrc.linesize[ i ] = src->linesize[ i ];
	
	if ( !CFfFmt::FillAVPicture( &apDst, m_dst_fmt, m_dst_width, m_dst_height, dst->Obj().GetBits() ) )
		return 0;

_STT_SET_CHECKPOINT( 12 );

	if ( m_flip )
		Flip( m_src_fmt, m_src_height, &apSrc );

_STT_SET_CHECKPOINT( 13 );

	int nRet = sws_scale(	m_psc, apSrc.data, apSrc.linesize, 0, m_src_height,
							apDst.data, apDst.linesize );

_STT_SET_CHECKPOINT( 14 );

	if ( 0 >= nRet )
		return 0;

	return 1;
}
Example No. 11
int CPaInput::Open( int bBlocking, int nDev, int nChannels, int nFormat, double dLatency, double dSRate, int fpb )
{_STT();

	// Lose old stream
	Destroy();

	PaStreamParameters psp;
	oexZero( psp );

	if ( 0 > fpb )
		fpb = 1024;

	psp.device = nDev;
	psp.channelCount = nChannels;
	psp.suggestedLatency = dLatency;
	
#	define CNVTYPE( t, v ) case oex::obj::t : psp.sampleFormat = v; break;
	switch( nFormat )
	{	CNVTYPE( tInt8, paInt8 );
		CNVTYPE( tUInt8, paUInt8 );
		CNVTYPE( tInt16, paInt16 );
		CNVTYPE( tInt24, paInt24 );
		CNVTYPE( tInt32, paInt32 );
		CNVTYPE( tFloat, paFloat32 );
		default : psp.sampleFormat = 0; break;
	} // end switch

	m_bBlocking = bBlocking;
	m_nFrameBlockSize = fpb;
	m_nFrameBytes = getFormatBytes( nFormat ) * nChannels;

	// Attempt to open input stream

	if ( bBlocking )
		m_errLast = Pa_OpenStream( &m_stream, &psp, 0, dSRate, fpb, 0, 0, this );
	else
		m_errLast = Pa_OpenStream( &m_stream, &psp, 0, dSRate, fpb, 0, &_PaStreamCallback, this );

	if ( paNoError != m_errLast )
		return 0;

	return 1;
}
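A hypothetical call sketch for the open routine above, assuming PortAudio has already been initialized and that oex::obj::tInt16 is the 16-bit entry in the CNVTYPE table:

	// Blocking capture from the default input device:
	// 2 channels, 16-bit samples, 200 ms suggested latency, 44.1 kHz, 1024 frames per block.
	// CPaInput input;
	// if ( !input.Open( 1, Pa_GetDefaultInputDevice(), 2, oex::obj::tInt16, 0.2, 44100.0, 1024 ) )
	// 	;	// open failed - the PortAudio error code was stored in m_errLast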
Example No. 12
void CFfContainer::Destroy()
{_STT();

	oexAutoLock ll( _g_ffmpeg_lock );
	if ( !ll.IsLocked() ) return;

	m_video_extra.Free();
	m_audio_extra.Free();

	if ( m_pkt.data )
		av_free_packet( &m_pkt );

	if ( m_pCodecContext )
		avcodec_close( m_pCodecContext );

	m_audio_dec.Destroy();

	CloseStream();

	m_pCodecContext = oexNULL;
	m_pFrame = oexNULL;
	m_nVideoStream = -1;
	m_buf.Free();
	oexZero( m_pkt );
	m_nFrames = 0;
	m_nLastFrameFlags = 0;
	m_nLastFrameEncodedSize = 0;

//	m_pAudioCodecContext = oexNULL;
	m_nAudioStream = -1;
//	m_nAudioFrames = 0;

	m_nWrite = 0;
	m_nRead = 0;
	m_bKeyRxd = 0;

	m_vts_offset = 0;
	m_ats_offset = 0;

	m_sUrl.clear();
}
Example No. 13
int CFfContainer::ReadFrame( sqbind::CSqBinary *dat, sqbind::CSqMulti *m )
{_STT();

	if ( !m_pFormatContext )
		return -1;

	if ( !m_nRead )
		return -1;

	if ( m_pkt.data )
		av_free_packet( &m_pkt );
	oexZero( m_pkt );

	int res = av_read_frame( m_pFormatContext, &m_pkt );
	if ( res )
		return -1;

	if ( m )
	{
		(*m)[ oexT( "flags" ) ].set( oexMks( m_pkt.flags ).Ptr() );
		(*m)[ oexT( "size" ) ].set( oexMks( m_pkt.size ).Ptr() );
		(*m)[ oexT( "stream_index" ) ].set( oexMks( m_pkt.stream_index ).Ptr() );
		(*m)[ oexT( "pos" ) ].set( oexMks( m_pkt.pos ).Ptr() );
		(*m)[ oexT( "dts" ) ].set( oexMks( m_pkt.dts ).Ptr() );
		(*m)[ oexT( "pts" ) ].set( oexMks( m_pkt.pts ).Ptr() );
		(*m)[ oexT( "duration" ) ].set( oexMks( m_pkt.duration ).Ptr() );

	} // end if

	// Save flags
	m_nLastFrameFlags = m_pkt.flags;
	m_nLastFrameEncodedSize = m_pkt.size;

	if ( dat )
		dat->setBuffer( (sqbind::CSqBinary::t_byte*)m_pkt.data, m_pkt.size, 0, 0 );

	return m_pkt.stream_index;
}
Example No. 14
int CRtmpdSession::SendPacket2( int format, int csi, int type, int ext, sqbind::CSqMulti *m, int nQueue )
{
	// Sanity checks
	if ( !m || !m->size() )
		return 0;

	// Must have a session
	if ( !m_session.m_sb.sb_socket )
		return 0;

	// Initialize packet
	oexZero( m_packet );

	// Initialize packet headers
	m_packet.m_nChannel = csi;
	m_packet.m_headerType = format;
	m_packet.m_packetType = type;
	m_packet.m_nInfoField2 = ext;
/*
	// Audio
	if ( 8 == type )
		m_packet.m_nTimeStamp = m_nTs++;
	// Video
	else if ( 9 == type )
		m_packet.m_nTimeStamp = m_nTs;
	else
*/		m_packet.m_nTimeStamp = 0;

	// Timestamps?
	m_packet.m_hasAbsTimestamp = ( 0 < m_nTs ) ? 1 : 0;

	sqbind::CSqBinary body;
	if ( !body.Allocate( RTMP_MAX_HEADER_SIZE + 1024 ) )
		return 0;

	// Apparently, somewhere, RTMP_SendPacket() reaches backward
	// in the buffer, and apparently, it's by design.
	// I'm not sure how to feel about that ...

	// Add padding byte for AMF3
	if ( 0x11 == type )
		body.setUsed( RTMP_MAX_HEADER_SIZE + 1 ),
		body.setUCHAR( RTMP_MAX_HEADER_SIZE, 0 );
	else
		body.setUsed( RTMP_MAX_HEADER_SIZE );

	// Is it a raw buffer?
	if ( m->isset( "body" ) )
		body.appendString( (*m)[ "body" ].str() );

	// Serialize our data
	else if ( !SerializePacket( &body, m, 0 ) )
		return 0;

	// Point the packet at the encoded variables
	m_packet.m_body = (char*)body.Ptr( RTMP_MAX_HEADER_SIZE );

	// Set body size
	m_packet.m_nBodySize = body.getUsed() - RTMP_MAX_HEADER_SIZE;

	// Send the packet
	return RTMP_SendPacket( &m_session, &m_packet, nQueue );
}
Example No. 15
int CFfContainer::DecodeFrameBin( sqbind::CSqBinary *in, int fmt, sqbind::CSqBinary *out, sqbind::CSqMulti *m, int flip )
{_STT();

	AVPacket pkt; oexZero( pkt );
	pkt.data = (uint8_t*)in->Ptr();
	pkt.size = in->getUsed();

	if ( !m_pFrame )
		m_pFrame = avcodec_alloc_frame();
	if ( !m_pFrame )
		return -1;

	int gpp = 0, used = 0;

#if defined( FFSQ_VIDEO2 )

	if ( m )
	{
		pkt.flags 					= (*m)[ oexT( "flags" ) ].toint();
		pkt.stream_index 			= (*m)[ oexT( "stream_index" ) ].toint();
		pkt.pos 					= (*m)[ oexT( "pos" ) ].toint64();
		pkt.dts 					= (*m)[ oexT( "dts" ) ].toint64();
		pkt.pts 					= (*m)[ oexT( "pts" ) ].toint64();
		pkt.duration 				= (*m)[ oexT( "duration" ) ].toint();
		pkt.convergence_duration 	= (*m)[ oexT( "convergence_duration" ) ].toint64();

	} // end if

	used = avcodec_decode_video2( m_pCodecContext, m_pFrame, &gpp, &pkt );
	if ( 0 >= used )
	{	oexSHOW( used );
		return -1;
	} // end if

#else

	used = avcodec_decode_video( m_pCodecContext, m_pFrame, &gpp, pkt.data, pkt.size );
	if ( 0 >= used )
	{	oexSHOW( used );
		return -1;
	} // end if

#endif

	if ( !gpp )
		return -1;

	// Save flags
	m_nLastFrameFlags = pkt.flags;
	m_nLastFrameEncodedSize = pkt.size;

	// Is it already the right format?
	if ( fmt == (int)m_pCodecContext->pix_fmt )
	{	int nSize = CFfConvert::CalcImageSize( fmt, m_pCodecContext->width, m_pCodecContext->height );
		out->setBuffer( (sqbind::CSqBinary::t_byte*)m_pFrame->data[ 0 ], nSize, 0, 0 );
		m_nFrames++;
		return pkt.stream_index;
	} // end if

	// Do colorspace conversion
	if ( !CFfConvert::ConvertColorFB( m_pFrame, m_pCodecContext->pix_fmt,
									  m_pCodecContext->width, m_pCodecContext->height,
									  fmt, out, SWS_FAST_BILINEAR, flip ) )
		return -1;

	// Frame
	m_nFrames++;

	return 1;
}
Example No. 16
int CFfAudioDecoder::BufferData( sqbind::CSqBinary *in, sqbind::CSqMulti *m )
{
	// Init packet
	oexZero( m_pkt );
//	av_init_packet( &m_pkt );

	// Init other packet data
	if ( m )
	{
		if ( m->isset( oexT( "flags" ) ) )
			m_pkt.flags = (*m)[ oexT( "flags" ) ].toint();
		if ( m->isset( oexT( "stream_index" ) ) )
			m_pkt.stream_index = (*m)[ oexT( "stream_index" ) ].toint();
		if ( m->isset( oexT( "pos" ) ) )
			m_pkt.pos = (*m)[ oexT( "pos" ) ].toint64();
		if ( m->isset( oexT( "dts" ) ) )
			m_pkt.dts = (*m)[ oexT( "dts" ) ].toint64();
		if ( m->isset( oexT( "pts" ) ) )
			m_pkt.pts = (*m)[ oexT( "pts" ) ].toint64();
		if ( m->isset( oexT( "duration" ) ) )
			m_pkt.duration = (*m)[ oexT( "duration" ) ].toint();
		if ( m->isset( oexT( "convergence_duration" ) ) )
			m_pkt.duration = (*m)[ oexT( "convergence_duration" ) ].toint();

	} // end if

	// Are we adding data?
	if ( in && in->getUsed() )
	{
		// Ensure buffer size
		if ( ( m_tmp.Size() - m_tmp.getUsed() ) < (sqbind::CSqBinary::t_size)( in->getUsed() + FF_INPUT_BUFFER_PADDING_SIZE ) )
		{	oex::oexUINT uMin = 2 * ( m_tmp.Size() + in->getUsed() + FF_INPUT_BUFFER_PADDING_SIZE );
			if ( 32000 > uMin )
				uMin = 32000;
	        m_tmp.Allocate( uMin );
		} // end if

		// Add new data to buffer
		m_tmp.Append( in );

	} // end if

	// Is there a sync sequence?
	if ( m_sync.getUsed() )
	{
		oexSHOW( m_sync.getUsed() );

		const char *s = m_sync.Ptr();
		sqbind::CSqBinary::t_size ls = m_sync.getUsed();

		const char *p = m_tmp.Ptr();
		sqbind::CSqBinary::t_size lp = m_tmp.getUsed();

		// Look for the sync
		while ( lp > ls && oexMemCmp( p, s, ls ) )
			p++, lp--;

		// Shift out unsynced data
		if ( lp < m_tmp.getUsed() )
		{	oexSHOW( lp );
			oexSHOW( m_tmp.getUsed() );
			m_tmp.LShift( m_tmp.getUsed() - lp );
		} // end if

	} // end if

	// Get buffer pointers
	m_pkt.data = (uint8_t*)m_tmp._Ptr();
	m_pkt.size = m_tmp.getUsed();

	// Zero padding
	int nPadding = m_tmp.Size() - m_tmp.getUsed();
	if ( 0 < nPadding )
	{
		// Don't zero more than twice the padding size
		if ( nPadding > ( FF_INPUT_BUFFER_PADDING_SIZE * 2 ) )
			nPadding = FF_INPUT_BUFFER_PADDING_SIZE * 2;

		// Set end to zero to ensure no overreading on damaged blocks
		oexZeroMemory( &m_pkt.data[ m_pkt.size ], nPadding );

	} // end if

	return m_pkt.size;
}
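The zeroed tail at the end of BufferData() matches libavcodec's requirement that decoder input buffers carry at least FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes past the reported size, since some optimized bitstream readers can read slightly beyond the end of the data.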