Example 1
int CGsCapture::Init()
{_STT();
	GstElement *pipeline, *camera_src, *screen_sink, *image_sink;
	GstElement *screen_queue, *image_queue;
	GstElement *csp_filter, *image_filter, *tee;
	GstCaps *caps;
	GstBus *bus;
	GMainLoop *loop;

	/* Initialize Gstreamer */
//	gst_init( 0, 0 );

oexM();
	GError *err = 0;
	if ( !gst_init_check( 0, 0, &err ) )
	{	oexSHOW( err->message );
		g_error_free( err );
		oexEcho( "gst_init_check() failed" );
		return -1;
	}
oexM();

	loop = g_main_loop_new (NULL, FALSE);
	oexSHOW( (long)loop );

	/* Create the pipeline and attach a callback to its
	 * message bus */
	pipeline = gst_pipeline_new("test-camera");

	bus = gst_pipeline_get_bus( GST_PIPELINE( pipeline ) );
	gst_bus_add_watch( bus, (GstBusFunc)bus_callback, loop );
	gst_object_unref( GST_OBJECT( bus ) );

/*
	GstElement *filesrc  = gst_element_factory_make ("filesrc", "my_filesource");
	if ( !filesrc )
	{
		oexEcho( "gst_element_factory_make() failed" );
		return -1;
	} // end if
*/

	/* Create elements */
	/* Camera video stream comes from a Video4Linux driver */
	camera_src = gst_element_factory_make("v4l2src", "camera_src");

//	gst_play_error_plugin (VIDEO_SRC, &err);

oexM();

	/* A colorspace filter is needed to make sure that the sinks understand
	 * the stream coming from the camera */
	csp_filter = gst_element_factory_make("ffmpegcolorspace", "csp_filter");

oexM();

	/* Tee that copies the stream to multiple outputs */
	tee = gst_element_factory_make("tee", "tee");


	/* Queue creates new thread for the stream */
	screen_queue = gst_element_factory_make("queue", "screen_queue");


	/* Sink that shows the image on screen. Xephyr doesn't support the XVideo
	 * extension, so it needs to use ximagesink, whereas the device uses
	 * xvimagesink */
	screen_sink = gst_element_factory_make(VIDEO_SINK, "screen_sink");
	/* Creates a separate thread for the stream from which the image
	 * is captured */
	image_queue = gst_element_factory_make("queue", "image_queue");
	/* Filter to convert the stream to a format that the gdkpixbuf library
	 * can use */
	image_filter = gst_element_factory_make("ffmpegcolorspace", "image_filter");

	/* A dummy sink for the image stream. Goes to bitheaven */
	image_sink = gst_element_factory_make("fakesink", "image_sink");

oexM();

	/* Check that elements are correctly initialized */
	if(!(pipeline && bus && camera_src && screen_sink && csp_filter && tee
		&& screen_queue && image_queue && image_filter && image_sink))
	{
		oexSHOW( (long)pipeline );
		oexSHOW( (long)bus );
		oexSHOW( (long)camera_src );
		oexSHOW( (long)screen_sink );
		oexSHOW( (long)csp_filter );
		oexSHOW( (long)tee );
		oexSHOW( (long)screen_queue );
		oexSHOW( (long)image_queue );
		oexSHOW( (long)image_filter );
		oexSHOW( (long)image_sink );

		oexEcho("Couldn't create pipeline elements");
		return -1;
	}

	/* Set the image sink to emit the handoff signal before throwing away
	 * its buffer */
	g_object_set(G_OBJECT(image_sink),
			"signal-handoffs", TRUE, NULL);

	/* Add elements to the pipeline. This has to be done prior to
	 * linking them */
	gst_bin_add_many(GST_BIN(pipeline), camera_src, csp_filter,
			tee, screen_queue, screen_sink, image_queue,
			image_filter, image_sink, NULL);

	/* Specify what kind of video is wanted from the camera */
	caps = gst_caps_new_simple("video/x-raw-rgb",
			"width", G_TYPE_INT, 640,
			"height", G_TYPE_INT, 480,
			NULL);


oexM();

	/* Link the camera source and colorspace filter using capabilities
	 * specified */
	if(!gst_element_link_filtered(camera_src, csp_filter, caps))
	{
		oexEcho( "gst_element_link_filtered() failed" );
		return -1;
	}
	gst_caps_unref(caps);

	/* Connect Colorspace Filter -> Tee -> Screen Queue -> Screen Sink
	 * This finalizes the initialization of the screen-part of the pipeline */
	if(!gst_element_link_many(csp_filter, tee, screen_queue, screen_sink, NULL))
	{
		oexEcho( "gst_element_link_many() failed" );
		return -1;
	}

	/* gdkpixbuf requires 8 bits per sample, which is 24 bits per
	 * RGB pixel */
	caps = gst_caps_new_simple("video/x-raw-rgb",
			"width", G_TYPE_INT, 640,
			"height", G_TYPE_INT, 480,
			"bpp", G_TYPE_INT, 24,
			"depth", G_TYPE_INT, 24,
			"framerate", GST_TYPE_FRACTION, 15, 1,
			NULL);

oexM();

	/* Link the image-branch of the pipeline. The pipeline is
	 * ready after this */
	if(!gst_element_link_many(tee, image_queue, image_filter, NULL))
	{	oexEcho( "gst_element_link_many() failed" );
		return -1;
	}

	if(!gst_element_link_filtered(image_filter, image_sink, caps))
	{	oexEcho( "gst_element_link_filtered() failed" );
		return -1;
	}

	gst_caps_unref(caps);

	/* As soon as the screen is exposed, the window ID will be passed to the sink */
//	g_signal_connect(appdata->screen, "expose-event", G_CALLBACK(expose_cb),
//			 screen_sink);

oexM();

	gst_element_set_state(pipeline, GST_STATE_PLAYING);

oexM();

	{ // Take a snapshot

		GstElement *image_sink;

		/* Get the image sink element from the pipeline */
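		/* Note: gst_bin_get_by_name() returns a new reference that should
		 * eventually be released with gst_object_unref() */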
		image_sink = gst_bin_get_by_name(GST_BIN(pipeline),
				"image_sink");

		if ( !image_sink )
		{	oexEcho( "image_sink is null" );
			return -1;
		}


		/* Display a note to the user */
//		hildon_banner_show_information(GTK_WIDGET(appdata->window),
	//		NULL, "Taking Photo");

		/* Connect the "handoff"-signal of the image sink to the
		 * callback. This gets called whenever the sink gets a
		 * buffer it's ready to pass forward on the pipeline */
//		appdata->buffer_cb_id = g_signal_connect(
//				G_OBJECT(image_sink), "handoff",
//				G_CALLBACK(buffer_probe_callback), appdata);
	}

	return 0;
}
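
Init() registers bus_callback with gst_bus_add_watch(), but the handler itself is not part of the listing. Below is a minimal sketch of what such a handler could look like, assuming the GMainLoop created in Init() is passed as the user-data pointer; it is an illustration, not the original implementation.

/* Hypothetical bus handler for the pipeline above: quits the main loop on
 * error or end-of-stream; returning TRUE keeps the watch installed */
static gboolean bus_callback( GstBus *bus, GstMessage *message, gpointer data )
{
	GMainLoop *loop = (GMainLoop*)data;

	switch ( GST_MESSAGE_TYPE( message ) )
	{
		case GST_MESSAGE_ERROR:
		{	GError *err = NULL;
			gchar *debug = NULL;
			gst_message_parse_error( message, &err, &debug );
			g_printerr( "Pipeline error: %s\n", err ? err->message : "unknown" );
			if ( err )
				g_error_free( err );
			g_free( debug );
			g_main_loop_quit( loop );
			break;
		} // end case

		case GST_MESSAGE_EOS:
			g_main_loop_quit( loop );
			break;

		default:
			break;
	} // end switch

	return TRUE;
}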
Example 2
int CFfContainer::DecodeFrame( int stream, int fmt, sqbind::CSqBinary *dat, sqbind::CSqMulti *m, int flip )
{_STT();

	// Read a frame from the packet
	int res = -1;

	do
	{
		// Read frames from input stream
		res = ReadFrame( oexNULL, m );
		if ( 0 > res )
			return res;

	} while ( res != stream );

	// Still waiting for a key frame?
	if ( !m_bKeyRxd )
	{	if ( 0 == ( m_pkt.flags & AV_PKT_FLAG_KEY ) )
			return -1;
		m_bKeyRxd = 1;
	} // end if

	// Only video streams are handled at the moment
	if ( !dat || stream != m_nVideoStream || !m_pCodecContext )
		return -1;

	if ( !m_pFrame )
		m_pFrame = avcodec_alloc_frame();
	if ( !m_pFrame )
		return -1;

	int gpp = 0, used = 0;

#if defined( FFSQ_VIDEO2 )

	used = avcodec_decode_video2( m_pCodecContext, m_pFrame, &gpp, &m_pkt );
	if ( 0 >= used )
	{	oexSHOW( used );
		return -1;
	} // end if

#else

	used = avcodec_decode_video( m_pCodecContext, m_pFrame, &gpp, m_pkt.data, m_pkt.size );
	if ( 0 >= used )
	{	oexSHOW( used );
		return -1;
	} // end if

#endif

	if ( !gpp )
		return -1;

	// Save flags
	m_nLastFrameFlags = m_pkt.flags;
	m_nLastFrameEncodedSize = m_pkt.size;

	// Is it already the right format?
	if ( fmt == (int)m_pCodecContext->pix_fmt )
	{	int nSize = CFfConvert::CalcImageSize( fmt, m_pCodecContext->width, m_pCodecContext->height );
		dat->setBuffer( (sqbind::CSqBinary::t_byte*)m_pFrame->data[ 0 ], nSize, 0, 0 );
		m_nFrames++;
		return m_pkt.stream_index;
	} // end if

	// Do colorspace conversion
	if ( !CFfConvert::ConvertColorFB( m_pFrame, m_pCodecContext->pix_fmt,
									  m_pCodecContext->width, m_pCodecContext->height,
									  fmt, dat, SWS_FAST_BILINEAR, flip ) )
		return -1;

	// Count the decoded frame
	m_nFrames++;

	return m_pkt.stream_index;
}
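
A possible call pattern for DecodeFrame() is sketched below; the container instance, stream index and pixel format are assumptions, since the calling code is not part of the listing.

	// Hypothetical usage sketch: demux and decode the next video frame
	sqbind::CSqBinary img;		// receives the decoded image data
	sqbind::CSqMulti  info;		// receives packet metadata

	// 'container' is an open CFfContainer, 'nVideoStream' the index of the
	// video stream, and PIX_FMT_YUV420P the desired output format (old FFmpeg enum)
	if ( 0 <= container.DecodeFrame( nVideoStream, (int)PIX_FMT_YUV420P, &img, &info, 0 ) )
	{	// 'img' now holds the raw frame in the requested format
	} // end if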
Example 3
int CFfContainer::DecodeFrameBin( sqbind::CSqBinary *in, int fmt, sqbind::CSqBinary *out, sqbind::CSqMulti *m, int flip )
{_STT();

	AVPacket pkt; oexZero( pkt );
	pkt.data = (uint8_t*)in->Ptr();
	pkt.size = in->getUsed();

	if ( !m_pFrame )
		m_pFrame = avcodec_alloc_frame();
	if ( !m_pFrame )
		return -1;

	int gpp = 0, used = 0;

#if defined( FFSQ_VIDEO2 )

	if ( m )
	{
		pkt.flags 					= (*m)[ oexT( "flags" ) ].toint();
		pkt.stream_index 			= (*m)[ oexT( "stream_index" ) ].toint();
		pkt.pos 					= (*m)[ oexT( "pos" ) ].toint64();
		pkt.dts 					= (*m)[ oexT( "dts" ) ].toint64();
		pkt.pts 					= (*m)[ oexT( "pts" ) ].toint64();
		pkt.duration 				= (*m)[ oexT( "duration" ) ].toint();
		pkt.convergence_duration 	= (*m)[ oexT( "convergence_duration" ) ].toint64();

	} // end if

	used = avcodec_decode_video2( m_pCodecContext, m_pFrame, &gpp, &pkt );
	if ( 0 >= used )
	{	oexSHOW( used );
		return -1;
	} // end if

#else

	used = avcodec_decode_video( m_pCodecContext, m_pFrame, &gpp, pkt.data, pkt.size );
	if ( 0 >= used )
	{	oexSHOW( used );
		return -1;
	} // end if

#endif

	if ( !gpp )
		return -1;

	// Save flags from the packet just decoded
	m_nLastFrameFlags = pkt.flags;
	m_nLastFrameEncodedSize = pkt.size;

	// Is it already the right format?
	if ( fmt == (int)m_pCodecContext->pix_fmt )
	{	int nSize = CFfConvert::CalcImageSize( fmt, m_pCodecContext->width, m_pCodecContext->height );
		out->setBuffer( (sqbind::CSqBinary::t_byte*)m_pFrame->data[ 0 ], nSize, 0, 0 );
		m_nFrames++;
		return pkt.stream_index;
	} // end if

	// Do colorspace conversion
	if ( !CFfConvert::ConvertColorFB( m_pFrame, m_pCodecContext->pix_fmt,
									  m_pCodecContext->width, m_pCodecContext->height,
									  fmt, out, SWS_FAST_BILINEAR, flip ) )
		return -1;

	// Count the decoded frame
	m_nFrames++;

	return 1;
}
Example 4
int CFfAudioDecoder::BufferData( sqbind::CSqBinary *in, sqbind::CSqMulti *m )
{
	// Init packet
	oexZero( m_pkt );
//	av_init_packet( &m_pkt );

	// Init other packet data
	if ( m )
	{
		if ( m->isset( oexT( "flags" ) ) )
			m_pkt.flags = (*m)[ oexT( "flags" ) ].toint();
		if ( m->isset( oexT( "stream_index" ) ) )
			m_pkt.stream_index = (*m)[ oexT( "stream_index" ) ].toint();
		if ( m->isset( oexT( "pos" ) ) )
			m_pkt.pos = (*m)[ oexT( "pos" ) ].toint64();
		if ( m->isset( oexT( "dts" ) ) )
			m_pkt.dts = (*m)[ oexT( "dts" ) ].toint64();
		if ( m->isset( oexT( "pts" ) ) )
			m_pkt.pts = (*m)[ oexT( "pts" ) ].toint64();
		if ( m->isset( oexT( "duration" ) ) )
			m_pkt.duration = (*m)[ oexT( "duration" ) ].toint();
		if ( m->isset( oexT( "convergence_duration" ) ) )
			m_pkt.convergence_duration = (*m)[ oexT( "convergence_duration" ) ].toint64();

	} // end if

	// Are we adding data?
	if ( in && in->getUsed() )
	{
		// Ensure buffer size
		if ( ( m_tmp.Size() - m_tmp.getUsed() ) < (sqbind::CSqBinary::t_size)( in->getUsed() + FF_INPUT_BUFFER_PADDING_SIZE ) )
		{	oex::oexUINT uMin = 2 * ( m_tmp.Size() + in->getUsed() + FF_INPUT_BUFFER_PADDING_SIZE );
			if ( 32000 > uMin )
				uMin = 32000;
			m_tmp.Allocate( uMin );
		} // end if

		// Add new data to buffer
		m_tmp.Append( in );

	} // end if

	// Is there a sync sequence?
	if ( m_sync.getUsed() )
	{
		oexSHOW( m_sync.getUsed() );

		const char *s = m_sync.Ptr();
		sqbind::CSqBinary::t_size ls = m_sync.getUsed();

		const char *p = m_tmp.Ptr();
		sqbind::CSqBinary::t_size lp = m_tmp.getUsed();

		// Look for the sync
		while ( lp >= ls && oexMemCmp( p, s, ls ) )
			p++, lp--;

		// Shift out unsynced data
		if ( lp < m_tmp.getUsed() )
		{	oexSHOW( lp );
			oexSHOW( m_tmp.getUsed() );
			m_tmp.LShift( m_tmp.getUsed() - lp );
		} // end if

	} // end if

	// Get buffer pointers
	m_pkt.data = (uint8_t*)m_tmp._Ptr();
	m_pkt.size = m_tmp.getUsed();

	// Zero the padding bytes FFmpeg expects after the input buffer
	int nPadding = m_tmp.Size() - m_tmp.getUsed();
	if ( 0 < nPadding )
	{
		// Don't zero more than twice the padding size
		if ( nPadding > ( FF_INPUT_BUFFER_PADDING_SIZE * 2 ) )
			nPadding = FF_INPUT_BUFFER_PADDING_SIZE * 2;

		// Set end to zero to ensure no overreading on damaged blocks
		oexZeroMemory( &m_pkt.data[ m_pkt.size ], nPadding );

	} // end if

	return m_pkt.size;
}
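
BufferData() only assembles m_pkt (including the zero padding FFmpeg expects at the end of an input buffer); the decode step itself is not part of the listing. The sketch below shows how such a packet could be handed to avcodec_decode_audio3() from the same FFmpeg generation used above; the codec context member (m_pCodecContext) and the PCM buffer (m_pcm) are assumptions, not names from the original source.

	// Hypothetical follow-up: decode the packet assembled by BufferData()
	int nOut = AVCODEC_MAX_AUDIO_FRAME_SIZE;	// bytes available for PCM output
	if ( (int)m_pcm.Size() < nOut )				// m_pcm : assumed CSqBinary for PCM data
		m_pcm.Allocate( nOut );

	int nUsed = avcodec_decode_audio3( m_pCodecContext, (int16_t*)m_pcm._Ptr(), &nOut, &m_pkt );
	if ( 0 < nUsed )
	{	// nOut now holds the number of PCM bytes produced; drop the consumed
		// compressed bytes from the accumulation buffer
		m_tmp.LShift( nUsed );
	} // end if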