Example #1
void process_queue( mlt_deque data_queue, mlt_frame frame, mlt_filter filter )
{
	if ( data_queue != NULL )
	{
		// Create a new queue for those that we can't handle
		mlt_deque temp_queue = mlt_deque_init( );

		// Iterate through each entry on the queue
		while ( mlt_deque_peek_front( data_queue ) != NULL )
		{
			// Get the data feed
			mlt_properties feed = mlt_deque_pop_front( data_queue );

			if ( mlt_properties_get( MLT_FILTER_PROPERTIES( filter ), "debug" ) != NULL )
				mlt_properties_debug( feed, mlt_properties_get( MLT_FILTER_PROPERTIES( filter ), "debug" ), stderr );

			// Process the data feed...
			if ( process_feed( feed, filter, frame ) == 0 )
				mlt_properties_close( feed );
			else
				mlt_deque_push_back( temp_queue, feed );
		}
	
		// Now put the unprocessed feeds back on the data queue
		while ( mlt_deque_peek_front( temp_queue ) )
		{
			// Get the data feed
			mlt_properties feed = mlt_deque_pop_front( temp_queue );
	
			// Put it back on the data queue
			mlt_deque_push_back( data_queue, feed );
		}
	
		// Close the temporary queue
		mlt_deque_close( temp_queue );
	}
}
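
The example above exercises the complete mlt_deque lifecycle: init, push_back, peek_front, pop_front, and close. Below is a minimal, self-contained sketch of that same pattern, using only the deque calls already shown; the demo function, the string payloads, and the <framework/mlt.h> include path are illustrative assumptions rather than part of any example here.

#include <stdio.h>
#include <framework/mlt.h>

static void deque_demo( void )
{
	// MLT deques store untyped (void *) items
	mlt_deque queue = mlt_deque_init( );

	mlt_deque_push_back( queue, "first" );
	mlt_deque_push_back( queue, "second" );

	// Drain in FIFO order, as process_queue() does above
	while ( mlt_deque_peek_front( queue ) != NULL )
	{
		char *item = mlt_deque_pop_front( queue );
		printf( "%s\n", item );
	}

	mlt_deque_close( queue );
}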
Example #2
File: vdpau.c  Project: aib/mlt
static int vdpau_get_buffer( AVCodecContext *codec_context, AVFrame *frame )
{
	int error = 0;
	producer_avformat self = codec_context->opaque;
	mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "vdpau_get_buffer\n" );
	
	if ( self->vdpau && mlt_deque_count( self->vdpau->deque ) )
	{
		struct vdpau_render_state *render = mlt_deque_pop_front( self->vdpau->deque );
		
		if ( render )
		{
			frame->data[0] = (uint8_t*) render;
			frame->data[1] = (uint8_t*) render;
			frame->data[2] = (uint8_t*) render;
			frame->linesize[0] = 0;
			frame->linesize[1] = 0;
			frame->linesize[2] = 0;
			frame->type = FF_BUFFER_TYPE_USER;
			render->state = FF_VDPAU_STATE_USED_FOR_REFERENCE;
			frame->reordered_opaque = codec_context->reordered_opaque;
			if ( frame->reference )
			{
				self->vdpau->ip_age[0] = self->vdpau->ip_age[1] + 1;
				self->vdpau->ip_age[1] = 1;
				self->vdpau->b_age++;
			}
			else
			{
				self->vdpau->ip_age[0] ++;
				self->vdpau->ip_age[1] ++;
				self->vdpau->b_age = 1;
			}
		}
		else
		{
			mlt_log_warning( MLT_PRODUCER_SERVICE(self->parent), "VDPAU surface underrun\n" );
			error = -1;
		}
	}
	else
	{
		mlt_log_warning( MLT_PRODUCER_SERVICE(self->parent), "VDPAU surface underrun\n" );
		error = -1;
	}
	
	return error;
}
Example #3
mlt_frame mlt_consumer_rt_frame( mlt_consumer self )
{
	// Frame to return
	mlt_frame frame = NULL;

	// Get the properties
	mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );

	// Check if the user has requested real time or not
	if ( self->real_time > 1 || self->real_time < -1 )
	{
		// Multiple worker threads requested - hand off to the worker queue
		return worker_get_frame( self, properties );
	}
	else if ( self->real_time == 1 || self->real_time == -1 )
	{
		int size = 1;

		// Is the read ahead running?
		if ( self->ahead == 0 )
		{
			int buffer = mlt_properties_get_int( properties, "buffer" );
			int prefill = mlt_properties_get_int( properties, "prefill" );
			consumer_read_ahead_start( self );
			if ( buffer > 1 )
				size = prefill > 0 && prefill < buffer ? prefill : buffer;
		}

		// Get frame from queue
		pthread_mutex_lock( &self->queue_mutex );
		while( self->ahead && mlt_deque_count( self->queue ) < size )
			pthread_cond_wait( &self->queue_cond, &self->queue_mutex );
		frame = mlt_deque_pop_front( self->queue );
		pthread_cond_broadcast( &self->queue_cond );
		pthread_mutex_unlock( &self->queue_mutex );
	}
	else // real_time == 0
	{
		// Get the frame in non real time
		frame = mlt_consumer_get_frame( self );

		// This isn't strictly true, but from the consumer's perspective it is
		if ( frame != NULL )
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );
	}

	return frame;
}
Example #4
static void *video_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Obtain time of thread start
	struct timeval now;
	int64_t start = 0;
	int64_t elapsed = 0;
	struct timespec tm;
	mlt_frame next = NULL;
	mlt_properties properties = NULL;
	double speed = 0;

	// Get real time flag
	int real_time = mlt_properties_get_int( self->properties, "real_time" );

#if !defined(__APPLE__) && !defined(_WIN32)
	if ( setup_sdl_video(self) )
		self->running = 0;
#endif

	// Determine start time
	gettimeofday( &now, NULL );
	start = ( int64_t )now.tv_sec * 1000000 + now.tv_usec;

	while ( self->running )
	{
		// Pop the next frame
		pthread_mutex_lock( &self->video_mutex );
		next = mlt_deque_pop_front( self->queue );
		while ( next == NULL && self->running )
		{
			pthread_cond_wait( &self->video_cond, &self->video_mutex );
			next = mlt_deque_pop_front( self->queue );
		}
		pthread_mutex_unlock( &self->video_mutex );

		if ( !self->running || next == NULL ) break;

		// Get the properties
		properties =  MLT_FRAME_PROPERTIES( next );

		// Get the speed of the frame
		speed = mlt_properties_get_double( properties, "_speed" );

		// Get the current time
		gettimeofday( &now, NULL );

		// Get the elapsed time
		elapsed = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - start;

		// See if we have to delay the display of the current frame
		if ( mlt_properties_get_int( properties, "rendered" ) == 1 && self->running )
		{
			// Obtain the scheduled playout time
			int64_t scheduled = mlt_properties_get_int( properties, "playtime" );

			// Determine the difference between the elapsed time and the scheduled playout time
			int64_t difference = scheduled - elapsed;

			// Smooth playback a bit
			if ( real_time && ( difference > 20000 && speed == 1.0 ) )
			{
				tm.tv_sec = difference / 1000000;
				tm.tv_nsec = ( difference % 1000000 ) * 500;
				nanosleep( &tm, NULL );
			}

			// Show current frame if not too old
			if ( !real_time || ( difference > -10000 || speed != 1.0 || mlt_deque_count( self->queue ) < 2 ) )
				consumer_play_video( self, next );

			// If the queue is empty, recalculate start to allow build up again
			if ( real_time && ( mlt_deque_count( self->queue ) == 0 && speed == 1.0 ) )
			{
				gettimeofday( &now, NULL );
				start = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - scheduled + 20000;
			}
		}
		else
		{
			static int dropped = 0;
			mlt_log_info( MLT_CONSUMER_SERVICE(&self->parent), "dropped video frame %d\n", ++dropped );
		}

		// This frame can now be closed
		mlt_frame_close( next );
		next = NULL;
	}

	if ( next != NULL )
		mlt_frame_close( next );

	mlt_consumer_stopped( &self->parent );

	return NULL;
}
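
The video thread above, like the consumer code in the other examples, drains a mutex-protected mlt_deque and blocks on a condition variable while the deque is empty, while the feeding side pushes and signals under the same mutex. Here is a stripped-down sketch of that producer/consumer pairing; the struct and function names are hypothetical and not part of MLT.

#include <pthread.h>
#include <framework/mlt.h>

// Hypothetical container; the MLT consumers above keep equivalent fields internally
struct demo_queue
{
	mlt_deque deque;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	int running;
};

// Block until an item is available (or the queue is shut down), then pop it
static void *demo_blocking_pop( struct demo_queue *q )
{
	void *item = NULL;
	pthread_mutex_lock( &q->mutex );
	while ( q->running && mlt_deque_count( q->deque ) == 0 )
		pthread_cond_wait( &q->cond, &q->mutex );
	item = mlt_deque_pop_front( q->deque );
	pthread_mutex_unlock( &q->mutex );
	return item;
}

// The feeding side pushes and wakes a waiter under the same mutex
static void demo_push( struct demo_queue *q, void *item )
{
	pthread_mutex_lock( &q->mutex );
	mlt_deque_push_back( q->deque, item );
	pthread_cond_signal( &q->cond );
	pthread_mutex_unlock( &q->mutex );
}

Note that mlt_consumer_rt_frame() above broadcasts on the condition after popping; pthread_cond_broadcast() wakes every thread waiting on the condition, whereas pthread_cond_signal() wakes only one.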
Example #5
File: vdpau.c  Project: aib/mlt
static int vdpau_decoder_init( producer_avformat self )
{
	mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "vdpau_decoder_init\n" );
	int success = 1;
	
	self->video_codec->opaque = self;
	self->video_codec->get_format = vdpau_get_format;
	self->video_codec->get_buffer = vdpau_get_buffer;
	self->video_codec->release_buffer = vdpau_release_buffer;
	self->video_codec->draw_horiz_band = vdpau_draw_horiz;
	self->video_codec->slice_flags = SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD;
	self->video_codec->pix_fmt = PIX_FMT_VDPAU_H264;
	
	VdpDecoderProfile profile = VDP_DECODER_PROFILE_H264_HIGH;
	uint32_t max_references = self->video_codec->refs;
	pthread_mutex_lock( &mlt_sdl_mutex );
	VdpStatus status = vdp_decoder_create( self->vdpau->device,
		profile, self->video_codec->width, self->video_codec->height, max_references, &self->vdpau->decoder );
	pthread_mutex_unlock( &mlt_sdl_mutex );
	
	if ( status == VDP_STATUS_OK )
	{
		int i, n = FFMIN( self->video_codec->refs + 2, MAX_VDPAU_SURFACES );

		self->vdpau->deque = mlt_deque_init();
		for ( i = 0; i < n; i++ )
		{
			if ( VDP_STATUS_OK == vdp_surface_create( self->vdpau->device, VDP_CHROMA_TYPE_420,
				self->video_codec->width, self->video_codec->height, &self->vdpau->render_states[i].surface ) )
			{
				mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "successfully created VDPAU surface %x\n",
					self->vdpau->render_states[i].surface );
				mlt_deque_push_back( self->vdpau->deque, &self->vdpau->render_states[i] );
			}
			else
			{
				mlt_log_info( MLT_PRODUCER_SERVICE(self->parent), "failed to create VDPAU surface %dx%d\n",
					self->video_codec->width, self->video_codec->height );
				while ( mlt_deque_count( self->vdpau->deque ) )
				{
					struct vdpau_render_state *render = mlt_deque_pop_front( self->vdpau->deque );
					vdp_surface_destroy( render->surface );
				}
				mlt_deque_close( self->vdpau->deque );
				success = 0;
				break;
			}
		}
		if ( self->vdpau )
			self->vdpau->b_age = self->vdpau->ip_age[0] = self->vdpau->ip_age[1] = 256*256*256*64; // magic from Avidemux
	}
	else
	{
		success = 0;
		self->vdpau->decoder = VDP_INVALID_HANDLE;
		mlt_log_error( MLT_PRODUCER_SERVICE(self->parent), "VDPAU failed to initialize decoder (%s)\n",
			vdp_get_error_string( status ) );
	}
	
	return success;
}
Example #6
static mlt_frame worker_get_frame( mlt_consumer self, mlt_properties properties )
{
	// Frame to return
	mlt_frame frame = NULL;

	double fps = mlt_properties_get_double( properties, "fps" );
	int threads = abs( self->real_time );
	int buffer = mlt_properties_get_int( properties, "_buffer" );
	buffer = buffer > 0 ? buffer : mlt_properties_get_int( properties, "buffer" );
	// This is a heuristic to determine a suitable minimum buffer size for the number of threads.
	int headroom = 2 + threads * threads;
	buffer = buffer < headroom ? headroom : buffer;

	// Start worker threads if not already started.
	if ( ! self->ahead )
	{
		int prefill = mlt_properties_get_int( properties, "prefill" );
		prefill = prefill > 0 && prefill < buffer ? prefill : buffer;

		consumer_work_start( self );

		// Fill the work queue.
		int i = buffer;
		while ( self->ahead && i-- )
		{
			frame = mlt_consumer_get_frame( self );
			if ( frame )
			{
				pthread_mutex_lock( &self->queue_mutex );
				mlt_deque_push_back( self->queue, frame );
				pthread_cond_signal( &self->queue_cond );
				pthread_mutex_unlock( &self->queue_mutex );
			}
		}

		// Wait for prefill
		while ( self->ahead && first_unprocessed_frame( self ) < prefill )
		{
			pthread_mutex_lock( &self->done_mutex );
			pthread_cond_wait( &self->done_cond, &self->done_mutex );
			pthread_mutex_unlock( &self->done_mutex );
		}
		self->process_head = threads;
	}

//	mlt_log_verbose( MLT_CONSUMER_SERVICE(self), "size %d done count %d work count %d process_head %d\n",
//		threads, first_unprocessed_frame( self ), mlt_deque_count( self->queue ), self->process_head );

	// Feed the work queue
	while ( self->ahead && mlt_deque_count( self->queue ) < buffer )
	{
		frame = mlt_consumer_get_frame( self );
		if ( ! frame )
			return frame;
		pthread_mutex_lock( &self->queue_mutex );
		mlt_deque_push_back( self->queue, frame );
		pthread_cond_signal( &self->queue_cond );
		pthread_mutex_unlock( &self->queue_mutex );
	}

	// Wait if not realtime.
	mlt_frame head_frame = MLT_FRAME( mlt_deque_peek_front( self->queue ) );
	while ( self->ahead && self->real_time < 0 &&
		!( head_frame && mlt_properties_get_int( MLT_FRAME_PROPERTIES( head_frame ), "rendered" ) ) )
	{
		pthread_mutex_lock( &self->done_mutex );
		pthread_cond_wait( &self->done_cond, &self->done_mutex );
		pthread_mutex_unlock( &self->done_mutex );
	}
	
	// Get the frame from the queue.
	pthread_mutex_lock( &self->queue_mutex );
	frame = mlt_deque_pop_front( self->queue );
	pthread_mutex_unlock( &self->queue_mutex );

	// Adapt the worker process head to the runtime conditions.
	if ( self->real_time > 0 )
	{
		if ( frame && mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered" ) )
		{
			self->consecutive_dropped = 0;
			if ( self->process_head > threads && self->consecutive_rendered >= self->process_head )
				self->process_head--;
			else
				self->consecutive_rendered++;
		}
		else
		{
			self->consecutive_rendered = 0;
			if ( self->process_head < buffer - threads && self->consecutive_dropped > threads )
				self->process_head++;
			else
				self->consecutive_dropped++;
		}
//		mlt_log_verbose( MLT_CONSUMER_SERVICE(self), "dropped %d rendered %d process_head %d\n",
//			self->consecutive_dropped, self->consecutive_rendered, self->process_head );

		// Check for too many consecutively dropped frames
		if ( self->consecutive_dropped > mlt_properties_get_int( properties, "drop_max" ) )
		{
			int orig_buffer = mlt_properties_get_int( properties, "buffer" );
			int prefill = mlt_properties_get_int( properties, "prefill" );
			mlt_log_verbose( self, "too many frames dropped - " );

			// If using a default low-latency buffer level (SDL) and below the limit
			if ( ( orig_buffer == 1 || prefill == 1 ) && buffer < (threads + 1) * 10 )
			{
				// Auto-scale the buffer to compensate
				mlt_log_verbose( self, "increasing buffer to %d\n", buffer + threads );
				mlt_properties_set_int( properties, "_buffer", buffer + threads );
				self->consecutive_dropped = fps / 2;
			}
			else
			{
				// Tell the consumer to render it
				mlt_log_verbose( self, "forcing next frame\n" );
				mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );
				self->consecutive_dropped = 0;
			}
		}
	}
	
	return frame;
}
Example #7
    mlt_frame getFrame()
    {
        struct timeval now;
        struct timespec tm;
        double fps = mlt_producer_get_fps( getProducer() );
        mlt_position position = mlt_producer_position( getProducer() );
        mlt_frame frame = mlt_cache_get_frame( m_cache, position );

        // Allow the buffer to fill to the requested initial buffer level.
        if ( m_isBuffering )
        {
            int prefill = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "prefill" );
            int buffer = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "buffer" );

            m_isBuffering = false;
            prefill = prefill > buffer ? buffer : prefill;
            pthread_mutex_lock( &m_mutex );
            while ( mlt_deque_count( m_queue ) < prefill )
            {
                // Wait up to buffer/fps seconds
                gettimeofday( &now, NULL );
                long usec = now.tv_sec * 1000000 + now.tv_usec;
                usec += 1000000 * buffer / fps;
                tm.tv_sec = usec / 1000000;
                tm.tv_nsec = (usec % 1000000) * 1000;
                if ( pthread_cond_timedwait( &m_condition, &m_mutex, &tm ) )
                    break;
            }
            pthread_mutex_unlock( &m_mutex );
        }

        if ( !frame )
        {
            // Wait if queue is empty
            pthread_mutex_lock( &m_mutex );
            while ( mlt_deque_count( m_queue ) < 1 )
            {
                // Wait up to twice frame duration
                gettimeofday( &now, NULL );
                long usec = now.tv_sec * 1000000 + now.tv_usec;
                usec += 2000000 / fps;
                tm.tv_sec = usec / 1000000;
                tm.tv_nsec = (usec % 1000000) * 1000;
                if ( pthread_cond_timedwait( &m_condition, &m_mutex, &tm ) )
                    // Stop waiting if error (timed out)
                    break;
            }
            frame = ( mlt_frame ) mlt_deque_pop_front( m_queue );
            pthread_mutex_unlock( &m_mutex );

            // add to cache
            if ( frame )
            {
                mlt_frame_set_position( frame, position );
                mlt_cache_put_frame( m_cache, frame );
            }
        }

        // Set frame timestamp and properties
        if ( frame )
        {
            mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( getProducer() ) );
            mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
            mlt_properties_set_int( properties, "progressive", profile->progressive );
            mlt_properties_set_int( properties, "meta.media.progressive", profile->progressive );
            mlt_properties_set_int( properties, "top_field_first", m_topFieldFirst );
            mlt_properties_set_double( properties, "aspect_ratio", mlt_profile_sar( profile ) );
            mlt_properties_set_int( properties, "meta.media.sample_aspect_num", profile->sample_aspect_num );
            mlt_properties_set_int( properties, "meta.media.sample_aspect_den", profile->sample_aspect_den );
            mlt_properties_set_int( properties, "meta.media.frame_rate_num", profile->frame_rate_num );
            mlt_properties_set_int( properties, "meta.media.frame_rate_den", profile->frame_rate_den );
            mlt_properties_set_int( properties, "width", profile->width );
            mlt_properties_set_int( properties, "meta.media.width", profile->width );
            mlt_properties_set_int( properties, "height", profile->height );
            mlt_properties_set_int( properties, "meta.media.height", profile->height );
            mlt_properties_set_int( properties, "format", mlt_image_yuv422 );
            mlt_properties_set_int( properties, "colorspace", m_colorspace );
            mlt_properties_set_int( properties, "meta.media.colorspace", m_colorspace );
            mlt_properties_set_int( properties, "audio_frequency", 48000 );
            mlt_properties_set_int( properties, "audio_channels",
                                    mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" ) );
        }
        else
            mlt_log_warning( getProducer(), "buffer underrun\n" );

        return frame;
    }
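
getFrame() above turns a relative timeout into the absolute deadline that pthread_cond_timedwait() expects. The helper below isolates just that conversion for the queue-empty wait (roughly two frame durations from now); the function name is illustrative, and long is assumed to be 64-bit as on typical Linux builds, matching the arithmetic in the example.

#include <sys/time.h>
#include <time.h>

// Compute an absolute deadline of "now + two frame durations" for pthread_cond_timedwait()
static void two_frames_from_now( struct timespec *tm, double fps )
{
	struct timeval now;
	gettimeofday( &now, NULL );
	long usec = now.tv_sec * 1000000 + now.tv_usec;   // current time in microseconds
	usec += 2000000 / fps;                            // add 2/fps seconds
	tm->tv_sec = usec / 1000000;
	tm->tv_nsec = ( usec % 1000000 ) * 1000;
}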
Example #8
static void *video_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Obtain time of thread start
	struct timeval now;
	int64_t start = 0;
	int64_t elapsed = 0;
	struct timespec tm;
	mlt_frame next = NULL;
	mlt_properties properties = NULL;
	double speed = 0;

	// Get real time flag
	int real_time = mlt_properties_get_int( self->properties, "real_time" );

	// Get the current time
	gettimeofday( &now, NULL );

	// Determine start time
	start = ( int64_t )now.tv_sec * 1000000 + now.tv_usec;

	while ( self->running )
	{
		// Pop the next frame
		pthread_mutex_lock( &self->video_mutex );
		next = mlt_deque_pop_front( self->queue );
		while ( next == NULL && self->running )
		{
			pthread_cond_wait( &self->video_cond, &self->video_mutex );
			next = mlt_deque_pop_front( self->queue );
		}
		pthread_mutex_unlock( &self->video_mutex );

		if ( !self->running || next == NULL ) break;

		// Get the properties
		properties =  MLT_FRAME_PROPERTIES( next );

		// Get the speed of the frame
		speed = mlt_properties_get_double( properties, "_speed" );

		// Get the current time
		gettimeofday( &now, NULL );

		// Get the elapsed time
		elapsed = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - start;

		// See if we have to delay the display of the current frame
		if ( mlt_properties_get_int( properties, "rendered" ) == 1 )
		{
			// Obtain the scheduled playout time
			int64_t scheduled = mlt_properties_get_int( properties, "playtime" );

			// Determine the difference between the elapsed time and the scheduled playout time
			int64_t difference = scheduled - elapsed;

			// Smooth playback a bit
			if ( real_time && ( difference > 20000 && speed == 1.0 ) )
			{
				tm.tv_sec = difference / 1000000;
				tm.tv_nsec = ( difference % 1000000 ) * 500;
				nanosleep( &tm, NULL );
			}

			// Show current frame if not too old
			if ( !real_time || ( difference > -10000 || speed != 1.0 || mlt_deque_count( self->queue ) < 2 ) )
				consumer_play_video( self, next );

			// If the queue is empty, recalculate start to allow build up again
			if ( real_time && ( mlt_deque_count( self->queue ) == 0 && speed == 1.0 ) )
			{
				gettimeofday( &now, NULL );
				start = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - scheduled + 20000;
			}
		}

		// This frame can now be closed
		mlt_frame_close( next );
		next = NULL;
	}

	// This consumer is stopping. But audio has already been played for all
	// the frames in the queue. Spit out all the frames so that the display has
	// the option to catch up with the audio.
	if ( next != NULL ) {
		consumer_play_video( self, next );
		mlt_frame_close( next );
		next = NULL;
	}
	while ( mlt_deque_count( self->queue ) > 0 ) {
		next = mlt_deque_pop_front( self->queue );
		consumer_play_video( self, next );
		mlt_frame_close( next );
		next = NULL;
	}

	mlt_consumer_stopped( &self->parent );

	return NULL;
}