Example #1
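The first listing is the producer_get_frame() implementation of the tractor producer. For each request on track 0 it pulls one frame per track from the connected service, merges their "meta." properties and data queues onto a single output frame, and stacks the producer_get_audio / producer_get_image callbacks so that audio and image are assembled when the consumer fetches them.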
static int producer_get_frame( mlt_producer parent, mlt_frame_ptr frame, int track )
{
	mlt_tractor self = parent->child;

	// We only respond to requests for the first track
	if ( track == 0 && self->producer != NULL )
	{
		int i = 0;
		int done = 0;
		mlt_frame temp = NULL;
		int count = 0;
		int image_count = 0;

		// Get the properties of the parent producer
		mlt_properties properties = MLT_PRODUCER_PROPERTIES( parent );

		// Try to obtain the multitrack associated to the tractor
		mlt_multitrack multitrack = mlt_properties_get_data( properties, "multitrack", NULL );

		// Or a specific producer
		mlt_producer producer = mlt_properties_get_data( properties, "producer", NULL );

		// Determine whether this tractor feeds to the consumer or stops here
		int global_feed = mlt_properties_get_int( properties, "global_feed" );

		// If we don't have one, we're in trouble...
		if ( multitrack != NULL )
		{
			// The output frame will hold the 'global' data feeds (i.e. those which are targeted for the final frame)
			mlt_deque data_queue = mlt_deque_init( );

			// Used to garbage collect all frames
			char label[ 30 ];

			// Get the id of the tractor
			char *id = mlt_properties_get( properties, "_unique_id" );

			// Will be used to store the frame properties object
			mlt_properties frame_properties = NULL;

			// We'll store audio and video frames to use here
			mlt_frame audio = NULL;
			mlt_frame video = NULL;
			mlt_frame first_video = NULL;

			// Temporary properties
			mlt_properties temp_properties = NULL;

			// Get the multitrack's producer
			mlt_producer target = MLT_MULTITRACK_PRODUCER( multitrack );
			mlt_producer_seek( target, mlt_producer_frame( parent ) );
			mlt_producer_set_speed( target, mlt_producer_get_speed( parent ) );

			// We will create one frame and attach everything to it
			*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( parent ) );

			// Get the properties of the frame
			frame_properties = MLT_FRAME_PROPERTIES( *frame );

			// Loop through each of the tracks we're harvesting
			for ( i = 0; !done; i ++ )
			{
				// Get a frame from the producer
				mlt_service_get_frame( self->producer, &temp, i );

				// Get the temporary properties
				temp_properties = MLT_FRAME_PROPERTIES( temp );

				// Pass all unique meta properties from the producer's frame to the new frame
				mlt_properties_lock( temp_properties );
				int props_count = mlt_properties_count( temp_properties );
				int j;
				for ( j = 0; j < props_count; j ++ )
				{
					char *name = mlt_properties_get_name( temp_properties, j );
					if ( !strncmp( name, "meta.", 5 ) && !mlt_properties_get( frame_properties, name ) )
						mlt_properties_set( frame_properties, name, mlt_properties_get_value( temp_properties, j ) );
				}
				mlt_properties_unlock( temp_properties );

				// Copy the format conversion virtual functions
				if ( ! (*frame)->convert_image && temp->convert_image )
					(*frame)->convert_image = temp->convert_image;
				if ( ! (*frame)->convert_audio && temp->convert_audio )
					(*frame)->convert_audio = temp->convert_audio;

				// Check for last track
				done = mlt_properties_get_int( temp_properties, "last_track" );

				// Handle fx only tracks
				if ( mlt_properties_get_int( temp_properties, "fx_cut" ) )
				{
					int hide = ( video == NULL ? 1 : 0 ) | ( audio == NULL ? 2 : 0 );
					mlt_properties_set_int( temp_properties, "hide", hide );
				}

				// We store all frames with a destructor on the output frame
				sprintf( label, "_%s_%d", id, count ++ );
				mlt_properties_set_data( frame_properties, label, temp, 0, ( mlt_destructor )mlt_frame_close, NULL );

				// We want to append all 'final' feeds to the global queue
				if ( !done && mlt_properties_get_data( temp_properties, "data_queue", NULL ) != NULL )
				{
					// Move the contents of this queue on to the output frames data queue
					mlt_deque sub_queue = mlt_properties_get_data( MLT_FRAME_PROPERTIES( temp ), "data_queue", NULL );
					mlt_deque temp = mlt_deque_init( );
					while ( global_feed && mlt_deque_count( sub_queue ) )
					{
						mlt_properties p = mlt_deque_pop_back( sub_queue );
						if ( mlt_properties_get_int( p, "final" ) )
							mlt_deque_push_back( data_queue, p );
						else
							mlt_deque_push_back( temp, p );
					}
					while( mlt_deque_count( temp ) )
						mlt_deque_push_front( sub_queue, mlt_deque_pop_back( temp ) );
					mlt_deque_close( temp );
				}

				// Now do the same with the global queue but without the conditional behaviour
				if ( mlt_properties_get_data( temp_properties, "global_queue", NULL ) != NULL )
				{
					mlt_deque sub_queue = mlt_properties_get_data( MLT_FRAME_PROPERTIES( temp ), "global_queue", NULL );
					while ( mlt_deque_count( sub_queue ) )
					{
						mlt_properties p = mlt_deque_pop_back( sub_queue );
						mlt_deque_push_back( data_queue, p );
					}
				}

				// Pick up first video and audio frames
				if ( !done && !mlt_frame_is_test_audio( temp ) && !( mlt_properties_get_int( temp_properties, "hide" ) & 2 ) )
				{
					// Order of frame creation is starting to get problematic
					if ( audio != NULL )
					{
						mlt_deque_push_front( MLT_FRAME_AUDIO_STACK( temp ), producer_get_audio );
						mlt_deque_push_front( MLT_FRAME_AUDIO_STACK( temp ), audio );
					}
					audio = temp;
				}
				if ( !done && !mlt_frame_is_test_card( temp ) && !( mlt_properties_get_int( temp_properties, "hide" ) & 1 ) )
				{
					if ( video != NULL )
					{
						mlt_deque_push_front( MLT_FRAME_IMAGE_STACK( temp ), producer_get_image );
						mlt_deque_push_front( MLT_FRAME_IMAGE_STACK( temp ), video );
					}
					video = temp;
					if ( first_video == NULL )
						first_video = temp;

					mlt_properties_set_int( MLT_FRAME_PROPERTIES( temp ), "image_count", ++ image_count );
					image_count = 1;
				}
			}

			// Now stack callbacks
			if ( audio != NULL )
			{
				mlt_frame_push_audio( *frame, audio );
				mlt_frame_push_audio( *frame, producer_get_audio );
			}

			if ( video != NULL )
			{
				mlt_properties video_properties = MLT_FRAME_PROPERTIES( first_video );
				mlt_frame_push_service( *frame, video );
				mlt_frame_push_service( *frame, producer_get_image );
				if ( global_feed )
					mlt_properties_set_data( frame_properties, "data_queue", data_queue, 0, NULL, NULL );
				mlt_properties_set_data( video_properties, "global_queue", data_queue, 0, destroy_data_queue, NULL );
				mlt_properties_set_int( frame_properties, "width", mlt_properties_get_int( video_properties, "width" ) );
				mlt_properties_set_int( frame_properties, "height", mlt_properties_get_int( video_properties, "height" ) );
				mlt_properties_pass_list( frame_properties, video_properties, "meta.media.width, meta.media.height" );
				mlt_properties_set_int( frame_properties, "progressive", mlt_properties_get_int( video_properties, "progressive" ) );
				mlt_properties_set_double( frame_properties, "aspect_ratio", mlt_properties_get_double( video_properties, "aspect_ratio" ) );
				mlt_properties_set_int( frame_properties, "image_count", image_count );
				mlt_properties_set_data( frame_properties, "_producer", mlt_frame_get_original_producer( first_video ), 0, NULL, NULL );
			}
			else
			{
				destroy_data_queue( data_queue );
			}

			mlt_frame_set_position( *frame, mlt_producer_frame( parent ) );
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( *frame ), "test_audio", audio == NULL );
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( *frame ), "test_image", video == NULL );
		}
		else if ( producer != NULL )
		{
			mlt_producer_seek( producer, mlt_producer_frame( parent ) );
			mlt_producer_set_speed( producer, mlt_producer_get_speed( parent ) );
			mlt_service_get_frame( self->producer, frame, track );
		}
		else
		{
			mlt_log( MLT_PRODUCER_SERVICE( parent ), MLT_LOG_ERROR, "tractor without a multitrack!!\n" );
			mlt_service_get_frame( self->producer, frame, track );
		}

		// Prepare the next frame
		mlt_producer_prepare_next( parent );

		// Indicate our found status
		return 0;
	}
	else
	{
		// Generate a test card
		*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( parent ) );
		return 0;
	}
}
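The render() method below comes from a DeckLink-based playout consumer. At normal speed it pulls 16-bit audio from the MLT frame into a sample FIFO, limiting how much it appends while rebuffering after an underrun; it then copies the rendered yuv422 image into a DeckLink video frame (adjusting field order when the display mode is upper field first), schedules the frame for output, and starts scheduled playback once the pre-roll period has elapsed.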
	HRESULT render( mlt_frame frame )
	{
		HRESULT result = S_OK;
		// Get the audio		
		double speed = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame), "_speed" );
		if ( speed == 1.0 )
		{
			mlt_audio_format format = mlt_audio_s16;
			int frequency = bmdAudioSampleRate48kHz;
			int samples = mlt_sample_calculator( m_fps, frequency, m_count );
			int16_t *pcm = 0;
			
			if ( !mlt_frame_get_audio( frame, (void**) &pcm, &format, &frequency, &m_channels, &samples ) )
			{
				int count = samples;
				
				if ( !m_isPrerolling )
				{
					uint32_t audioCount = 0;
					uint32_t videoCount = 0;
					
					// Check for resync
					m_deckLinkOutput->GetBufferedAudioSampleFrameCount( &audioCount );
					m_deckLinkOutput->GetBufferedVideoFrameCount( &videoCount );
					
					// Underflow typically occurs during non-normal speed playback.
					if ( audioCount < 1 || videoCount < 1 )
					{
						// Upon switching to normal playback, buffer some frames faster than realtime.
						mlt_log_info( &m_consumer, "buffer underrun: audio buf %u video buf %u frames\n", audioCount, videoCount );
						m_prerollCounter = 0;
					}
					
					// While rebuffering
					if ( isBuffering() )
					{
						// Only append audio to reach the ideal level and not overbuffer.
						int ideal = ( m_preroll - 1 ) * bmdAudioSampleRate48kHz / m_fps;
						int actual = m_fifo->used / m_channels + audioCount;
						int diff = ideal / 2 - actual;
						count = diff < 0 ? 0 : diff < count ? diff : count;
					}
				}
				if ( count > 0 )
					sample_fifo_append( m_fifo, pcm, count * m_channels );
			}
		}
		
		// Create video frames while pre-rolling
		if ( m_isPrerolling )
		{
			createFrame();
			if ( !m_videoFrame )
			{
				mlt_log_error( &m_consumer, "failed to create video frame\n" );
				return S_FALSE;
			}
		}
		
		// Get the video
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered") )
		{
			mlt_image_format format = mlt_image_yuv422;
			uint8_t* image = 0;
			uint8_t* buffer = 0;

			if ( !mlt_frame_get_image( frame, &image, &format, &m_width, &m_height, 0 ) )
			{
				m_videoFrame = (IDeckLinkMutableVideoFrame*) mlt_deque_pop_back( m_videoFrameQ );
				m_videoFrame->GetBytes( (void**) &buffer );
				if ( m_displayMode->GetFieldDominance() == bmdUpperFieldFirst )
					// convert lower field first to top field first
					swab( image, buffer + m_width * 2, m_width * ( m_height - 1 ) * 2 );
				else
					swab( image, buffer, m_width * m_height * 2 );
				m_deckLinkOutput->ScheduleVideoFrame( m_videoFrame, m_count * m_duration, m_duration, m_timescale );
				mlt_deque_push_front( m_videoFrameQ, m_videoFrame );
			}
		}
		else
		{
			mlt_log_verbose( &m_consumer, "dropped video frame\n" );
		}
		// Advance the frame counter used for the audio sample calculation and the video scheduling time
		++m_count;

		// Check for end of pre-roll
		if ( ++m_prerollCounter > m_preroll && m_isPrerolling )
		{
			// Start audio and video output
			m_deckLinkOutput->EndAudioPreroll();
			m_deckLinkOutput->StartScheduledPlayback( 0, m_timescale, 1.0 );
			m_isPrerolling = false;
		}

		return result;
	}