Example #1
static int get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	context cx = mlt_frame_pop_audio( frame );
	mlt_frame nested_frame = mlt_frame_pop_audio( frame );
	int result = 0;

	// if not repeating last frame
	if ( mlt_frame_get_position( nested_frame ) != cx->audio_position )
	{
		double fps = mlt_profile_fps( cx->profile );
		if ( mlt_producer_get_fps( cx->self ) < fps )
			fps = mlt_producer_get_fps( cx->self );
		*samples = mlt_sample_calculator( fps, *frequency, cx->audio_counter++ );
		result = mlt_frame_get_audio( nested_frame, buffer, format, frequency, channels, samples );
		int size = mlt_audio_format_size( *format, *samples, *channels );
		int16_t *new_buffer = mlt_pool_alloc( size );

		mlt_frame_set_audio( frame, new_buffer, *format, size, mlt_pool_release );
		memcpy( new_buffer, *buffer, size );
		*buffer = new_buffer;
		cx->audio_position = mlt_frame_get_position( nested_frame );
	}
	else
	{
		// otherwise return no samples
		*samples = 0;
		*buffer = NULL;
	}

	return result;
}
Example #2
	IDeckLinkDisplayMode* getDisplayMode()
	{
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( getConsumer() ) );
		IDeckLinkDisplayModeIterator* iter = NULL;
		IDeckLinkDisplayMode* mode = NULL;
		IDeckLinkDisplayMode* result = 0;

		if ( m_deckLinkOutput->GetDisplayModeIterator( &iter ) == S_OK )
		{
			while ( !result && iter->Next( &mode ) == S_OK )
			{
				m_width = mode->GetWidth();
				m_height = mode->GetHeight();
				mode->GetFrameRate( &m_duration, &m_timescale );
				m_fps = (double) m_timescale / m_duration;
				int p = mode->GetFieldDominance() == bmdProgressiveFrame;
				mlt_log_verbose( getConsumer(), "BMD mode %dx%d %.3f fps prog %d\n", m_width, m_height, m_fps, p );

				if ( m_width == profile->width && p == profile->progressive
					 && (int) m_fps == (int) mlt_profile_fps( profile )
					 && ( m_height == profile->height || ( m_height == 486 && profile->height == 480 ) ) )
					result = mode;
				else
					SAFE_RELEASE( mode );
			}
			SAFE_RELEASE( iter );
		}

		return result;
	}
Example #3
    BMDDisplayMode getDisplayMode( mlt_profile profile, int vancLines )
    {
        IDeckLinkDisplayModeIterator* iter = NULL;
        IDeckLinkDisplayMode* mode = NULL;
        BMDDisplayMode result = (BMDDisplayMode) bmdDisplayModeNotSupported;

        if ( m_decklinkInput->GetDisplayModeIterator( &iter ) == S_OK )
        {
            while ( !result && iter->Next( &mode ) == S_OK )
            {
                int width = mode->GetWidth();
                int height = mode->GetHeight();
                BMDTimeValue duration;
                BMDTimeScale timescale;
                mode->GetFrameRate( &duration, &timescale );
                double fps = (double) timescale / duration;
                int p = mode->GetFieldDominance() == bmdProgressiveFrame;
                m_topFieldFirst = mode->GetFieldDominance() == bmdUpperFieldFirst;
                m_colorspace = ( mode->GetFlags() & bmdDisplayModeColorspaceRec709 ) ? 709 : 601;
                mlt_log_verbose( getProducer(), "BMD mode %dx%d %.3f fps prog %d tff %d\n", width, height, fps, p, m_topFieldFirst );

                if ( width == profile->width && p == profile->progressive
                        && ( height + vancLines == profile->height || ( height == 486 && profile->height == 480 + vancLines ) )
                        && fps == mlt_profile_fps( profile ) )
                    result = mode->GetDisplayMode();
                SAFE_RELEASE( mode );
            }
            SAFE_RELEASE( iter );
        }

        return result;
    }
Example #4
static int transition_get_image( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_frame b_frame = mlt_frame_pop_frame( a_frame );
	mlt_transition transition = mlt_frame_pop_service( a_frame );
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );
	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

	int invert = mlt_properties_get_int( properties, "invert" );

	uint8_t *images[]={NULL,NULL,NULL};

	*format = mlt_image_rgb24a;
	mlt_frame_get_image( a_frame, &images[0], format, width, height, 0 );
	mlt_frame_get_image( b_frame, &images[1], format, width, height, 0 );
	
	double position = mlt_transition_get_position( transition, a_frame );
	mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
	double time = position / mlt_profile_fps( profile );
	process_frei0r_item( MLT_TRANSITION_SERVICE(transition), position, time, properties, !invert ? a_frame : b_frame, images, width, height );
	
	*width = mlt_properties_get_int( !invert ? a_props : b_props, "width" );
	*height = mlt_properties_get_int( !invert ? a_props : b_props, "height" );
	*image = mlt_properties_get_data( !invert ? a_props : b_props , "image", NULL );
	return 0;
}
Example #5
static void analyze_audio( mlt_filter filter, void* buffer, int samples, int frequency )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	private_data* pdata = (private_data*)filter->child;
	int result = -1;
	double in_loudness = 0.0;
	mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE(filter) );
	double fps = mlt_profile_fps( profile );

	ebur128_add_frames_float( pdata->r128, buffer, samples );

	if( pdata->time_elapsed_ms < 400 )
	{
		// Waiting for first program loudness measurement.
		// Use window loudness as initial guess.
		result = ebur128_loudness_window( pdata->r128, pdata->time_elapsed_ms, &in_loudness );
		pdata->time_elapsed_ms += samples * 1000 / frequency;
	}
	else
	{
		result = ebur128_loudness_global( pdata->r128, &in_loudness );
	}

	if( result == EBUR128_SUCCESS && in_loudness != HUGE_VAL && in_loudness != -HUGE_VAL )
	{
		mlt_properties_set_double( properties, "in_loudness", in_loudness );
		double target_loudness = mlt_properties_get_double( properties, "target_loudness" );
		pdata->target_gain = target_loudness - in_loudness;

		// Make sure gain limits are not exceeded.
		double max_gain = mlt_properties_get_double( properties, "max_gain" );
		double min_gain = mlt_properties_get_double( properties, "min_gain" );
		if( pdata->target_gain > max_gain )
		{
			pdata->target_gain = max_gain;
		}
		else if ( pdata->target_gain < min_gain )
		{
			pdata->target_gain = min_gain;
		}
	}

	// Make sure gain does not change too quickly.
	pdata->start_gain = pdata->end_gain;
	pdata->end_gain = pdata->target_gain;
	double max_frame_gain = mlt_properties_get_double( properties, "max_rate" ) / fps;
	if( pdata->start_gain - pdata->end_gain > max_frame_gain )
	{
		pdata->end_gain = pdata->start_gain - max_frame_gain;
	}
	else if( pdata->end_gain - pdata->start_gain > max_frame_gain )
	{
		pdata->end_gain = pdata->start_gain + max_frame_gain;
	}
	mlt_properties_set_double( properties, "out_gain", pdata->end_gain );
}
Example #6
static void on_jack_seek( mlt_properties owner, mlt_filter filter, mlt_position *position )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_log_verbose( MLT_FILTER_SERVICE(filter), "%s: %d\n", __FUNCTION__, *position );
	mlt_properties_set_int( properties, "_sync_guard", 1 );
	mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
	jack_client_t *jack_client = mlt_properties_get_data( properties, "jack_client", NULL );
	jack_nframes_t jack_frame = jack_get_sample_rate( jack_client );
	jack_frame *= *position / mlt_profile_fps( profile );
	jack_transport_locate( jack_client, jack_frame );
}
Example #7
static void apply_profile_properties( mlt_consumer self, mlt_profile profile, mlt_properties properties )
{
	mlt_event_block( self->event_listener );
	mlt_properties_set_double( properties, "fps", mlt_profile_fps( profile ) );
	mlt_properties_set_int( properties, "frame_rate_num", profile->frame_rate_num );
	mlt_properties_set_int( properties, "frame_rate_den", profile->frame_rate_den );
	mlt_properties_set_int( properties, "width", profile->width );
	mlt_properties_set_int( properties, "height", profile->height );
	mlt_properties_set_int( properties, "progressive", profile->progressive );
	mlt_properties_set_double( properties, "aspect_ratio", mlt_profile_sar( profile )  );
	mlt_properties_set_int( properties, "sample_aspect_num", profile->sample_aspect_num );
	mlt_properties_set_int( properties, "sample_aspect_den", profile->sample_aspect_den );
	mlt_properties_set_double( properties, "display_ratio", mlt_profile_dar( profile )  );
	mlt_properties_set_int( properties, "display_aspect_num", profile->display_aspect_num );
	mlt_properties_set_int( properties, "display_aspect_num", profile->display_aspect_num );
	mlt_properties_set_int( properties, "colorspace", profile->colorspace );
	mlt_event_unblock( self->event_listener );
}
Example #8
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{

	mlt_filter filter = mlt_frame_pop_service( frame );
	*format = mlt_image_rgb24a;
	mlt_log_debug( MLT_FILTER_SERVICE( filter ), "frei0r %dx%d\n", *width, *height );
	int error = mlt_frame_get_image( frame, image, format, width, height, 0 );

	if ( error == 0 && *image )
	{
		double position = mlt_filter_get_position( filter, frame );
		mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
		double time = position / mlt_profile_fps( profile );
		int length = mlt_filter_get_length2( filter, frame );
		process_frei0r_item( MLT_FILTER_SERVICE(filter), position, time, length, frame, image, width, height );
	}

	return error;
}
Example #9
static void get_timecode_str( mlt_filter filter, mlt_frame frame, char* text )
{
	int frames = mlt_frame_get_position( frame );
	double fps = mlt_profile_fps( mlt_service_profile( MLT_FILTER_SERVICE( filter ) ) );
	char tc[12] = "";
	if (fps == 0)
	{
		strncat( text, "-", MAX_TEXT_LEN - strlen( text ) - 1 );
	}
	else
	{
		int seconds = frames / fps;
		frames = frames % lrint( fps );
		int minutes = seconds / 60;
		seconds = seconds % 60;
		int hours = minutes / 60;
		minutes = minutes % 60;
		sprintf(tc, "%.2d:%.2d:%.2d:%.2d", hours, minutes, seconds, frames);
		strncat( text, tc, MAX_TEXT_LEN - strlen( text ) - 1 );
	}
}
Example #10
static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
{
	
	// Obtain properties of frame
	mlt_properties properties = MLT_FRAME_PROPERTIES( frame );

	// Obtain the producer for this frame
	mlt_producer producer = mlt_properties_get_data( properties, "producer_frei0r", NULL );

	// Obtain properties of producer
	mlt_properties producer_props = MLT_PRODUCER_PROPERTIES( producer );

	// Choose suitable out values if nothing specific requested
	if ( *width <= 0 )
		*width = mlt_service_profile( MLT_PRODUCER_SERVICE(producer) )->width;
	if ( *height <= 0 )
		*height = mlt_service_profile( MLT_PRODUCER_SERVICE(producer) )->height;

	// Compute the image size
	int size = *width * ( *height + 1 ) * 4;

	// Allocate the image
	*buffer = mlt_pool_alloc( size );

	// Update the frame
	mlt_frame_set_image( frame, *buffer, size, mlt_pool_release );

	*format = mlt_image_rgb24a;
	if ( *buffer != NULL )
	{
		double position = mlt_frame_get_position( frame );
		mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) );
		double time = position / mlt_profile_fps( profile );
		process_frei0r_item( MLT_PRODUCER_SERVICE(producer), position, time, producer_props, frame, buffer, width, height );
	}

	return 0;
}
Example #11
static int jack_sync( jack_transport_state_t state, jack_position_t *jack_pos, void *arg )
{
	mlt_filter filter = (mlt_filter) arg;
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE(filter) );
	mlt_position position = mlt_profile_fps( profile ) * jack_pos->frame / jack_pos->frame_rate + 0.5;
	int result = 1;

	mlt_log_debug( MLT_FILTER_SERVICE(filter), "%s frame %u rate %u pos %d last_pos %d\n",
		JACKSTATE(state), jack_pos->frame, jack_pos->frame_rate, position,
		mlt_properties_get_position( properties, "_last_pos" ) );
	if ( state == JackTransportStopped )
	{
		mlt_events_fire( properties, "jack-stopped", &position, NULL );
		mlt_properties_set_int( properties, "_sync_guard", 0 );
	}
	else if ( state == JackTransportStarting )
	{
		result = 0;
		if ( !mlt_properties_get_int( properties, "_sync_guard" ) )
		{
			mlt_properties_set_int( properties, "_sync_guard", 1 );
			mlt_events_fire( properties, "jack-started", &position, NULL );
		}
		else if ( position >= mlt_properties_get_position( properties, "_last_pos" ) - 2 )
		{
			mlt_properties_set_int( properties, "_sync_guard", 0 );
			result = 1;
		}
	}
	else
	{
		mlt_properties_set_int( properties, "_sync_guard", 0 );
	}

	return result;
}
Example #12
static void mlt_consumer_property_changed( mlt_properties owner, mlt_consumer self, char *name )
{
	if ( !strcmp( name, "mlt_profile" ) )
	{
		// Get the properties
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );

		// Get the current profile
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );

		// Load the new profile
		mlt_profile new_profile = mlt_profile_init( mlt_properties_get( properties, name ) );

		if ( new_profile )
		{
			// Copy the profile
			if ( profile != NULL )
			{
				free( profile->description );
				memcpy( profile, new_profile, sizeof( struct mlt_profile_s ) );
				profile->description = strdup( new_profile->description );
			}
			else
			{
				profile = new_profile;
			}

			// Apply to properties
			apply_profile_properties( self, profile, properties );
			mlt_profile_close( new_profile );
		}
	}
	else if ( !strcmp( name, "frame_rate_num" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
		{
			profile->frame_rate_num = mlt_properties_get_int( properties, "frame_rate_num" );
			mlt_properties_set_double( properties, "fps", mlt_profile_fps( profile ) );
		}
	}
	else if ( !strcmp( name, "frame_rate_den" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
		{
			profile->frame_rate_den = mlt_properties_get_int( properties, "frame_rate_den" );
			mlt_properties_set_double( properties, "fps", mlt_profile_fps( profile ) );
		}
	}
	else if ( !strcmp( name, "width" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
			profile->width = mlt_properties_get_int( properties, "width" );
	}
	else if ( !strcmp( name, "height" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
			profile->height = mlt_properties_get_int( properties, "height" );
	}
	else if ( !strcmp( name, "progressive" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
			profile->progressive = mlt_properties_get_int( properties, "progressive" );
	}
	else if ( !strcmp( name, "sample_aspect_num" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
		{
			profile->sample_aspect_num = mlt_properties_get_int( properties, "sample_aspect_num" );
			mlt_properties_set_double( properties, "aspect_ratio", mlt_profile_sar( profile )  );
		}
	}
	else if ( !strcmp( name, "sample_aspect_den" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
		{
			profile->sample_aspect_den = mlt_properties_get_int( properties, "sample_aspect_den" );
			mlt_properties_set_double( properties, "aspect_ratio", mlt_profile_sar( profile )  );
		}
	}
	else if ( !strcmp( name, "display_aspect_num" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
		{
			profile->display_aspect_num = mlt_properties_get_int( properties, "display_aspect_num" );
			mlt_properties_set_double( properties, "display_ratio", mlt_profile_dar( profile )  );
		}
	}
	else if ( !strcmp( name, "display_aspect_den" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
		{
			profile->display_aspect_den = mlt_properties_get_int( properties, "display_aspect_den" );
			mlt_properties_set_double( properties, "display_ratio", mlt_profile_dar( profile )  );
		}
	}
	else if ( !strcmp( name, "colorspace" ) )
	{
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( self ) );
		if ( profile )
			profile->colorspace = mlt_properties_get_int( properties, "colorspace" );
	}
}
Example #13
static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
{
	double aspect_ratio = 1.0;

	if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
	{
		if ( pkt )
		{
			if ( dv_is_pal( pkt ) )
			{
				aspect_ratio = dv_is_wide( pkt )
					? 64.0/45.0 // 16:9 PAL
					: 16.0/15.0; // 4:3 PAL
			}
			else
			{
				aspect_ratio = dv_is_wide( pkt )
					? 32.0/27.0 // 16:9 NTSC
					: 8.0/9.0; // 4:3 NTSC
			}
		}
		else
		{
			AVRational ar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
				stream->sample_aspect_ratio;
#else
				codec_context->sample_aspect_ratio;
#endif
			// Override FFmpeg's notion of DV aspect ratios, which are
			// based upon a width of 704. Since we do not have a normaliser
			// that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
			// we just coerce the values to facilitate a passive behaviour through
			// the rescale normaliser when using equivalent producers and consumers.
			// = display_aspect / (width * height)
			if ( ar.num == 10 && ar.den == 11 )
				aspect_ratio = 8.0/9.0; // 4:3 NTSC
			else if ( ar.num == 59 && ar.den == 54 )
				aspect_ratio = 16.0/15.0; // 4:3 PAL
			else if ( ar.num == 40 && ar.den == 33 )
				aspect_ratio = 32.0/27.0; // 16:9 NTSC
			else if ( ar.num == 118 && ar.den == 81 )
				aspect_ratio = 64.0/45.0; // 16:9 PAL
		}
	}
	else
	{
		AVRational codec_sar = codec_context->sample_aspect_ratio;
		AVRational stream_sar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
			stream->sample_aspect_ratio;
#else
			{ 0, 1 };
#endif
		if ( codec_sar.num > 0 )
			aspect_ratio = av_q2d( codec_sar );
		else if ( stream_sar.num > 0 )
			aspect_ratio = av_q2d( stream_sar );
	}
	return aspect_ratio;
}

/** Open the file.
*/

static int producer_open( mlt_producer this, mlt_profile profile, char *file )
{
	// Return an error code (0 == no error)
	int error = 0;

	// Context for avformat
	AVFormatContext *context = NULL;

	// Get the properties
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

	// We will treat everything with the producer fps
	double fps = mlt_profile_fps( profile );

	// Lock the mutex now
	avformat_lock( );

	// If "MRL", then create AVInputFormat
	AVInputFormat *format = NULL;
	AVFormatParameters *params = NULL;
	char *standard = NULL;
	char *mrl = strchr( file, ':' );

	// AV option (0 = both, 1 = video, 2 = audio)
	int av = 0;

	// Only if there is not a protocol specification that avformat can handle
	if ( mrl && !url_exist( file ) )
	{
		// 'file' becomes format abbreviation
		mrl[0] = 0;

		// Lookup the format
		format = av_find_input_format( file );

		// Eat the format designator
		file = ++mrl;

		if ( format )
		{
			// Allocate params
			params = calloc( sizeof( AVFormatParameters ), 1 );

			// These are required by video4linux (defaults)
			params->width = 640;
			params->height = 480;
			params->time_base= (AVRational){1,25};
			// params->device = file;
			params->channels = 2;
			params->sample_rate = 48000;
		}

		// XXX: this does not work anymore since avdevice
		// TODO: make producer_avdevice?
		// Parse out params
		mrl = strchr( file, '?' );
		while ( mrl )
		{
			mrl[0] = 0;
			char *name = strdup( ++mrl );
			char *value = strchr( name, ':' );
			if ( value )
			{
				value[0] = 0;
				value++;
				char *t = strchr( value, '&' );
				if ( t )
					t[0] = 0;
				if ( !strcmp( name, "frame_rate" ) )
					params->time_base.den = atoi( value );
				else if ( !strcmp( name, "frame_rate_base" ) )
					params->time_base.num = atoi( value );
				else if ( !strcmp( name, "sample_rate" ) )
					params->sample_rate = atoi( value );
				else if ( !strcmp( name, "channels" ) )
					params->channels = atoi( value );
				else if ( !strcmp( name, "width" ) )
					params->width = atoi( value );
				else if ( !strcmp( name, "height" ) )
					params->height = atoi( value );
				else if ( !strcmp( name, "standard" ) )
				{
					standard = strdup( value );
					params->standard = standard;
				}
				else if ( !strcmp( name, "av" ) )
					av = atoi( value );
			}
			free( name );
			mrl = strchr( mrl, '&' );
		}
	}

	// Now attempt to open the file
	error = av_open_input_file( &context, file, format, 0, params ) < 0;

	// Cleanup AVFormatParameters
	free( standard );
	free( params );

	// If successful, then try to get additional info
	if ( error == 0 )
	{
		// Get the stream info
		error = av_find_stream_info( context ) < 0;

		// Continue if no error
		if ( error == 0 )
		{
			// We will default to the first audio and video streams found
			int audio_index = -1;
			int video_index = -1;
			int av_bypass = 0;

			// Now set properties where we can (use default unknowns if required)
			if ( context->duration != AV_NOPTS_VALUE )
			{
				// This isn't going to be accurate for all formats
				mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
				mlt_properties_set_position( properties, "out", frames - 1 );
				mlt_properties_set_position( properties, "length", frames );
			}

			// Find default audio and video streams
			find_default_streams( properties, context, &audio_index, &video_index );

			if ( context->start_time != AV_NOPTS_VALUE )
				mlt_properties_set_double( properties, "_start_time", context->start_time );

			// Check if we're seekable (something funny about mpeg here :-/)
			if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 )  && strncmp( file, "udp:", 4 )  && strncmp( file, "tcp:", 4 ) && strncmp( file, "rtsp:", 5 )  && strncmp( file, "rtp:", 4 ) )
			{
				mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
				mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );
			}
			else
				av_bypass = 1;

			// Store selected audio and video indexes on properties
			mlt_properties_set_int( properties, "_audio_index", audio_index );
			mlt_properties_set_int( properties, "_video_index", video_index );
			mlt_properties_set_int( properties, "_last_position", -1 );

			// Fetch the width, height and aspect ratio
			if ( video_index != -1 )
			{
				AVCodecContext *codec_context = context->streams[ video_index ]->codec;
				mlt_properties_set_int( properties, "width", codec_context->width );
				mlt_properties_set_int( properties, "height", codec_context->height );

				if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
				{
					// Fetch the first frame of DV so we can read it directly
					AVPacket pkt;
					int ret = 0;
					while ( ret >= 0 )
					{
						ret = av_read_frame( context, &pkt );
						if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
						{
							mlt_properties_set_double( properties, "aspect_ratio",
								get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
							break;
						}
					}
				}
				else
				{
					mlt_properties_set_double( properties, "aspect_ratio",
						get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
				}
			}

			// Read Metadata
			if (context->title != NULL)
				mlt_properties_set(properties, "meta.attr.title.markup", context->title );
			if (context->author != NULL)
				mlt_properties_set(properties, "meta.attr.author.markup", context->author );
			if (context->copyright != NULL)
				mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
			if (context->comment != NULL)
				mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
			if (context->album != NULL)
				mlt_properties_set(properties, "meta.attr.album.markup", context->album );
			if (context->year != 0)
				mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
			if (context->track != 0)
				mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );

			// We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
			if ( av == 0 && audio_index != -1 && video_index != -1 )
			{
				// We'll use the open one as our video_context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );

				// And open again for our audio context
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );

				// Audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else if ( av != 2 && video_index != -1 )
			{
				// We only have a video context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
			}
			else if ( audio_index != -1 )
			{
				// We only have an audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else
			{
				// Something has gone wrong
				error = -1;
			}

			mlt_properties_set_int( properties, "av_bypass", av_bypass );
		}
	}

	// Unlock the mutex now
	avformat_unlock( );

	return error;
}

/** Convert a frame position to a time code.
*/

static double producer_time_of_frame( mlt_producer this, mlt_position position )
{
	return ( double )position / mlt_producer_get_fps( this );
}

static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
{
#ifdef SWSCALE
	if ( format == mlt_image_yuv420p )
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
			width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		output.data[0] = buffer;
		output.data[1] = buffer + width * height;
		output.data[2] = buffer + ( 3 * width * height ) / 2;
		output.linesize[0] = width;
		output.linesize[1] = width >> 1;
		output.linesize[2] = width >> 1;
		sws_scale( context, frame->data, frame->linesize, 0, height,
			output.data, output.linesize);
		sws_freeContext( context );
	}
	else if ( format == mlt_image_rgb24 )
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
			width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
		sws_scale( context, frame->data, frame->linesize, 0, height,
			output.data, output.linesize);
		sws_freeContext( context );
	}
	else
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
			width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
		sws_scale( context, frame->data, frame->linesize, 0, height,
			output.data, output.linesize);
		sws_freeContext( context );
	}
#else
	if ( format == mlt_image_yuv420p )
	{
		AVPicture pict;
		pict.data[0] = buffer;
		pict.data[1] = buffer + width * height;
		pict.data[2] = buffer + ( 3 * width * height ) / 2;
		pict.linesize[0] = width;
		pict.linesize[1] = width >> 1;
		pict.linesize[2] = width >> 1;
		img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
	}
Example #14
static int process_feed( mlt_properties feed, mlt_filter filter, mlt_frame frame )
{
	// Error return
	int error = 1;

	// Get the properties of the data show filter
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Get the type requested by the feeding filter
	char *type = mlt_properties_get( feed, "type" );

	// Fetch the filter associated to this type
	mlt_filter requested = mlt_properties_get_data( filter_properties, type, NULL );

	// If it doesn't exist, then create it now
	if ( requested == NULL )
	{
		// Source filter from profile
		requested = obtain_filter( filter, type );

		// Store it on the properties for subsequent retrieval/destruction
		mlt_properties_set_data( filter_properties, type, requested, 0, ( mlt_destructor )mlt_filter_close, NULL );
	}

	// If we have one, then process it now...
	if ( requested != NULL )
	{
		int i = 0;
		mlt_properties properties = MLT_FILTER_PROPERTIES( requested );
		static const char *prefix = "properties.";
		int len = strlen( prefix );

		// Determine if this is an absolute or relative feed
		int absolute = mlt_properties_get_int( feed, "absolute" );

		// Make do with what we have
		int length = !absolute ? 
					 mlt_properties_get_int( feed, "out" ) - mlt_properties_get_int( feed, "in" ) + 1 :
					 mlt_properties_get_int( feed, "out" ) + 1;

		// Repeat period
		int period = mlt_properties_get_int( properties, "period" );
		period = period == 0 ? 1 : period;

		// Pass properties from feed into requested
		for ( i = 0; i < mlt_properties_count( properties ); i ++ )
		{
			char *name = mlt_properties_get_name( properties, i );
			char *key = mlt_properties_get_value( properties, i );
			if ( !strncmp( name, prefix, len ) )
			{
				if ( !strncmp( name + len, "length[", 7 ) )
				{
					mlt_properties_set_position( properties, key, ( length - period ) / period );
				}
				else
				{
					char *value = mlt_properties_get( feed, name + len );
					if ( value != NULL )
					{
						// check for metadata keywords in metadata markup if user requested so
						if ( mlt_properties_get_int( filter_properties, "dynamic" ) == 1  && !strcmp( name + strlen( name ) - 6, "markup") )
						{
							// Find keywords which should be surrounded by '#', like: #title#
							char* keywords = strtok( value, "#" );
							char result[512] = ""; // XXX: how much is enough?
							int ct = 0;
							int fromStart = ( value[0] == '#' ) ? 1 : 0;
							
							while ( keywords != NULL )
							{
								if ( ct % 2 == fromStart )
								{
									// backslash in front of # suppresses substitution
									if ( keywords[ strlen( keywords ) -1 ] == '\\' )
									{
										// keep characters except backslash
										strncat( result, keywords, strlen( keywords ) -1 );
										strcat( result, "#" );
										ct++;
									}
									else
									{
										strcat( result, keywords );
									}
								}
								else if ( !strcmp( keywords, "timecode" ) )
								{
									// special case: replace #timecode# with current frame timecode
									int pos = mlt_properties_get_int( feed, "position" );
									char *tc = frame_to_timecode( pos, mlt_profile_fps( mlt_service_profile( MLT_FILTER_SERVICE( filter ) ) ) );
									strncat( result, tc, sizeof( result ) - strlen( result ) - 1 );
									free( tc );
								}
								else if ( !strcmp( keywords, "frame" ) )
								{
									// special case: replace #frame# with current frame number
									int pos = mlt_properties_get_int( feed, "position" );
									char s[12];
									snprintf( s, sizeof(s) - 1, "%d", pos );
									s[sizeof( s ) - 1] = '\0';
									strcat( result, s );
								}
								else if ( !strcmp( keywords, "now" ) )
								{
									// special case: replace #now# with the current time as HH:MM
									time_t now;
									struct tm *t;
									char s[] = "xx:xx ";

									time( &now );
									t = localtime( &now );

									snprintf( s, sizeof( s ), "%02d:%02d", t->tm_hour, t->tm_min );
									s[sizeof( s ) - 1] = '\0';
									strcat( result, s );
								}
								else
								{
									// replace keyword with metadata value
									char *metavalue = metadata_value( MLT_FRAME_PROPERTIES( frame ), keywords );
									strncat( result, metavalue ? metavalue : "-", sizeof( result ) - strlen( result ) -1 );
								}
								keywords = strtok( NULL, "#" );
								ct++;
							}
							mlt_properties_set( properties, key, (char*) result );
						}
						else mlt_properties_set( properties, key, value );
					}
				}
			}
		}

		// Set the original position on the frame
		if ( absolute == 0 )
			mlt_frame_set_position( frame, mlt_properties_get_int( feed, "position" ) - mlt_properties_get_int( feed, "in" ) );
		else
			mlt_frame_set_position( frame, mlt_properties_get_int( feed, "position" ) );

		// Process the filter
		mlt_filter_process( requested, frame );

		// Should be ok...
		error = 0;
	}

	return error;
}
Example #15
    bool start( mlt_profile profile = 0 )
    {
        if ( m_started )
            return false;
        try
        {
            // Fall back to the producer's profile if none was supplied
            if ( !profile )
                profile = mlt_service_profile( MLT_PRODUCER_SERVICE( getProducer() ) );

            // Initialize some members
            m_vancLines = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "vanc" );
            if ( m_vancLines == -1 )
                m_vancLines = profile->height <= 512 ? 26 : 32;

            // Get the display mode
            BMDDisplayMode displayMode = getDisplayMode( profile, m_vancLines );
            if ( displayMode == (BMDDisplayMode) bmdDisplayModeNotSupported )
            {
                mlt_log_info( getProducer(), "profile = %dx%d %f fps %s\n", profile->width, profile->height,
                              mlt_profile_fps( profile ), profile->progressive? "progressive" : "interlace" );
                throw "Profile is not compatible with decklink.";
            }

            // Determine if supports input format detection
#ifdef WIN32
            BOOL doesDetectFormat = FALSE;
#else
            bool doesDetectFormat = false;
#endif
            IDeckLinkAttributes *decklinkAttributes = 0;
            if ( m_decklink->QueryInterface( IID_IDeckLinkAttributes, (void**) &decklinkAttributes ) == S_OK )
            {
                if ( decklinkAttributes->GetFlag( BMDDeckLinkSupportsInputFormatDetection, &doesDetectFormat ) != S_OK )
                    doesDetectFormat = false;
                SAFE_RELEASE( decklinkAttributes );
            }
            mlt_log_verbose( getProducer(), "%s format detection\n", doesDetectFormat ? "supports" : "does not support" );

            // Enable video capture
            BMDPixelFormat pixelFormat = bmdFormat8BitYUV;
            BMDVideoInputFlags flags = doesDetectFormat ? bmdVideoInputEnableFormatDetection : bmdVideoInputFlagDefault;
            if ( S_OK != m_decklinkInput->EnableVideoInput( displayMode, pixelFormat, flags ) )
                throw "Failed to enable video capture.";

            // Enable audio capture
            BMDAudioSampleRate sampleRate = bmdAudioSampleRate48kHz;
            BMDAudioSampleType sampleType = bmdAudioSampleType16bitInteger;
            int channels = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" );
            if ( S_OK != m_decklinkInput->EnableAudioInput( sampleRate, sampleType, channels ) )
                throw "Failed to enable audio capture.";

            // Start capture
            m_dropped = 0;
            mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "dropped", m_dropped );
            m_started = m_decklinkInput->StartStreams() == S_OK;
            if ( !m_started )
                throw "Failed to start capture.";
        }
        catch ( const char *error )
        {
            m_decklinkInput->DisableVideoInput();
            mlt_log_error( getProducer(), "%s\n", error );
            return false;
        }
        return true;
    }
Example #16
double mlt_producer_get_fps( mlt_producer self )
{
	mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self ) );
	return mlt_profile_fps( profile );
}
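Nearly every example above follows the same pattern: fetch the service's profile, call mlt_profile_fps() to get the frame rate, and use it to convert between frame positions and seconds. The following minimal, self-contained sketch shows that conversion on its own; the profile name "dv_pal" is only an illustrative choice, and any installed profile name would work.

#include <framework/mlt.h>
#include <math.h>
#include <stdio.h>

int main( void )
{
	// Initialize the factory so profile data files can be located
	mlt_factory_init( NULL );

	// Load a named profile and query its frame rate (frame_rate_num / frame_rate_den)
	mlt_profile profile = mlt_profile_init( "dv_pal" );
	double fps = mlt_profile_fps( profile );

	// Convert a frame position to seconds and back again
	mlt_position position = 250;
	double seconds = position / fps;
	mlt_position round_trip = lrint( seconds * fps );

	printf( "fps=%.3f position=%d seconds=%.3f round_trip=%d\n", fps, position, seconds, round_trip );

	mlt_profile_close( profile );
	mlt_factory_close();
	return 0;
}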
Example #17
mlt_producer producer_framebuffer_init( mlt_profile profile, mlt_service_type type, const char *id, char *arg )
{
	if ( !arg ) return NULL;
	mlt_producer producer = NULL;
	producer = calloc( 1, sizeof( struct mlt_producer_s ) );
	if ( !producer )
		return NULL;

	if ( mlt_producer_init( producer, NULL ) )
	{
		free( producer );
		return NULL;
	}

	// Wrap loader
	mlt_producer real_producer;
	
	// Check if a speed was specified.
	/**
	 * Speed must be appended to the filename with '?'. To play your video at 50%:
	 *   melt framebuffer:my_video.mpg?0.5
	 *
	 * A stroboscope effect can be obtained by adding a strobe=x parameter, where
	 *   x is the number of frames that will be ignored.
	 *
	 * You can play the movie backwards by adding reverse=1.
	 *
	 * You can freeze the clip at a given position by adding freeze=frame_pos;
	 *   add freeze_after=1 to freeze only past that position, or freeze_before=1 to freeze before it.
	 **/

	double speed = 0.0;
	char *props = strdup( arg );
	char *ptr = strrchr( props, '?' );
	
	if ( ptr )
	{
		speed = atof( ptr + 1 );
		if ( speed != 0.0 )
			// If speed was valid, then strip it and the delimiter.
			// Otherwise, an invalid speed probably means this '?' was not a delimiter.
			*ptr = '\0';
	}
		
	real_producer = mlt_factory_producer( profile, "abnormal", props );
	free( props );

	if (speed == 0.0) speed = 1.0;

	if ( producer != NULL && real_producer != NULL)
	{
		// Get the properties of this producer
		mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );

		mlt_properties_set( properties, "resource", arg);

		// Store the real producer
		mlt_properties_set_data( properties, "producer", real_producer, 0, ( mlt_destructor )mlt_producer_close, NULL );

		// Grab some stuff from the real_producer
		mlt_properties_pass_list( properties, MLT_PRODUCER_PROPERTIES( real_producer ), "length, width, height, aspect_ratio" );

		if ( speed < 0 )
		{
			speed = -speed;
			mlt_properties_set_int( properties, "reverse", 1 );
		}

		if ( speed != 1.0 )
		{
			double real_length = ( (double)  mlt_producer_get_length( real_producer ) ) / speed;
			mlt_properties_set_position( properties, "length", real_length );
			mlt_properties real_properties = MLT_PRODUCER_PROPERTIES( real_producer );
			const char* service = mlt_properties_get( real_properties, "mlt_service" );
			if ( service && !strcmp( service, "avformat" ) )
			{
				int n = mlt_properties_count( real_properties );
				int i;
				for ( i = 0; i < n; i++ )
				{
					if ( strstr( mlt_properties_get_name( real_properties, i ), "stream.frame_rate" ) )
					{
						double source_fps = mlt_properties_get_double( real_properties, mlt_properties_get_name( real_properties, i ) );
						if ( source_fps > mlt_profile_fps( profile ) )
						{
							mlt_properties_set_double( real_properties, "force_fps", source_fps * speed );
							mlt_properties_set_position( real_properties, "length", real_length );
							mlt_properties_set_position( real_properties, "out", real_length - 1 );
							speed = 1.0;
						}
						break;
					}
				}
			}
		}
		mlt_properties_set_position( properties, "out", mlt_producer_get_length( producer ) - 1 );

		// Since we control the seeking, prevent it from seeking on its own
		mlt_producer_set_speed( real_producer, 0 );
		mlt_producer_set_speed( producer, speed );

		// Override the get_frame method
		producer->get_frame = producer_get_frame;
	}
	else
	{
		if ( producer )
			mlt_producer_close( producer );
		if ( real_producer )
			mlt_producer_close( real_producer );

		producer = NULL;
	}
	return producer;
}
Example #18
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = mlt_frame_pop_service( frame );
	*format = mlt_image_rgb24;
	mlt_properties_set_int( MLT_FRAME_PROPERTIES(frame), "consumer_deinterlace", 1 );
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	if ( !error && *image )
	{
		videostab self = filter->child;
		mlt_position length = mlt_filter_get_length2( filter, frame );
		int h = *height;
		int w = *width;

		// Service locks are for concurrency control
		mlt_service_lock( MLT_FILTER_SERVICE( filter ) );
		if ( !self->initialized )
		{
			// Initialize our context
			self->initialized = 1;
			self->es = es_init( w, h );
			self->pos_i = (vc*) malloc( length * sizeof(vc) );
			self->pos_h = (vc*) malloc( length * sizeof(vc) );
			self->pos_y = (vc*) malloc( h * sizeof(vc) );
			self->rs = rs_init( w, h );
		}
		char *vectors = mlt_properties_get( MLT_FILTER_PROPERTIES(filter), "vectors" );
		if ( !vectors )
		{
			// Analyse
			int pos = (int) mlt_filter_get_position( filter, frame );
			self->pos_i[pos] = vc_add( pos == 0 ? vc_zero() : self->pos_i[pos - 1], es_estimate( self->es, *image ) );

			// On last frame
			if ( pos == length - 1 )
			{
				mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE(filter) );
				double fps =  mlt_profile_fps( profile );

				// Filter and store the results
				hipass( self->pos_i, self->pos_h, length, fps );
				serialize_vectors( self, length );
			}
		}
		else
		{
			// Apply
			if ( self->initialized != 2 )
			{
				// Load analysis results from property
				self->initialized = 2;
				deserialize_vectors( self, vectors, length );
			}
			if ( self->initialized == 2 )
			{
				// Stabilize
				float shutter_angle = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame) , "shutterangle" );
				float pos = mlt_filter_get_position( filter, frame );
				int i;

				for (i = 0; i < h; i ++)
					self->pos_y[i] = interp( self->lanc_kernels,self->pos_h, length, pos + (i - h / 2.0) * shutter_angle / (h * 360.0) );
				rs_resample( self->lanc_kernels,self->rs, *image, self->pos_y );
			}
		}
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
	}
	return error;
}
Example #19
static int get_frame( mlt_producer self, mlt_frame_ptr frame, int index )
{
	mlt_properties properties = MLT_PRODUCER_PROPERTIES(self);
	context cx = mlt_properties_get_data( properties, "context", NULL );

	if ( !cx )
	{
		// Allocate and initialize our context
		cx = mlt_pool_alloc( sizeof( struct context_s ) );
		memset( cx, 0, sizeof( *cx ) );
		mlt_properties_set_data( properties, "context", cx, 0, mlt_pool_release, NULL );
		cx->self = self;
		char *profile_name = mlt_properties_get( properties, "profile" );
		if ( !profile_name )
			profile_name = mlt_properties_get( properties, "mlt_profile" );
		mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self ) );

		if ( profile_name )
		{
			cx->profile = mlt_profile_init( profile_name );
			cx->profile->is_explicit = 1;
		}
		else
		{
			cx->profile = mlt_profile_clone( profile );
			cx->profile->is_explicit = 0;
		}

		// Encapsulate a real producer for the resource
		cx->producer = mlt_factory_producer( cx->profile, NULL,
			mlt_properties_get( properties, "resource" ) );
		if ( ( profile_name && !strcmp( profile_name, "auto" ) ) ||
			mlt_properties_get_int( properties, "autoprofile" ) )
		{
			mlt_profile_from_producer( cx->profile, cx->producer );
			mlt_producer_close( cx->producer );
			cx->producer = mlt_factory_producer( cx->profile, NULL, mlt_properties_get( properties, "resource" ) );
		}

		// Since we control the seeking, prevent it from seeking on its own
		mlt_producer_set_speed( cx->producer, 0 );
		cx->audio_position = -1;

		// We will encapsulate a consumer
		cx->consumer = mlt_consumer_new( cx->profile );
		// Do not use _pass_list on real_time so that it defaults to 0 in the absence of
		// an explicit real_time property.
		mlt_properties_set_int( MLT_CONSUMER_PROPERTIES( cx->consumer ), "real_time",
			mlt_properties_get_int( properties, "real_time" ) );
		mlt_properties_pass_list( MLT_CONSUMER_PROPERTIES( cx->consumer ), properties,
			"buffer, prefill, deinterlace_method, rescale" );
	
		// Connect it all together
		mlt_consumer_connect( cx->consumer, MLT_PRODUCER_SERVICE( cx->producer ) );
		mlt_consumer_start( cx->consumer );
	}

	// Generate a frame
	*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( self ) );
	if ( *frame )
	{
		// Seek the producer to the correct place
		// Calculate our positions
		double actual_position = (double) mlt_producer_frame( self );
		if ( mlt_producer_get_speed( self ) != 0 )
			actual_position *= mlt_producer_get_speed( self );
		mlt_position need_first = floor( actual_position );
		mlt_producer_seek( cx->producer,
			lrint( need_first * mlt_profile_fps( cx->profile ) / mlt_producer_get_fps( self ) ) );

		// Get the nested frame
		mlt_frame nested_frame = mlt_consumer_rt_frame( cx->consumer );

		// Stack the producer and our methods on the nested frame
		mlt_frame_push_service( *frame, nested_frame );
		mlt_frame_push_service( *frame, cx );
		mlt_frame_push_get_image( *frame, get_image );
		mlt_frame_push_audio( *frame, nested_frame );
		mlt_frame_push_audio( *frame, cx );
		mlt_frame_push_audio( *frame, get_audio );
		
		// Give the returned frame temporal identity
		mlt_frame_set_position( *frame, mlt_producer_position( self ) );
		
		// Store the nested frame on the produced frame for destruction
		mlt_properties frame_props = MLT_FRAME_PROPERTIES( *frame );
		mlt_properties_set_data( frame_props, "_producer_consumer.frame", nested_frame, 0, (mlt_destructor) mlt_frame_close, NULL );

		// Inform the normalizers about our video properties
		mlt_properties_set_double( frame_props, "aspect_ratio", mlt_profile_sar( cx->profile ) );
		mlt_properties_set_int( frame_props, "width", cx->profile->width );
		mlt_properties_set_int( frame_props, "height", cx->profile->height );
		mlt_properties_set_int( frame_props, "meta.media.width", cx->profile->width );
		mlt_properties_set_int( frame_props, "meta.media.height", cx->profile->height );
		mlt_properties_set_int( frame_props, "progressive", cx->profile->progressive );
	}

	// Calculate the next timecode
	mlt_producer_prepare_next( self );

	return 0;
}