Code example #1
File: transition_frei0r.c Project: j-b-m/mlt
static int transition_get_image( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_frame b_frame = mlt_frame_pop_frame( a_frame );
	mlt_transition transition = mlt_frame_pop_service( a_frame );
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );
	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

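	// The "invert" property decides which frame is processed and carries the result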
	int invert = mlt_properties_get_int( properties, "invert" );

	uint8_t *images[]={NULL,NULL,NULL};

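	// Fetch both the A and B frame images as RGBA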
	*format = mlt_image_rgb24a;
	mlt_frame_get_image( a_frame, &images[0], format, width, height, 0 );
	mlt_frame_get_image( b_frame, &images[1], format, width, height, 0 );
	
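	// Convert the transition position to a time in seconds and run the frei0r plugin on the images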
	double position = mlt_transition_get_position( transition, a_frame );
	mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
	double time = position / mlt_profile_fps( profile );
	process_frei0r_item( MLT_TRANSITION_SERVICE(transition), position, time, properties, !invert ? a_frame : b_frame, images, width, height );
	
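	// Return the dimensions and image of the frame selected by "invert"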
	*width = mlt_properties_get_int( !invert ? a_props : b_props, "width" );
	*height = mlt_properties_get_int( !invert ? a_props : b_props, "height" );
	*image = mlt_properties_get_data( !invert ? a_props : b_props , "image", NULL );
	return 0;
}
Code example #2
static mlt_frame process( mlt_transition transition, mlt_frame a_frame, mlt_frame b_frame )
{
	mlt_service service = MLT_TRANSITION_SERVICE(transition);

	if ( !GlslManager::init_chain( service ) ) {
		// Create the Movit effect chain
		EffectChain* chain = GlslManager::get_chain( service );
		mlt_profile profile = mlt_service_profile( service );
		Input* b_input = new MltInput( profile->width, profile->height );
		ImageFormat output_format;
		output_format.color_space = COLORSPACE_sRGB;
		output_format.gamma_curve = GAMMA_sRGB;
		chain->add_input( b_input );
		chain->add_output( output_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED );
		chain->set_dither_bits( 8 );

		Effect* effect = chain->add_effect( new OverlayEffect(),
			GlslManager::get_input( service ), b_input );

		// Save this new input on the properties for get_image
		mlt_properties_set_data( MLT_TRANSITION_PROPERTIES(transition),
			"movit input B", b_input, 0, NULL, NULL );
	}

	// Push the transition on to the frame
	mlt_frame_push_service( a_frame, transition );

	// Push the b_frame on to the stack
	mlt_frame_push_frame( a_frame, b_frame );

	// Push the transition method
	mlt_frame_push_get_image( a_frame, get_image );

	return a_frame;
}
Code example #3
File: mlt_transition.c Project: jjcare/mlt
static int get_image_b( mlt_frame b_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_transition self = mlt_frame_pop_service( b_frame );
	mlt_frame a_frame = mlt_frame_pop_frame( b_frame );
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );
	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

	// Set scaling from A frame if not already provided.
	if ( !mlt_properties_get( b_props, "rescale.interp" ) )
	{
		const char *rescale = mlt_properties_get( a_props, "rescale.interp" );
		if ( !rescale || !strcmp( rescale, "none" ) )
			rescale = "nearest";
		mlt_properties_set( b_props, "rescale.interp", rescale );
	}

	// Ensure sane aspect ratio
	if ( mlt_frame_get_aspect_ratio( b_frame ) == 0.0 )
		mlt_frame_set_aspect_ratio( b_frame, mlt_profile_sar( mlt_service_profile( MLT_TRANSITION_SERVICE(self) ) ) );

	mlt_properties_pass_list( b_props, a_props,
		"consumer_deinterlace, deinterlace_method, consumer_tff" );

	return mlt_frame_get_image( b_frame, image, format, width, height, writable );
}
Code example #4
mlt_service MLTWebVfx::createTransition() {
    mlt_transition self = mlt_transition_new();
    if (self) {
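        // Install the frame-processing callback for this transition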
        self->process = transitionProcess;
        // Video only transition
        mlt_properties_set_int(MLT_TRANSITION_PROPERTIES(self), "_transition_type", 1);
        return MLT_TRANSITION_SERVICE(self);
    }
    return 0;
}
Code example #5
File: filter_mask_apply.c Project: mltframework/mlt
static int get_image(mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable)
{
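	// Recover the transition and the requested image format pushed onto the frame stack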
	mlt_transition transition = mlt_frame_pop_service(frame);
	*format = mlt_frame_pop_service_int(frame);
	int error = mlt_frame_get_image(frame, image, format, width, height, writable);
	if (!error) {
		mlt_properties properties = MLT_FRAME_PROPERTIES(frame);
		mlt_frame clone = mlt_properties_get_data(properties, "mask frame", NULL);
		if (clone) {
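			// Use the transition to composite the mask frame (A) with this frame (B)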
			mlt_frame_push_get_image(frame, dummy_get_image);
			mlt_service_lock(MLT_TRANSITION_SERVICE(transition));
			mlt_transition_process(transition, clone, frame);
			mlt_service_unlock(MLT_TRANSITION_SERVICE(transition));
			error = mlt_frame_get_image(clone, image, format, width, height, writable);
			if (!error) {
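				// Adopt the composited image as this frame's output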
				int size = mlt_image_format_size(*format, *width, *height, NULL);
				mlt_frame_set_image(frame, *image, size, NULL);
			}
		}
	}
	return error;
}
Code example #6
File: mlt_transition.c Project: jjcare/mlt
static int get_image_a( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_transition self = mlt_frame_pop_service( a_frame );
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );

	// All transitions get scaling
	const char *rescale = mlt_properties_get( a_props, "rescale.interp" );
	if ( !rescale || !strcmp( rescale, "none" ) )
		mlt_properties_set( a_props, "rescale.interp", "nearest" );

	// Ensure sane aspect ratio
	if ( mlt_frame_get_aspect_ratio( a_frame ) == 0.0 )
		mlt_frame_set_aspect_ratio( a_frame, mlt_profile_sar( mlt_service_profile( MLT_TRANSITION_SERVICE(self) ) ) );

	return mlt_frame_get_image( a_frame, image, format, width, height, writable );
}
Code example #7
File: mlt_field.c Project: mltframework/mlt
int mlt_field_plant_transition( mlt_field self, mlt_transition that, int a_track, int b_track )
{
	// Connect the transition to the last producer
	int result = mlt_transition_connect( that, self->producer, a_track, b_track );

	// If successful, then we'll use self for connecting in the future
	if ( result == 0 )
	{
		// This is now the new producer
		self->producer = MLT_TRANSITION_SERVICE( that );

		// Reconnect tractor to new producer
		mlt_tractor_connect( self->tractor, self->producer );

		// Fire an event
		mlt_events_fire( mlt_field_properties( self ), "service-changed", NULL );
	}

	return result;
}
Code example #8
File: transition_vqm.cpp Project: aib/mlt
mlt_transition transition_vqm_init( mlt_profile profile, mlt_service_type type, const char *id, void *arg )
{
	mlt_transition transition = mlt_transition_new();

	if ( transition )
	{
		mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

		if ( !createQApplicationIfNeeded( MLT_TRANSITION_SERVICE(transition) ) )
		{
			mlt_transition_close( transition );
			return NULL;
		}
		transition->process = process;
		mlt_properties_set_int( properties, "_transition_type", 1 ); // video only
		mlt_properties_set_int( properties, "window_size", 8 );
		printf( "frame psnr[Y] psnr[Cb] psnr[Cr] ssim[Y] ssim[Cb] ssim[Cr]\n" );
	}

	return transition;
}
Code example #9
static int transitionGetImage(mlt_frame aFrame, uint8_t **image, mlt_image_format *format, int *width, int *height, int /*writable*/) {
    int error = 0;

    mlt_frame bFrame = mlt_frame_pop_frame(aFrame);
    mlt_transition transition = (mlt_transition)mlt_frame_pop_service(aFrame);

    mlt_position position = mlt_transition_get_position(transition, aFrame);
    mlt_position length = mlt_transition_get_length(transition);

    // Get the aFrame image, we will write our output to it
    *format = mlt_image_rgb24;
    if ((error = mlt_frame_get_image(aFrame, image, format, width, height, 1)) != 0)
        return error;
    // Get the bFrame image, we won't write to it
    uint8_t *bImage = NULL;
    int bWidth = 0, bHeight = 0;
    if ((error = mlt_frame_get_image(bFrame, &bImage, format, &bWidth, &bHeight, 0)) != 0)
        return error;

    { // Scope the lock
        MLTWebVfx::ServiceLocker locker(MLT_TRANSITION_SERVICE(transition));
        if (!locker.initialize(*width, *height))
            return 1;

        MLTWebVfx::ServiceManager* manager = locker.getManager();
        WebVfx::Image renderedImage(*image, *width, *height,
                                    *width * *height * WebVfx::Image::BytesPerPixel);
        manager->setImageForName(manager->getSourceImageName(), &renderedImage);
        WebVfx::Image targetImage(bImage, bWidth, bHeight,
                                  bWidth * bHeight * WebVfx::Image::BytesPerPixel);
        manager->setImageForName(manager->getTargetImageName(), &targetImage);
        manager->render(&renderedImage, position, length);
    }

    return error;
}
Code example #10
File: transition_mix.c Project: j-b-m/mlt
static int transition_get_audio( mlt_frame frame_a, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the b frame from the stack
	mlt_frame frame_b = mlt_frame_pop_audio( frame_a );

	// Get the effect
	mlt_transition transition = mlt_frame_pop_audio( frame_a );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( frame_b );

	transition_mix self = transition->child;
	int16_t *buffer_b, *buffer_a;
	int frequency_b = *frequency, frequency_a = *frequency;
	int channels_b = *channels, channels_a = *channels;
	int samples_b = *samples, samples_a = *samples;

	// We can only mix s16
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame_b, (void**) &buffer_b, format, &frequency_b, &channels_b, &samples_b );
	mlt_frame_get_audio( frame_a, (void**) &buffer_a, format, &frequency_a, &channels_a, &samples_a );

	if ( buffer_b == buffer_a )
	{
		*samples = samples_b;
		*channels = channels_b;
		*buffer = buffer_b;
		*frequency = frequency_b;
		return error;
	}

	int silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio" );
	mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio", 0 );
	if ( silent )
		memset( buffer_a, 0, samples_a * channels_a * sizeof( int16_t ) );

	silent = mlt_properties_get_int( b_props, "silent_audio" );
	mlt_properties_set_int( b_props, "silent_audio", 0 );
	if ( silent )
		memset( buffer_b, 0, samples_b * channels_b * sizeof( int16_t ) );

	// determine number of samples to process
	*samples = MIN( self->src_buffer_count + samples_b, self->dest_buffer_count + samples_a );
	*channels = MIN( MIN( channels_b, channels_a ), MAX_CHANNELS );
	*frequency = frequency_a;

	// Prevent src buffer overflow by discarding oldest samples.
	samples_b = MIN( samples_b, MAX_SAMPLES * MAX_CHANNELS / channels_b );
	size_t bytes = PCM16_BYTES( samples_b, channels_b );
	if ( PCM16_BYTES( self->src_buffer_count + samples_b, channels_b ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: src_buffer_count %d\n",
					  self->src_buffer_count );
		self->src_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_b - samples_b;
		memmove( self->src_buffer, &self->src_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_b * channels_b],
				 PCM16_BYTES( samples_b, channels_b ) );
	}
	// Buffer new src samples.
	memcpy( &self->src_buffer[self->src_buffer_count * channels_b], buffer_b, bytes );
	self->src_buffer_count += samples_b;
	buffer_b = self->src_buffer;

	// Prevent dest buffer overflow by discarding oldest samples.
	samples_a = MIN( samples_a, MAX_SAMPLES * MAX_CHANNELS / channels_a );
	bytes = PCM16_BYTES( samples_a, channels_a );
	if ( PCM16_BYTES( self->dest_buffer_count + samples_a, channels_a ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: dest_buffer_count %d\n",
					  self->dest_buffer_count );
		self->dest_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_a - samples_a;
		memmove( self->dest_buffer, &self->dest_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_a * channels_a],
				 PCM16_BYTES( samples_a, channels_a ) );
	}
	// Buffer the new dest samples.
	memcpy( &self->dest_buffer[self->dest_buffer_count * channels_a], buffer_a, bytes );
	self->dest_buffer_count += samples_a;
	buffer_a = self->dest_buffer;

	// Do the mixing.
	if ( mlt_properties_get_int( MLT_TRANSITION_PROPERTIES(transition), "combine" ) )
	{
		double weight = 1.0;
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "meta.mixdown" ) )
			weight = 1.0 - mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame_a ), "meta.volume" );
		combine_audio( weight, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}
	else
	{
		double mix_start = 0.5, mix_end = 0.5;
		if ( mlt_properties_get( b_props, "audio.previous_mix" ) )
			mix_start = mlt_properties_get_double( b_props, "audio.previous_mix" );
		if ( mlt_properties_get( b_props, "audio.mix" ) )
			mix_end = mlt_properties_get_double( b_props, "audio.mix" );
		if ( mlt_properties_get_int( b_props, "audio.reverse" ) )
		{
			mix_start = 1.0 - mix_start;
			mix_end = 1.0 - mix_end;
		}
		mix_audio( mix_start, mix_end, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}

	// Copy the audio into the frame.
	bytes = PCM16_BYTES( *samples, *channels );
	*buffer = mlt_pool_alloc( bytes );
	memcpy( *buffer, buffer_a, bytes );
	mlt_frame_set_audio( frame_a, *buffer, *format, bytes, mlt_pool_release );

	if ( mlt_properties_get_int( b_props, "_speed" ) == 0 )
	{
		// Flush the buffer when paused and scrubbing.
		samples_b = self->src_buffer_count;
		samples_a = self->dest_buffer_count;
	}
	else
	{
		// Determine the maximum amount of latency permitted in the buffer.
		int max_latency = CLAMP( *frequency / 1000, 0, MAX_SAMPLES ); // samples in 1ms
		// samples_b becomes the new target src buffer count.
		samples_b = CLAMP( self->src_buffer_count - *samples, 0, max_latency );
		// samples_b becomes the number of samples to consume: difference between actual and the target.
		samples_b = self->src_buffer_count - samples_b;
		// samples_a becomes the new target dest buffer count.
		samples_a = CLAMP( self->dest_buffer_count - *samples, 0, max_latency );
		// samples_a becomes the number of samples to consume: difference between actual and the target.
		samples_a = self->dest_buffer_count - samples_a;
	}

	// Consume the src buffer.
	self->src_buffer_count -= samples_b;
	if ( self->src_buffer_count ) {
		memmove( self->src_buffer, &self->src_buffer[samples_b * channels_b],
			PCM16_BYTES( self->src_buffer_count, channels_b ));
	}
	// Consume the dest buffer.
	self->dest_buffer_count -= samples_a;
	if ( self->dest_buffer_count ) {
		memmove( self->dest_buffer, &self->dest_buffer[samples_a * channels_a],
			PCM16_BYTES( self->dest_buffer_count, channels_a ));
	}

	return error;
}
Code example #11
File: transition_affine.c Project: mltframework/mlt
static int transition_get_image( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Get the b frame from the stack
	mlt_frame b_frame = mlt_frame_pop_frame( a_frame );

	// Get the transition object
	mlt_transition transition = mlt_frame_pop_service( a_frame );

	// Get the properties of the transition
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

	// Get the properties of the a frame
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

	// Image, format, width, height and image for the b frame
	uint8_t *b_image = NULL;
	mlt_image_format b_format = mlt_image_rgb24a;
	int b_width = mlt_properties_get_int( b_props, "meta.media.width" );
	int b_height = mlt_properties_get_int( b_props, "meta.media.height" );
	double b_ar = mlt_frame_get_aspect_ratio( b_frame );
	double b_dar = b_ar * b_width / b_height;

	// Assign the current position
	mlt_position position =  mlt_transition_get_position( transition, a_frame );

	int mirror = mlt_properties_get_position( properties, "mirror" );
	int length = mlt_transition_get_length( transition );
	if ( mlt_properties_get_int( properties, "always_active" ) )
	{
		mlt_properties props = mlt_properties_get_data( b_props, "_producer", NULL );
		mlt_position in = mlt_properties_get_int( props, "in" );
		mlt_position out = mlt_properties_get_int( props, "out" );
		length = out - in + 1;
	}

	// Obtain the normalised width and height from the a_frame
	mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
	int normalised_width = profile->width;
	int normalised_height = profile->height;

	double consumer_ar = mlt_profile_sar( profile );

	if ( mirror && position > length / 2 )
		position = abs( position - length );

	// Fetch the a frame image
	*format = mlt_image_rgb24a;
	int error = mlt_frame_get_image( a_frame, image, format, width, height, 1 );
	if (error || !*image)
		return error;

	// Calculate the region now
	mlt_rect result = {0, 0, normalised_width, normalised_height, 1.0};
	mlt_service_lock( MLT_TRANSITION_SERVICE( transition ) );

	if (mlt_properties_get(properties, "geometry"))
	{
		// Structures for geometry
		struct mlt_geometry_item_s geometry;
		composite_calculate( transition, &geometry, normalised_width, normalised_height, ( double )position );
		result.x = geometry.x;
		result.y = geometry.y;
		result.w = geometry.w;
		result.h = geometry.h;
		result.o = geometry.mix / 100.0f;
	}
	else if (mlt_properties_get(properties, "rect"))
	{
		// Determine length and obtain cycle
		double cycle = mlt_properties_get_double( properties, "cycle" );
	
		// Allow a repeat cycle
		if ( cycle >= 1 )
			length = cycle;
		else if ( cycle > 0 )
			length *= cycle;
		
		mlt_position anim_pos = repeat_position(properties, "rect", position, length);
		result = mlt_properties_anim_get_rect(properties, "rect", anim_pos, length);
		if (mlt_properties_get(properties, "rect") && strchr(mlt_properties_get(properties, "rect"), '%')) {
			result.x *= normalised_width;
			result.y *= normalised_height;
			result.w *= normalised_width;
			result.h *= normalised_height;
		}
		result.o = (result.o == DBL_MIN)? 1.0 : MIN(result.o, 1.0);
	}
	mlt_service_unlock( MLT_TRANSITION_SERVICE( transition ) );

	double geometry_w = result.w;
	double geometry_h = result.h;

	if ( !mlt_properties_get_int( properties, "fill" ) )
	{
		double geometry_dar = result.w * consumer_ar / result.h;

		if ( b_dar > geometry_dar )
		{
			result.w = MIN( result.w, b_width * b_ar / consumer_ar );
			result.h = result.w * consumer_ar / b_dar;
		}
		else
		{
			result.h = MIN( result.h, b_height );
			result.w = result.h * b_dar / consumer_ar;
		}
	}

	// Fetch the b frame image
	result.w = ( result.w * *width / normalised_width );
	result.h = ( result.h * *height / normalised_height );
	result.x = ( result.x * *width / normalised_width );
	result.y = ( result.y * *height / normalised_height );

	if (mlt_properties_get_int(properties, "b_scaled")) {
		// Request b frame image size just what is needed.
		b_width = result.w;
		b_height = result.h;
		// Set the rescale interpolation to match the frame
		mlt_properties_set( b_props, "rescale.interp", mlt_properties_get( a_props, "rescale.interp" ) );
	} else {
		// Request full resolution of b frame image.
		mlt_properties_set_int( b_props, "rescale_width", b_width );
		mlt_properties_set_int( b_props, "rescale_height", b_height );

		// Suppress padding and aspect normalization.
		mlt_properties_set( b_props, "rescale.interp", "none" );
	}

	// This is not a field-aware transform.
	mlt_properties_set_int( b_props, "consumer_deinterlace", 1 );

	error = mlt_frame_get_image( b_frame, &b_image, &b_format, &b_width, &b_height, 0 );
	if (error || !b_image) {
		// Remove potentially large image on the B frame. 
		mlt_frame_set_image( b_frame, NULL, 0, NULL );
		return error;
	}

	// Check that both images are of the correct format and process
	if ( *format == mlt_image_rgb24a && b_format == mlt_image_rgb24a )
	{
		double sw, sh;
		// Get values from the transition
		double scale_x = mlt_properties_anim_get_double( properties, "scale_x", position, length );
		double scale_y = mlt_properties_anim_get_double( properties, "scale_y", position, length );
		int scale = mlt_properties_get_int( properties, "scale" );
		double geom_scale_x = (double) b_width / result.w;
		double geom_scale_y = (double) b_height / result.h;
		struct sliced_desc desc = {
			.a_image = *image,
			.b_image = b_image,
			.interp = interpBL_b32,
			.a_width = *width,
			.a_height = *height,
			.b_width = b_width,
			.b_height = b_height,
			.lower_x = -(result.x + result.w / 2.0), // center
			.lower_y = -(result.y + result.h / 2.0), // middle
			.mix = result.o,
			.x_offset = (double) b_width / 2.0,
			.y_offset = (double) b_height / 2.0,
			.b_alpha = mlt_properties_get_int( properties, "b_alpha" ),
			// Affine boundaries
			.minima = 0,
			.xmax = b_width - 1,
			.ymax = b_height - 1
		};

		// Recalculate vars if alignment supplied.
		if ( mlt_properties_get( properties, "halign" ) || mlt_properties_get( properties, "valign" ) )
		{
			double halign = alignment_parse( mlt_properties_get( properties, "halign" ) );
			double valign = alignment_parse( mlt_properties_get( properties, "valign" ) );
			desc.x_offset = halign * b_width / 2.0;
			desc.y_offset = valign * b_height / 2.0;
			desc.lower_x = -(result.x + geometry_w * halign / 2.0f);
			desc.lower_y = -(result.y + geometry_h * valign / 2.0f);
		}

		affine_init( desc.affine.matrix );

		// Compute the affine transform
		get_affine( &desc.affine, transition, ( double )position, length );
		desc.dz = MapZ( desc.affine.matrix, 0, 0 );
		if ( (int) fabs( desc.dz * 1000 ) < 25 )
			return 0;

		// Factor scaling into the transformation based on output resolution.
		if ( mlt_properties_get_int( properties, "distort" ) )
		{
			scale_x = geom_scale_x * ( scale_x == 0 ? 1 : scale_x );
			scale_y = geom_scale_y * ( scale_y == 0 ? 1 : scale_y );
		}
		else
		{
			// Determine scale with respect to aspect ratio.
			double consumer_dar = consumer_ar * normalised_width / normalised_height;
			
			if ( b_dar > consumer_dar )
			{
				scale_x = geom_scale_x * ( scale_x == 0 ? 1 : scale_x );
				scale_y = geom_scale_x * ( scale_y == 0 ? 1 : scale_y );
				scale_y *= b_ar / consumer_ar;
			}
			else
			{
				scale_x = geom_scale_y * ( scale_x == 0 ? 1 : scale_x );
				scale_y = geom_scale_y * ( scale_y == 0 ? 1 : scale_y );
				scale_x *= consumer_ar / b_ar;
			}
		}
		if ( scale )
		{
			affine_max_output( desc.affine.matrix, &sw, &sh, desc.dz, *width, *height );
			affine_scale( desc.affine.matrix, sw * MIN( geom_scale_x, geom_scale_y ), sh * MIN( geom_scale_x, geom_scale_y ) );
		}
		else if ( scale_x != 0 && scale_y != 0 )
		{
			affine_scale( desc.affine.matrix, scale_x, scale_y );
		}


		char *interps = mlt_properties_get( a_props, "rescale.interp" );
		// Copy in case string is changed.
		if ( interps )
			interps = strdup( interps );

		// Set the interpolation function
		if ( interps == NULL || strcmp( interps, "nearest" ) == 0 || strcmp( interps, "neighbor" ) == 0 || strcmp( interps, "tiles" ) == 0 || strcmp( interps, "fast_bilinear" ) == 0 )
		{
			desc.interp = interpNN_b32;
			// uses lrintf. Values should be >= -0.5 and < max + 0.5
			desc.minima -= 0.5;
			desc.xmax += 0.49;
			desc.ymax += 0.49;
		}
		else if ( strcmp( interps, "bilinear" ) == 0 )
		{
			desc.interp = interpBL_b32;
			// uses floorf.
		}
		else if ( strcmp( interps, "bicubic" ) == 0 ||  strcmp( interps, "hyper" ) == 0 || strcmp( interps, "sinc" ) == 0 || strcmp( interps, "lanczos" ) == 0 || strcmp( interps, "spline" ) == 0 )
		{
			// TODO: lanczos 8x8
			// TODO: spline 4x4 or 6x6
			desc.interp = interpBC_b32;
			// uses ceilf. Values should be > -1 and <= max.
			desc.minima -= 1;
		}
		free( interps );

		// Do the transform with interpolation
		int threads = mlt_properties_get_int(properties, "threads");
		threads = CLAMP(threads, 0, mlt_slices_count_normal());
		if (threads == 1)
			sliced_proc(0, 0, 1, &desc);
		else
			mlt_slices_run_normal(threads, sliced_proc, &desc);
		
		// Remove potentially large image on the B frame. 
		mlt_frame_set_image( b_frame, NULL, 0, NULL );
	}

	return 0;
}
Code example #12
File: transition_region.c Project: amongll/AVFX
static int create_instance( mlt_transition transition, char *name, char *value, int count )
{
	// Return from this function
	int error = 0;

	// Duplicate the value
	char *type = strdup( value );

	// Pointer to filter argument
	char *arg = type == NULL ? NULL : strchr( type, ':' );

	// New filter being created
	mlt_filter filter = NULL;

	// Cleanup type and arg
	if ( arg != NULL )
		*arg ++ = '\0';

	// Create the filter
	mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
	if ( type )
		filter = mlt_factory_filter( profile, type, arg );

	// If we have a filter, then initialise and store it
	if ( filter != NULL )
	{
		// Properties of transition
		mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

		// String to hold the property name
		char id[ 256 ];

		// String to hold the passdown key
		char key[ 256 ];

		// Construct id
		sprintf( id, "_filter_%d", count );

		// Construct key
		sprintf( key, "%s.", name );

		// Just in case, let's assume that the filter here has a composite
		//mlt_properties_set( MLT_FILTER_PROPERTIES( filter ), "composite.geometry", "0%/0%:100%x100%" );
		//mlt_properties_set_int( MLT_FILTER_PROPERTIES( filter ), "composite.fill", 1 );

		// Pass all the key properties on the filter down
		mlt_properties_pass( MLT_FILTER_PROPERTIES( filter ), properties, key );
		mlt_properties_pass_list( MLT_FILTER_PROPERTIES( filter ), properties, "in, out, length" );

		// Ensure that filter is assigned
		mlt_properties_set_data( properties, id, filter, 0, ( mlt_destructor )mlt_filter_close, NULL );
	}
	else
	{
		// Indicate that an error has occurred
		error = 1;
	}

	// Cleanup
	free( type );

	// Return error condition
	return error;
}
Code example #13
File: transition_region.c Project: amongll/AVFX
static int transition_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Error we will return
	int error = 0;

	// We will get the 'b frame' from the frame stack
	mlt_frame b_frame = mlt_frame_pop_frame( frame );

	// Get the watermark transition object
	mlt_transition transition = mlt_frame_pop_service( frame );

	// Get the properties of the transition
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

	// Get the properties of the a frame
	mlt_properties a_props = MLT_FRAME_PROPERTIES( frame );

	mlt_service_lock( MLT_TRANSITION_SERVICE( transition ) );

	// Get the composite from the transition
	mlt_transition composite = mlt_properties_get_data( properties, "composite", NULL );

	// Look for the first filter
	mlt_filter filter = mlt_properties_get_data( properties, "_filter_0", NULL );

	// Get the position
	mlt_position position = mlt_transition_get_position( transition, frame );

	// Create a composite if we don't have one
	if ( composite == NULL )
	{
		// Create composite via the factory
		mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
		composite = mlt_factory_transition( profile, "composite", NULL );

		// If we have one
		if ( composite != NULL )
		{
			// Get the properties
			mlt_properties composite_properties = MLT_TRANSITION_PROPERTIES( composite );

			// We want to ensure that we don't get a wobble...
			//mlt_properties_set_int( composite_properties, "distort", 1 );
			mlt_properties_set_int( composite_properties, "progressive", 1 );

			// Pass all the composite. properties on the transition down
			mlt_properties_pass( composite_properties, properties, "composite." );

			// Register the composite for reuse/destruction
			mlt_properties_set_data( properties, "composite", composite, 0, ( mlt_destructor )mlt_transition_close, NULL );
		}
	}
	else
	{
		// Pass all current properties down
		mlt_properties composite_properties = MLT_TRANSITION_PROPERTIES( composite );
		mlt_properties_pass( composite_properties, properties, "composite." );
	}

	// Create filters
	if ( filter == NULL )
	{
		// Loop Variable
		int i = 0;

		// Number of filters created
		int count = 0;

		// Loop for all properties
		for ( i = 0; i < mlt_properties_count( properties ); i ++ )
		{
			// Get the name of this property
			char *name = mlt_properties_get_name( properties, i );

			// If the name does not contain a . and matches filter
			if ( strchr( name, '.' ) == NULL && !strncmp( name, "filter", 6 ) )
			{
				// Get the filter constructor
				char *value = mlt_properties_get_value( properties, i );

				// Create an instance
				if ( create_instance( transition, name, value, count ) == 0 )
					count ++;
			}
		}
	
		// Look for the first filter again
		filter = mlt_properties_get_data( properties, "_filter_0", NULL );
	}
	else
	{
		// Pass all properties down
		mlt_filter temp = NULL;

		// Loop Variable
		int i = 0;

		// Number of filters found
		int count = 0;

		// Loop for all properties
		for ( i = 0; i < mlt_properties_count( properties ); i ++ )
		{
			// Get the name of this property
			char *name = mlt_properties_get_name( properties, i );

			// If the name does not contain a . and matches filter
			if ( strchr( name, '.' ) == NULL && !strncmp( name, "filter", 6 ) )
			{
				// Strings to hold the id and pass down key
				char id[ 256 ];
				char key[ 256 ];

				// Construct id and key
				sprintf( id, "_filter_%d", count );
				sprintf( key, "%s.", name );

				// Get the filter
				temp = mlt_properties_get_data( properties, id, NULL );

				if ( temp != NULL )
				{
					mlt_properties_pass( MLT_FILTER_PROPERTIES( temp ), properties, key );
					count ++;
				}
			}
		}
	}

	mlt_properties_set_int( a_props, "width", *width );
	mlt_properties_set_int( a_props, "height", *height );

	// Only continue if we have a composite
	if ( composite != NULL )
	{
		// Get the resource of this filter (could be a shape [rectangle/circle] or an alpha provider of choice)
		const char *resource =  mlt_properties_get( properties, "resource" );

		// Get the old resource in case it's changed
		char *old_resource =  mlt_properties_get( properties, "_old_resource" );

		// String to hold the filter to query on
		char id[ 256 ];

		// Index to hold the count
		int i = 0;

		// We will get the 'b frame' from the composite only if it's NULL (region filter)
		if ( b_frame == NULL )
		{
			// Copy the region
			b_frame = composite_copy_region( composite, frame, position );

			// Ensure a destructor
			char *name = mlt_properties_get( properties, "_unique_id" );
			mlt_properties_set_data( a_props, name, b_frame, 0, ( mlt_destructor )mlt_frame_close, NULL );
		}

		// Properties of the B frame
		mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

		// filter_only prevents copying the alpha channel of the shape to the output frame
		// by compositing filtered frame over itself
		if ( mlt_properties_get_int( properties, "filter_only" ) )
		{
			char *name = mlt_properties_get( properties, "_unique_id" );
			frame = composite_copy_region( composite, b_frame, position );
			mlt_properties_set_data( b_props, name, frame, 0, ( mlt_destructor )mlt_frame_close, NULL );
		}

		// Make sure the filter is in the correct position
		while ( filter != NULL )
		{
			// Stack this filter
			if ( mlt_properties_get_int( MLT_FILTER_PROPERTIES( filter ), "off" ) == 0 )
				mlt_filter_process( filter, b_frame );

			// Generate the key for the next
			sprintf( id, "_filter_%d", ++ i );

			// Get the next filter
			filter = mlt_properties_get_data( properties, id, NULL );
		}

		// Allow filters to be attached to a region filter
		filter = mlt_properties_get_data( properties, "_region_filter", NULL );
		if ( filter != NULL )
			mlt_service_apply_filters( MLT_FILTER_SERVICE( filter ), b_frame, 0 );

		// Hmm - this is probably going to go wrong....
		mlt_frame_set_position( frame, position );

		// Get the b frame and process with composite if successful
		mlt_transition_process( composite, frame, b_frame );

		// If we have a shape producer copy the alpha mask from the shape frame to the b_frame
		if ( strcmp( resource, "rectangle" ) != 0 )
		{
			// Get the producer from the transition
			mlt_producer producer = mlt_properties_get_data( properties, "producer", NULL );

			// If We have no producer then create one
			if ( producer == NULL || ( old_resource != NULL && strcmp( resource, old_resource ) ) )
			{
				// Get the factory producer service
				char *factory = mlt_properties_get( properties, "factory" );

				// Store the old resource
				mlt_properties_set( properties, "_old_resource", resource );

				// Special case circle resource
				if ( strcmp( resource, "circle" ) == 0 )
					resource = "pixbuf:<svg width='100' height='100'><circle cx='50' cy='50' r='50' fill='black'/></svg>";

				// Create the producer
				mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
				producer = mlt_factory_producer( profile, factory, resource );

				// If we have one
				if ( producer != NULL )
				{
					// Get the producer properties
					mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );

					// Ensure that we loop
					mlt_properties_set( producer_properties, "eof", "loop" );

					// Now pass all producer. properties on the transition down
					mlt_properties_pass( producer_properties, properties, "producer." );

					// Register the producer for reuse/destruction
					mlt_properties_set_data( properties, "producer", producer, 0, ( mlt_destructor )mlt_producer_close, NULL );
				}
			}

			// Now use the shape producer
			if ( producer != NULL )
			{
				// We will get the alpha frame from the producer
				mlt_frame shape_frame = NULL;

				// Make sure the producer is in the correct position
				mlt_producer_seek( producer, position );

				// Get the shape frame
				if ( mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &shape_frame, 0 ) == 0 )
				{
					// Ensure that the shape frame will be closed
					mlt_properties_set_data( b_props, "shape_frame", shape_frame, 0, ( mlt_destructor )mlt_frame_close, NULL );
					if ( mlt_properties_get_int(properties, "holecolor") ) {
						mlt_properties_set_int(b_props, "holecolor", mlt_properties_get_int(properties,"holecolor"));
					}

					// Specify the callback for evaluation
					b_frame->get_alpha_mask = filter_get_alpha_mask;
				}
			}
		}

		// Get the image
		error = mlt_frame_get_image( frame, image, format, width, height, 0 );
	}

	mlt_service_unlock( MLT_TRANSITION_SERVICE( transition ) );

	return error;
}
Code example #14
File: transition_affine.c Project: gmarco/mlt-orig
static int transition_get_image( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Get the b frame from the stack
	mlt_frame b_frame = mlt_frame_pop_frame( a_frame );

	// Get the transition object
	mlt_transition transition = mlt_frame_pop_service( a_frame );

	// Get the properties of the transition
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

	// Get the properties of the a frame
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

	// Image, format, width, height and image for the b frame
	uint8_t *b_image = NULL;
	mlt_image_format b_format = mlt_image_rgb24a;
	int b_width;
	int b_height;

	// Assign the current position
	mlt_position position =  mlt_transition_get_position( transition, a_frame );

	int mirror = mlt_properties_get_position( properties, "mirror" );
	int length = mlt_transition_get_length( transition );
	if ( mlt_properties_get_int( properties, "always_active" ) )
	{
		mlt_properties props = mlt_properties_get_data( b_props, "_producer", NULL );
		mlt_position in = mlt_properties_get_int( props, "in" );
		mlt_position out = mlt_properties_get_int( props, "out" );
		length = out - in + 1;
	}

	// Obtain the normalised width and height from the a_frame
	mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
	int normalised_width = profile->width;
	int normalised_height = profile->height;

	double consumer_ar = mlt_profile_sar( mlt_service_profile( MLT_TRANSITION_SERVICE(transition) ) );

	// Structures for geometry
	struct mlt_geometry_item_s result;

	if ( mirror && position > length / 2 )
		position = abs( position - length );

	// Fetch the a frame image
	*format = mlt_image_rgb24a;
	mlt_frame_get_image( a_frame, image, format, width, height, 1 );

	// Calculate the region now
	mlt_service_lock( MLT_TRANSITION_SERVICE( transition ) );
	composite_calculate( transition, &result, normalised_width, normalised_height, ( float )position );
	mlt_service_unlock( MLT_TRANSITION_SERVICE( transition ) );

	// Fetch the b frame image
	result.w = ( result.w * *width / normalised_width );
	result.h = ( result.h * *height / normalised_height );
	result.x = ( result.x * *width / normalised_width );
	result.y = ( result.y * *height / normalised_height );

	// Request full resolution of b frame image.
	b_width = mlt_properties_get_int( b_props, "meta.media.width" );
	b_height = mlt_properties_get_int( b_props, "meta.media.height" );
	mlt_properties_set_int( b_props, "rescale_width", b_width );
	mlt_properties_set_int( b_props, "rescale_height", b_height );

	// Suppress padding and aspect normalization.
	char *interps = mlt_properties_get( a_props, "rescale.interp" );
	if ( interps )
		interps = strdup( interps );
	mlt_properties_set( b_props, "rescale.interp", "none" );

	// This is not a field-aware transform.
	mlt_properties_set_int( b_props, "consumer_deinterlace", 1 );

	mlt_frame_get_image( b_frame, &b_image, &b_format, &b_width, &b_height, 0 );

	// Check that both images are of the correct format and process
	if ( *format == mlt_image_rgb24a && b_format == mlt_image_rgb24a )
	{
		float x, y;
		float dx, dy;
		float dz;
		float sw, sh;
		uint8_t *p = *image;

		// Get values from the transition
		float scale_x = mlt_properties_get_double( properties, "scale_x" );
		float scale_y = mlt_properties_get_double( properties, "scale_y" );
		int scale = mlt_properties_get_int( properties, "scale" );
		int b_alpha = mlt_properties_get_int( properties, "b_alpha" );
		float geom_scale_x = (float) b_width / result.w;
		float geom_scale_y = (float) b_height / result.h;
		float cx = result.x + result.w / 2.0;
		float cy = result.y + result.h / 2.0;
		float lower_x = - cx;
		float lower_y = - cy;
		float x_offset = (float) b_width / 2.0;
		float y_offset = (float) b_height / 2.0;
		affine_t affine;
		interpp interp = interpBL_b32;
		int i, j; // loop counters

		affine_init( affine.matrix );

		// Compute the affine transform
		get_affine( &affine, transition, ( float )position );
		dz = MapZ( affine.matrix, 0, 0 );
		if ( ( int )abs( dz * 1000 ) < 25 )
		{
			if ( interps )
				free( interps );
			return 0;
		}

		// Factor scaling into the transformation based on output resolution.
		if ( mlt_properties_get_int( properties, "distort" ) )
		{
			scale_x = geom_scale_x * ( scale_x == 0 ? 1 : scale_x );
			scale_y = geom_scale_y * ( scale_y == 0 ? 1 : scale_y );
		}
		else
		{
			// Determine scale with respect to aspect ratio.
			double consumer_dar = consumer_ar * normalised_width / normalised_height;
			double b_ar = mlt_properties_get_double( b_props, "aspect_ratio" );
			double b_dar = b_ar * b_width / b_height;
			
			if ( b_dar > consumer_dar )
			{
				scale_x = geom_scale_x * ( scale_x == 0 ? 1 : scale_x );
				scale_y = geom_scale_x * ( scale_y == 0 ? 1 : scale_y );
			}
			else
			{
				scale_x = geom_scale_y * ( scale_x == 0 ? 1 : scale_x );
				scale_y = geom_scale_y * ( scale_y == 0 ? 1 : scale_y );
			}
			scale_x *= consumer_ar / b_ar;
		}
		if ( scale )
		{
			affine_max_output( affine.matrix, &sw, &sh, dz, *width, *height );
			affine_scale( affine.matrix, sw * MIN( geom_scale_x, geom_scale_y ), sh * MIN( geom_scale_x, geom_scale_y ) );
		}
		else if ( scale_x != 0 && scale_y != 0 )
		{
			affine_scale( affine.matrix, scale_x, scale_y );
		}

		// Set the interpolation function
		if ( interps == NULL || strcmp( interps, "nearest" ) == 0 || strcmp( interps, "neighbor" ) == 0 )
			interp = interpNN_b32;
		else if ( strcmp( interps, "tiles" ) == 0 || strcmp( interps, "fast_bilinear" ) == 0 )
			interp = interpNN_b32;
		else if ( strcmp( interps, "bilinear" ) == 0 )
			interp = interpBL_b32;
		else if ( strcmp( interps, "bicubic" ) == 0 )
			interp = interpBC_b32;
		 // TODO: lanczos 8x8
		else if ( strcmp( interps, "hyper" ) == 0 || strcmp( interps, "sinc" ) == 0 || strcmp( interps, "lanczos" ) == 0 )
			interp = interpBC_b32;
		else if ( strcmp( interps, "spline" ) == 0 ) // TODO: spline 4x4 or 6x6
			interp = interpBC_b32;

		// Do the transform with interpolation
		for ( i = 0, y = lower_y; i < *height; i++, y++ )
		{
			for ( j = 0, x = lower_x; j < *width; j++, x++ )
			{
				dx = MapX( affine.matrix, x, y ) / dz + x_offset;
				dy = MapY( affine.matrix, x, y ) / dz + y_offset;
				if ( dx >= 0 && dx < (b_width - 1) && dy >=0 && dy < (b_height - 1) )
					interp( b_image, b_width, b_height, dx, dy, result.mix/100.0, p, b_alpha );
				p += 4;
			}
		}
	}
	if ( interps )
		free( interps );

	return 0;
}
Code example #15
static int get_image( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	int error = 0;

	// Get the b frame from the stack
	mlt_frame b_frame = (mlt_frame) mlt_frame_pop_frame( a_frame );

	// Get the transition object
	mlt_transition transition = (mlt_transition) mlt_frame_pop_service( a_frame );

	// Get the properties of the transition
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

	// Get the properties of the a frame
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );

	// Get the movit objects
	mlt_service service = MLT_TRANSITION_SERVICE( transition );
	mlt_service_lock( service );
	EffectChain* chain = GlslManager::get_chain( service );
	MltInput* a_input = GlslManager::get_input( service );
	MltInput* b_input = (MltInput*) mlt_properties_get_data( properties, "movit input B", NULL );
	mlt_image_format output_format = *format;

	if ( !chain || !a_input ) {
		mlt_service_unlock( service );
		return 2;
	}

	// Get the frames' textures
	GLuint* texture_id[2] = {0, 0};
	*format = mlt_image_glsl_texture;
	mlt_frame_get_image( a_frame, (uint8_t**) &texture_id[0], format, width, height, 0 );
	a_input->useFBOInput( chain, *texture_id[0] );
	*format = mlt_image_glsl_texture;
	mlt_frame_get_image( b_frame, (uint8_t**) &texture_id[1], format, width, height, 0 );
	b_input->useFBOInput( chain, *texture_id[1] );

	// Set resolution to that of the a_frame
	*width = mlt_properties_get_int( a_props, "width" );
	*height = mlt_properties_get_int( a_props, "height" );

	// Setup rendering to an FBO
	GlslManager* glsl = GlslManager::get_instance();
	glsl_fbo fbo = glsl->get_fbo( *width, *height );
	if ( output_format == mlt_image_glsl_texture ) {
		glsl_texture texture = glsl->get_texture( *width, *height, GL_RGBA );

		glBindFramebuffer( GL_FRAMEBUFFER, fbo->fbo );
		check_error();
		glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture->texture, 0 );
		check_error();
		glBindFramebuffer( GL_FRAMEBUFFER, 0 );
		check_error();

		GlslManager::render( service, chain, fbo->fbo, *width, *height );

		glFinish();
		check_error();
		glBindFramebuffer( GL_FRAMEBUFFER, 0 );
		check_error();

		*image = (uint8_t*) &texture->texture;
		mlt_frame_set_image( a_frame, *image, 0, NULL );
		mlt_properties_set_data( properties, "movit.convert", texture, 0,
			(mlt_destructor) GlslManager::release_texture, NULL );
		*format = output_format;
	}
	else {
		// Use a PBO to hold the data we read back with glReadPixels()
		// (Intel/DRI goes into a slow path if we don't read to PBO)
		GLenum gl_format = ( output_format == mlt_image_rgb24a || output_format == mlt_image_opengl )?
			GL_RGBA : GL_RGB;
		int img_size = *width * *height * ( gl_format == GL_RGB? 3 : 4 );
		glsl_pbo pbo = glsl->get_pbo( img_size );
		glsl_texture texture = glsl->get_texture( *width, *height, gl_format );

		if ( fbo && pbo && texture ) {
			// Set the FBO
			glBindFramebuffer( GL_FRAMEBUFFER, fbo->fbo );
			check_error();
			glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture->texture, 0 );
			check_error();
			glBindFramebuffer( GL_FRAMEBUFFER, 0 );
			check_error();

			GlslManager::render( service, chain, fbo->fbo, *width, *height );

			// Read FBO into PBO
			glBindBuffer( GL_PIXEL_PACK_BUFFER_ARB, pbo->pbo );
			check_error();
			glBufferData( GL_PIXEL_PACK_BUFFER_ARB, img_size, NULL, GL_STREAM_READ );
			check_error();
			glReadPixels( 0, 0, *width, *height, gl_format, GL_UNSIGNED_BYTE, BUFFER_OFFSET(0) );
			check_error();

			// Copy from PBO
			uint8_t* buf = (uint8_t*) glMapBuffer( GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY );
			check_error();

			*format = gl_format == GL_RGBA ? mlt_image_rgb24a : mlt_image_rgb24;
			*image = (uint8_t*) mlt_pool_alloc( img_size );
			mlt_frame_set_image( a_frame, *image, img_size, mlt_pool_release );
			memcpy( *image, buf, img_size );

			// Release PBO and FBO
			glUnmapBuffer( GL_PIXEL_PACK_BUFFER_ARB );
			check_error();
			glBindBuffer( GL_PIXEL_PACK_BUFFER_ARB, 0 );
			check_error();
			glBindFramebuffer( GL_FRAMEBUFFER, 0 );
			check_error();
			glBindTexture( GL_TEXTURE_2D, 0 );
			check_error();
			GlslManager::release_texture( texture );
		}
		else {
			error = 1;
		}
	}
	if ( fbo ) GlslManager::release_fbo( fbo );
	mlt_service_unlock( service );

	return error;
}