Example #1
static int attach_boundry_to_frame( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Get the filter object
	mlt_filter filter = mlt_frame_pop_service( frame );

	// Get the filter's property object
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES(filter);

	// Get the frame properties
	mlt_properties frame_properties = MLT_FRAME_PROPERTIES(frame);

	// Get the frame position
	mlt_position position = mlt_filter_get_position( filter, frame );
	
	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	// Get the geometry object
	mlt_geometry geometry = mlt_properties_get_data(filter_properties, "filter_geometry", NULL);
	if (geometry == NULL) {
		mlt_geometry geom = mlt_geometry_init();
		char *arg = mlt_properties_get(filter_properties, "geometry");

		// Initialize with the supplied geometry
		struct mlt_geometry_item_s item;
		mlt_geometry_parse_item( geom, &item, arg  );

		item.frame = 0;
		item.key = 1;
		item.mix = 100;

		mlt_geometry_insert( geom, &item );
		mlt_geometry_interpolate( geom );
		mlt_properties_set_data( filter_properties, "filter_geometry", geom, 0, (mlt_destructor)mlt_geometry_close, (mlt_serialiser)mlt_geometry_serialise );
		geometry = mlt_properties_get_data(filter_properties, "filter_geometry", NULL);
	}

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	// Get the current geometry item
	mlt_geometry_item geometry_item = mlt_pool_alloc( sizeof( struct mlt_geometry_item_s ) );
	mlt_geometry_fetch(geometry, geometry_item, position);

	// Cleanse the geometry item
	geometry_item->w = geometry_item->x < 0 ? geometry_item->w + geometry_item->x : geometry_item->w;
	geometry_item->h = geometry_item->y < 0 ? geometry_item->h + geometry_item->y : geometry_item->h;
	geometry_item->x = geometry_item->x < 0 ? 0 : geometry_item->x;
	geometry_item->y = geometry_item->y < 0 ? 0 : geometry_item->y;
	geometry_item->w = geometry_item->w < 0 ? 0 : geometry_item->w;
	geometry_item->h = geometry_item->h < 0 ? 0 : geometry_item->h;

	mlt_properties_set_data( frame_properties, "bounds", geometry_item, sizeof( struct mlt_geometry_item_s ), mlt_pool_release, NULL );

	// Get the new image
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	if( error != 0 )
		mlt_properties_debug( frame_properties, "error after mlt_frame_get_image() in autotrack_rectangle attach_boundry_to_frame", stderr );

	return error;
}
Example #2
static void draw_spectrum( mlt_filter filter, mlt_frame frame, QImage* qimg )
{
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );
	mlt_rect rect = mlt_properties_anim_get_rect( filter_properties, "rect", position, length );
	if ( strchr( mlt_properties_get( filter_properties, "rect" ), '%' ) ) {
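		// Percent-style rect values are normalized to 0..1, so scale them to pixel coordinates.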
		rect.x *= qimg->width();
		rect.w *= qimg->width();
		rect.y *= qimg->height();
		rect.h *= qimg->height();
	}
	char* graph_type = mlt_properties_get( filter_properties, "type" );
	int mirror = mlt_properties_get_int( filter_properties, "mirror" );
	int fill = mlt_properties_get_int( filter_properties, "fill" );
	double tension = mlt_properties_get_double( filter_properties, "tension" );

	QRectF r( rect.x, rect.y, rect.w, rect.h );
	QPainter p( qimg );

	if( mirror ) {
		// Draw two half rectangles instead of one full rectangle.
		r.setHeight( r.height() / 2.0 );
	}

	setup_graph_painter( p, r, filter_properties );
	setup_graph_pen( p, r, filter_properties );

	int bands = mlt_properties_get_int( filter_properties, "bands" );
	if ( bands == 0 ) {
		// "0" means match rectangle width
		bands = r.width();
	}
	float* spectrum = (float*)mlt_pool_alloc( bands * sizeof(float) );

	convert_fft_to_spectrum( filter, frame, bands, spectrum );

	if( graph_type && graph_type[0] == 'b' ) {
		paint_bar_graph( p, r, bands, spectrum );
	} else {
		paint_line_graph( p, r, bands, spectrum, tension, fill );
	}

	if( mirror ) {
		// Second rectangle is mirrored.
		p.translate( 0, r.y() * 2 + r.height() * 2 );
		p.scale( 1, -1 );

		if( graph_type && graph_type[0] == 'b' ) {
			paint_bar_graph( p, r, bands, spectrum );
		} else {
			paint_line_graph( p, r, bands, spectrum, tension, fill );
		}
	}

	mlt_pool_release( spectrum );

	p.end();
}
Example #3
double mlt_filter_get_progress( mlt_filter self, mlt_frame frame )
{
	double position = mlt_filter_get_position( self, frame );
	double length = mlt_filter_get_length2( self, frame );
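	// Guard against dividing by zero when the filter spans zero or one frame.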
	if (length > 1)
		return position / (length - 1);
	else
		return 1.0;
}
Example #4
static void analyze_image( mlt_filter filter, mlt_frame frame, uint8_t* vs_image, VSPixelFormat vs_format, int width, int height )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	vs_data* data = (vs_data*)filter->child;
	mlt_position pos = mlt_filter_get_position( filter, frame );

	// If any frames are skipped, analysis data will be incomplete.
	if( data->analyze_data && pos != data->analyze_data->last_position + 1 )
	{
		mlt_log_error( MLT_FILTER_SERVICE(filter), "Bad frame sequence\n" );
		destory_analyze_data( data->analyze_data );
		data->analyze_data = NULL;
	}

	if ( !data->analyze_data && pos == 0 )
	{
		// Analysis must start on the first frame
		init_analyze_data( filter, frame, vs_format, width, height );
	}

	if( data->analyze_data )
	{
		// Initialize the VSFrame to be analyzed.
		VSMotionDetect* md = &data->analyze_data->md;
		LocalMotions localmotions;
		VSFrame vsFrame;
		vsFrameFillFromBuffer( &vsFrame, vs_image, &md->fi );

		// Detect and save motions.
		if( vsMotionDetection( md, &localmotions, &vsFrame ) == VS_OK )
		{
			vsWriteToFile( md, data->analyze_data->results, &localmotions);
			vs_vector_del( &localmotions );
		}
		else
		{
			mlt_log_error( MLT_FILTER_SERVICE(filter), "Motion detection failed\n" );
			destory_analyze_data( data->analyze_data );
			data->analyze_data = NULL;
		}

		// Publish the motions if this is the last frame.
		if ( pos + 1 == mlt_filter_get_length2( filter, frame ) )
		{
			mlt_log_info( MLT_FILTER_SERVICE(filter), "Analysis complete\n" );
			destory_analyze_data( data->analyze_data );
			data->analyze_data = NULL;
			mlt_properties_set( properties, "results", mlt_properties_get( properties, "filename" ) );
		}
		else
		{
			data->analyze_data->last_position = pos;
		}
	}
}
Example #5
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter =  (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );

	// Get the image
	*format = mlt_image_yuv422;
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	// Only process if we have no error and a valid colour space
	if ( error == 0 )
	{
		double level = 1.0;

		// Use animated "level" property only if it has been set since init
		char* level_property = mlt_properties_get( MLT_FILTER_PROPERTIES( filter ), "level" );
		if ( level_property != NULL )
		{
			level = mlt_properties_anim_get_double( properties, "level", position, length );
		}
		else
		{
			// Get level using the old "start"/"end" mechanics
			// Get the starting brightness level
			level = fabs( mlt_properties_get_double( MLT_FILTER_PROPERTIES( filter ), "start" ) );

			// If there is an end value, adjust the gain over the range
			if ( mlt_properties_get( MLT_FILTER_PROPERTIES( filter ), "end" ) != NULL )
			{
				// Determine the time position of this frame in the transition duration
				double end = fabs( mlt_properties_get_double( MLT_FILTER_PROPERTIES( filter ), "end" ) );
				level += ( end - level ) * mlt_filter_get_progress( filter, frame );
			}
		}

		// Only process if level is something other than 1
		if ( level != 1.0 )
		{
			int i = *width * *height + 1;
			uint8_t *p = *image;
			int32_t m = level * ( 1 << 16 );
			int32_t n = 128 * ( ( 1 << 16 ) - m );
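			// m is the gain in 16.16 fixed point; n re-centres chroma toward neutral (128) as the gain departs from 1.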

			while ( --i )
			{
				p[0] = CLAMP( (p[0] * m) >> 16, 16, 235 );
				p[1] = CLAMP( (p[1] * m + n) >> 16, 16, 240 );
				p += 2;
			}
		}
	}

	return error;
}
Example #6
/** Get the image.
*/
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	int error = 0;
	mlt_filter filter = mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_producer producer = mlt_properties_get_data( properties, "_producer", NULL );
	mlt_transition transition = mlt_properties_get_data( properties, "_transition", NULL );
	mlt_frame text_frame = NULL;
	mlt_position position = 0;

	// Configure this filter
	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );
	setup_producer( filter, producer, frame );
	setup_transition( filter, transition );
	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	// Make sure the producer is in the correct position
	position = mlt_filter_get_position( filter, frame );
	mlt_producer_seek( producer, position );

	// Get the b frame and process with transition if successful
	if ( mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &text_frame, 0 ) == 0 )
	{
		// Get the a and b frame properties
		mlt_properties a_props = MLT_FRAME_PROPERTIES( frame );
		mlt_properties b_props = MLT_FRAME_PROPERTIES( text_frame );

		// Set the frame and text_frame to be in the same position and have same consumer requirements
		mlt_frame_set_position( text_frame, position );
		mlt_frame_set_position( frame, position );
		mlt_properties_set_int( b_props, "consumer_deinterlace", mlt_properties_get_int( a_props, "consumer_deinterlace" ) );

		// Apply all filters that are attached to this filter to the b frame
		mlt_service_apply_filters( MLT_FILTER_SERVICE( filter ), text_frame, 0 );

		// Process the frame
		mlt_transition_process( transition, frame, text_frame );

		// Get the image
		*format = mlt_image_yuv422;
		error = mlt_frame_get_image( frame, image, format, width, height, 1 );

		// Close the b frame
		mlt_frame_close( text_frame );
	}

	return error;
}
Example #7
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_frame_get_position( frame );

	// Get the image
	*format = mlt_image_yuv422;
	int error = mlt_frame_get_image( frame, image, format, width, height, 0 );

	// Only process if we have no error and a valid colour space
	if ( error == 0 )
	{
		double factor = mlt_properties_get_double( properties, "start" );

		mlt_position f_pos = mlt_filter_get_position( filter, frame );
		mlt_position f_len = mlt_filter_get_length2( filter, frame );
		int speed = mlt_properties_anim_get_int( properties, "speed", f_pos, f_len );
		int deformX = mlt_properties_anim_get_int( properties, "deformX", f_pos, f_len );
		int deformY = mlt_properties_anim_get_int( properties, "deformY", f_pos, f_len );

		if ( mlt_properties_get( properties, "end" ) )
		{
			// Determine the time position of this frame in the transition duration
			double end = fabs( mlt_properties_get_double( MLT_FILTER_PROPERTIES( filter ), "end" ) );
			factor += ( end - factor ) * mlt_filter_get_progress( filter, frame );
		}

		// If animated property "wave" is set, use its value. 
		char* wave_property = mlt_properties_get( properties, "wave" );
		if ( wave_property )
		{
			factor = mlt_properties_anim_get_double( properties, "wave", f_pos, f_len );
		}

		if (factor != 0) 
		{
			int image_size = *width * (*height) * 2;
			uint8_t *dst = mlt_pool_alloc (image_size);
			DoWave(*image, *width, (*height), dst, position, speed, factor, deformX, deformY);
			*image = dst;
			mlt_frame_set_image( frame, *image, image_size, mlt_pool_release );
		}
	}

	return error;
}
Example #8
static void draw_waveforms( mlt_filter filter, mlt_frame frame, QImage* qimg, int16_t* audio, int channels, int samples )
{
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );
	int show_channel = mlt_properties_get_int( filter_properties, "show_channel" );
	int fill = mlt_properties_get_int( filter_properties, "fill" );
	mlt_rect rect = mlt_properties_anim_get_rect( filter_properties, "rect", position, length );
	if ( strchr( mlt_properties_get( filter_properties, "rect" ), '%' ) ) {
		rect.x *= qimg->width();
		rect.w *= qimg->width();
		rect.y *= qimg->height();
		rect.h *= qimg->height();
	}

	QRectF r( rect.x, rect.y, rect.w, rect.h );

	QPainter p( qimg );

	setup_graph_painter( p, r, filter_properties );

	if ( show_channel == 0 ) // Show all channels
	{
		QRectF c_rect = r;
		qreal c_height = r.height() / channels;
		for ( int c = 0; c < channels; c++ )
		{
			// Divide the rectangle into smaller rectangles for each channel.
			c_rect.setY( r.y() + c_height * c );
			c_rect.setHeight( c_height );
			setup_graph_pen( p, c_rect, filter_properties );
			paint_waveform( p, c_rect, audio + c, samples, channels, fill );
		}
	} else if ( show_channel > 0 ) { // Show one specific channel
		if ( show_channel > channels ) {
			// Sanity
			show_channel = 1;
		}
		setup_graph_pen( p, r, filter_properties );
		paint_waveform( p, r, audio + show_channel - 1, samples, channels, fill );
	}

	p.end();
}
Example #9
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{

	mlt_filter filter = mlt_frame_pop_service( frame );
	*format = mlt_image_rgb24a;
	mlt_log_debug( MLT_FILTER_SERVICE( filter ), "frei0r %dx%d\n", *width, *height );
	int error = mlt_frame_get_image( frame, image, format, width, height, 0 );

	if ( error == 0 && *image )
	{
		double position = mlt_filter_get_position( filter, frame );
		mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
		double time = position / mlt_profile_fps( profile );
		int length = mlt_filter_get_length2( filter, frame );
		process_frei0r_item( MLT_FILTER_SERVICE(filter), position, time, length, frame, image, width, height );
	}

	return error;
}
Example #10
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position pos = mlt_filter_get_position( filter, frame );
	mlt_position len = mlt_filter_get_length2( filter, frame );

	*format = mlt_image_yuv422;
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	if ( error == 0 && *image )
	{
		int h = *height;
		int w = *width;

		double position = mlt_filter_get_progress( filter, frame );
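		// Seed the RNG from the filter progress so the grain pattern is deterministic for each frame.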
		srand(position*10000);

		int noise = mlt_properties_anim_get_int( properties, "noise", pos, len );
		double contrast = mlt_properties_anim_get_double( properties, "contrast", pos, len ) / 100.0;
		double brightness = 127.0 * (mlt_properties_anim_get_double( properties, "brightness", pos, len ) -100.0 ) / 100.0;
		
		int x = 0,y = 0,pix = 0;
		for ( x = 0; x < w; x++ )
		{
			for( y = 0; y < h; y++ )
			{
				uint8_t* pixel = (*image + (y) * w * 2 + (x) * 2 );
				if (*pixel > 20)
				{
					pix = MIN ( MAX ( ( (double)*pixel -127.0  ) * contrast + 127.0 + brightness , 0 ) , 255 ) ;
					if ( noise > 0 ) pix -= ( rand() % noise - noise );

					*pixel = MIN ( MAX ( pix , 0 ) , 255 );
				}
			}
		}
	}

	return error;
}
Example #11
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Get the filter
	mlt_filter filter = mlt_frame_pop_service( frame );

	// Get the properties of the filter
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	// Get the region transition
	mlt_transition transition = mlt_properties_get_data( properties, "_transition", NULL );

	// Create the transition if not available
	if ( transition == NULL )
	{
		// Create the transition
		mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
		transition = mlt_factory_transition( profile, "region", NULL );

		// Register with the filter
		mlt_properties_set_data( properties, "_transition", transition, 0, ( mlt_destructor )mlt_transition_close, NULL );

		// Pass a reference to this filter down
		mlt_properties_set_data( MLT_TRANSITION_PROPERTIES( transition ), "_region_filter", filter, 0, NULL, NULL );
	}

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	// Pass all properties down
	mlt_properties_inherit( MLT_TRANSITION_PROPERTIES( transition ), properties );

	// Make the frame's position relative to this filter's in point
	mlt_frame_set_position( frame, mlt_filter_get_position( filter, frame ) );

	// Process the frame
	mlt_transition_process( transition, frame, NULL );

	return mlt_frame_get_image( frame, image, format, width, height, writable );
}
Example #12
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );

	*format = mlt_image_yuv422;
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	if ( error == 0 )
	{
		// Get the gamma value
		double gamma = mlt_properties_anim_get_double( properties, "gamma", position, length );

		if ( gamma != 1.0 )
		{
			uint8_t *p = *image;
			uint8_t *q = *image + *width * *height * 2;

			// Calculate the look up table
			double exp = 1 / gamma;
			uint8_t lookup[ 256 ];
			int i;

			for( i = 0; i < 256; i ++ )
				lookup[ i ] = ( uint8_t )( pow( ( double )i / 255.0, exp ) * 255 );

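			// Apply the LUT to the luma bytes only (every second byte in packed 4:2:2).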
			while ( p != q )
			{
				*p = lookup[ *p ];
				p += 2;
			}
		}
	}

	return error;
}
Example #13
int get_image_and_detect(mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable)
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service(frame);
	mlt_properties properties = MLT_FILTER_PROPERTIES(filter);

	*format = mlt_image_yuv420p;

	writable = ( writable || mlt_properties_get_int(properties, "show") ) ? 1 : 0;

	int error = mlt_frame_get_image(frame, image, format, width, height, writable);
	if (!error)
	{
		// Service locks are for concurrency control
		mlt_service_lock(MLT_FILTER_SERVICE(filter));

		StabData *data = static_cast<StabData*>(mlt_properties_get_data(properties, "_stab_data", NULL));
		if (!data)
		{
			data = init_detect(properties, format, width, height);
			mlt_properties_set_data(properties, "_stab_data", data, 0, (mlt_destructor) destroy_detect, NULL);
		}

		VSMotionDetect* md = &data->md;
		LocalMotions localmotions;
		VSFrame vsFrame;
		vsFrameFillFromBuffer(&vsFrame, *image, &md->fi);

		// detect and save motions
		vsMotionDetection(md, &localmotions, &vsFrame);
		mlt_position pos = mlt_filter_get_position( filter, frame );
		serialize_localmotions(data, localmotions, pos);

		mlt_service_unlock(MLT_FILTER_SERVICE(filter));
	}

	return error;
}
Example #14
static int apply_results( mlt_filter filter, mlt_frame frame, uint8_t* vs_image, VSPixelFormat vs_format, int width, int height )
{
	int error = 0;
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	vs_data* data = (vs_data*)filter->child;

	if ( check_apply_config( filter, frame ) ||
		mlt_properties_get_int( properties, "reload" ) )
	{
		mlt_properties_set_int( properties, "reload", 0 );
		destory_apply_data( data->apply_data );
		data->apply_data = NULL;
	}

	// Init transform data if necessary (first time)
	if ( !data->apply_data )
	{
		init_apply_data( filter, frame, vs_format, width, height );
	}

	if( data->apply_data )
	{
		// Apply transformations to this image
		VSTransformData* td = &data->apply_data->td;
		VSTransformations* trans = &data->apply_data->trans;
		VSFrame vsFrame;
		vsFrameFillFromBuffer( &vsFrame, vs_image, vsTransformGetSrcFrameInfo( td ) );
		trans->current = mlt_filter_get_position( filter, frame );
		vsTransformPrepare( td, &vsFrame, &vsFrame );
		VSTransform t = vsGetNextTransform( td, trans );
		vsDoTransform( td, t );
		vsTransformFinish( td );
	}

	return error;
}
Example #15
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	int error = 0;
	mlt_filter filter = (mlt_filter)mlt_frame_pop_service( frame );
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	char* rect_str = mlt_properties_get( filter_properties, "rect" );
	if ( !rect_str )
	{
		mlt_log_warning( MLT_FILTER_SERVICE(filter), "rect property not set\n" );
		return mlt_frame_get_image( frame, image, format, width, height, writable );
	}
	mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );
	mlt_rect rect = mlt_properties_anim_get_rect( filter_properties, "rect", position, length );
	if ( strchr( rect_str, '%' ) )
	{
		rect.x *= profile->width;
		rect.w *= profile->width;
		rect.y *= profile->height;
		rect.h *= profile->height;
	}
	rect = constrain_rect( rect, profile->width, profile->height );
	if ( rect.w < 1 || rect.h < 1 )
	{
		mlt_log_info( MLT_FILTER_SERVICE(filter), "rect invalid\n" );
		return mlt_frame_get_image( frame, image, format, width, height, writable );
	}

	switch( *format )
	{
		case mlt_image_rgb24a:
		case mlt_image_rgb24:
		case mlt_image_yuv422:
		case mlt_image_yuv420p:
			// These formats are all supported
			break;
		default:
			*format = mlt_image_rgb24a;
			break;
	}
	error = mlt_frame_get_image( frame, image, format, width, height, 1 );
	if (error) return error;

	int i;
	switch( *format )
	{
		case mlt_image_rgb24a:
			for ( i = 0; i < 4; i++ )
			{
				remove_spot_channel( *image + i, *width, 4, rect );
			}
			break;
		case mlt_image_rgb24:
			for ( i = 0; i < 3; i++ )
			{
				remove_spot_channel( *image + i, *width, 3, rect );
			}
			break;
		case mlt_image_yuv422:
			// Y
			remove_spot_channel( *image, *width, 2, rect );
			// U
			remove_spot_channel( *image + 1, *width / 2, 4,
								 constrain_rect( scale_rect( rect, 2, 1 ), *width / 2, *height ) );
			// V
			remove_spot_channel( *image + 3, *width / 2, 4,
								 constrain_rect( scale_rect( rect, 2, 1 ), *width / 2, *height ) );
			break;
		case mlt_image_yuv420p:
			// Y
			remove_spot_channel( *image, *width, 1, rect );
			// U
			remove_spot_channel( *image + (*width * *height), *width / 2, 1,
								 constrain_rect( scale_rect( rect, 2, 2 ), *width / 2, *height / 2 ) );
			// V
			remove_spot_channel( *image + (*width * *height * 5 / 4), *width / 2, 1,
								 constrain_rect( scale_rect( rect, 2, 2 ), *width / 2, *height / 2 ) );
			break;
		default:
			return 1;
	}

	uint8_t *alpha = mlt_frame_get_alpha( frame );
	if ( alpha && *format != mlt_image_rgb24a )
	{
		remove_spot_channel( alpha, *width, 1, rect );
	}

	return error;
}
Example #16
static int get_image(mlt_frame frame, uint8_t **image, mlt_image_format *format,
		int *width, int *height, int writable)
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service(frame);
	mlt_properties properties = MLT_FILTER_PROPERTIES(filter);

	*format = mlt_image_yuv420p;
	DeshakeData *data = static_cast<DeshakeData*>(filter->child);

	int error = mlt_frame_get_image(frame, image, format, width, height, 1);
	if (!error)
	{
		// Service locks are for concurrency control
		mlt_service_lock(MLT_FILTER_SERVICE(filter));

		// Handle signal from app to re-init data
		if (mlt_properties_get_int(properties, "refresh"))
		{
			mlt_properties_set(properties, "refresh", NULL);
			clear_deshake(data);
			data->initialized = false;
		}

		// Clear deshake data when seeking or dropping frames
		mlt_position pos = mlt_filter_get_position(filter, frame);
		if(pos != data->lastFrame+1) {
			clear_deshake(data);
			data->initialized = false;
		}
		data->lastFrame = pos;

		if (!data->initialized)
		{
			char *interps = mlt_properties_get(MLT_FRAME_PROPERTIES(frame), "rescale.interp");
			init_deshake(data, properties, format, width, height,
					interps);
			data->initialized = true;
		}

		VSMotionDetect* md = &data->md;
		VSTransformData* td = &data->td;
		LocalMotions localmotions;
		VSTransform motion;
		VSFrame vsFrame;

		vsFrameFillFromBuffer(&vsFrame, *image, &md->fi);
		vsMotionDetection(md, &localmotions, &vsFrame);

		motion = vsSimpleMotionsToTransform(md->fi, FILTER_NAME, &localmotions);
		vs_vector_del(&localmotions);

		vsTransformPrepare(td, &vsFrame, &vsFrame);

		VSTransform t = vsLowPassTransforms(td, &data->avg, &motion);
//	    mlt_log_warning(filter, "Trans: det: %f %f %f \n\t\t act: %f %f %f %f",
//	                 motion.x, motion.y, motion.alpha,
//	                 t.x, t.y, t.alpha, t.zoom);
		vsDoTransform(td, t);
		vsTransformFinish(td);

		mlt_service_unlock(MLT_FILTER_SERVICE(filter));
	}

	return error;
}
Example #17
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Check if the channel configuration has changed
	int prev_channels = mlt_properties_get_int( filter_properties, "_prev_channels" );
	if ( prev_channels != *channels )
	{
		if( prev_channels )
		{
			mlt_log_info( MLT_FILTER_SERVICE(filter), "Channel configuration changed. Old: %d New: %d.\n", prev_channels, *channels );
			mlt_properties_set_data( filter_properties, "jackrack", NULL, 0, (mlt_destructor) NULL, NULL );
		}
		mlt_properties_set_int( filter_properties, "_prev_channels", *channels );
	}

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain &&
		 mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		// Resize the buffer if necessary.
		if ( *channels < jackrack->channels )
		{
			// Add extra channels to satisfy the plugin.
			// Extra channels in the buffer will be ignored by downstream services.
			int old_size = mlt_audio_format_size( *format, *samples, *channels );
			int new_size = mlt_audio_format_size( *format, *samples, jackrack->channels );
			uint8_t* new_buffer = mlt_pool_alloc( new_size );
			memcpy( new_buffer, *buffer, old_size );
			// Put silence in extra channels.
			memset( new_buffer + old_size, 0, new_size - old_size );
			mlt_frame_set_audio( frame, new_buffer, *format, new_size, mlt_pool_release );
			*buffer = new_buffer;
		}

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < jackrack->channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers
		LADSPA_Data **input_buffers  = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		
		// Some plugins crash with too many frames (samples).
		// So, feed the plugin with N samples per loop iteration.
		int samples_offset = 0;
		int sample_count = MIN(*samples, MAX_SAMPLE_COUNT);
		for (i = 0; samples_offset < *samples; i++) {
			int j = 0;
			for (; j < jackrack->channels; j++)
				output_buffers[j] = input_buffers[j] = (LADSPA_Data*) *buffer + j * (*samples) + samples_offset;
			sample_count = MIN(*samples - samples_offset, MAX_SAMPLE_COUNT);
			// Do LADSPA processing
			error = process_ladspa( jackrack->procinfo, sample_count, input_buffers, output_buffers );
			samples_offset += MAX_SAMPLE_COUNT;
		}

		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
Example #18
// Image stack(able) method
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{

	// Get the filter object
	mlt_filter filter = mlt_frame_pop_service( frame );

	// Get the filter's property object
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES(filter);

	// Get the frame properties
	mlt_properties frame_properties = MLT_FRAME_PROPERTIES(frame);

	// Get the frame position
	mlt_position position = mlt_filter_get_position( filter, frame );

	// Get the new image
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	if( error != 0 )
		mlt_properties_debug( frame_properties, "error after mlt_frame_get_image() in autotrack_rectangle", stderr );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	// Get the geometry object
	mlt_geometry geometry = mlt_properties_get_data(filter_properties, "filter_geometry", NULL);

	// Get the current geometry item
	struct mlt_geometry_item_s boundry;
	mlt_geometry_fetch(geometry, &boundry, position);

	// Get the motion vectors
	struct motion_vector_s *vectors = mlt_properties_get_data( frame_properties, "motion_est.vectors", NULL );

	// Cleanse the geometry item
	boundry.w = boundry.x < 0 ? boundry.w + boundry.x : boundry.w;
	boundry.h = boundry.y < 0 ? boundry.h + boundry.y : boundry.h;
	boundry.x = boundry.x < 0 ? 0 : boundry.x;
	boundry.y = boundry.y < 0 ? 0 : boundry.y;
	boundry.w = boundry.w < 0 ? 0 : boundry.w;
	boundry.h = boundry.h < 0 ? 0 : boundry.h;

	// How did the rectangle move?
	if( vectors != NULL &&
	    boundry.key != 1 ) // Paused?
	{

		int method = mlt_properties_get_int( filter_properties, "method" );

		// Get the size of macroblocks in pixel units
		int macroblock_height = mlt_properties_get_int( frame_properties, "motion_est.macroblock_height" );
		int macroblock_width = mlt_properties_get_int( frame_properties, "motion_est.macroblock_width" );
		int mv_buffer_width = *width / macroblock_width;

		caculate_motion( vectors, &boundry, macroblock_width, macroblock_height, mv_buffer_width, method, *width, *height );


		// Make the geometry object a real boy
		boundry.key = 1;
		boundry.f[0] = 1;
		boundry.f[1] = 1;
		boundry.f[2] = 1;
		boundry.f[3] = 1;
		boundry.f[4] = 1;
		mlt_geometry_insert(geometry, &boundry);
		mlt_geometry_interpolate(geometry);
	}

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	if( mlt_properties_get_int( filter_properties, "debug" ) == 1 )
	{
		init_arrows( format, *width, *height );
		draw_rectangle_outline(*image, boundry.x, boundry.y, boundry.w, boundry.h, 100);
	}

	if( mlt_properties_get_int( filter_properties, "_serialize" ) == 1 )
	{
		// Add the vector change to the list
		mlt_geometry key_frames = mlt_properties_get_data( filter_properties, "motion_vector_list", NULL );
		if ( !key_frames )
		{
			key_frames = mlt_geometry_init();
			mlt_properties_set_data( filter_properties, "motion_vector_list", key_frames, 0,
			                         (mlt_destructor) mlt_geometry_close, (mlt_serialiser) mlt_geometry_serialise );
			if ( key_frames )
				mlt_geometry_set_length( key_frames, mlt_filter_get_length2( filter, frame ) );
		}
		if ( key_frames )
		{
			struct mlt_geometry_item_s item;
			item.frame = (int) mlt_frame_get_position( frame );
			item.key = 1;
			item.x = boundry.x;
			item.y = boundry.y;
			item.w = boundry.w;
			item.h = boundry.h;
			item.mix = 0;
			item.f[0] = item.f[1] = item.f[2] = item.f[3] = 1;
			item.f[4] = 0;
			mlt_geometry_insert( key_frames, &item );
		}
	}
	
	if( mlt_properties_get_int( filter_properties, "obscure" ) == 1 )
	{
		mlt_filter obscure = mlt_properties_get_data( filter_properties, "_obscure", NULL );

		mlt_properties_pass_list( MLT_FILTER_PROPERTIES(obscure), filter_properties, "in, out");

		// Because filter_obscure needs to be rewritten to use mlt_geometry
		char geom[100];
		sprintf( geom, "%d/%d:%dx%d", (int)boundry.x, (int)boundry.y, (int)boundry.w, (int)boundry.h );
		mlt_properties_set( MLT_FILTER_PROPERTIES( obscure ), "start", geom );
		mlt_properties_set( MLT_FILTER_PROPERTIES( obscure ), "end", geom );
	}
		
	if( mlt_properties_get_int( filter_properties, "collect" ) == 1 )
	{
		fprintf( stderr, "%d,%d,%d,%d\n", (int)boundry.x, (int)boundry.y, (int)boundry.w, (int)boundry.h );
		fflush( stderr );
	}

	return error;
}
Example #19
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter =  (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );
	double level = 1.0;

	// Use animated "level" property only if it has been set since init
	char* level_property = mlt_properties_get( properties, "level" );
	if ( level_property != NULL )
	{
		level = mlt_properties_anim_get_double( properties, "level", position, length );
	}
	else
	{
		// Get level using the old "start"/"end" mechanics
		// Get the starting brightness level
		level = fabs( mlt_properties_get_double( properties, "start" ) );

		// If there is an end value, adjust the gain over the range
		if ( mlt_properties_get( properties, "end" ) != NULL )
		{
			// Determine the time position of this frame in the transition duration
			double end = fabs( mlt_properties_get_double( properties, "end" ) );
			level += ( end - level ) * mlt_filter_get_progress( filter, frame );
		}
	}

	// Do not cause an image conversion unless there is real work to do.
	if ( level != 1.0 )
		*format = mlt_image_yuv422;

	// Get the image
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	// Only process if we have no error.
	if ( error == 0 )
	{
		// Only process if level is something other than 1
		if ( level != 1.0 && *format == mlt_image_yuv422 )
		{
			int i = *width * *height + 1;
			uint8_t *p = *image;
			int32_t m = level * ( 1 << 16 );
			int32_t n = 128 * ( ( 1 << 16 ) - m );

			while ( --i )
			{
				p[0] = CLAMP( (p[0] * m) >> 16, 16, 235 );
				p[1] = CLAMP( (p[1] * m + n) >> 16, 16, 240 );
				p += 2;
			}
		}

		// Process the alpha channel if requested.
		if ( mlt_properties_get( properties, "alpha" ) )
		{
			double alpha = mlt_properties_anim_get_double( properties, "alpha", position, length );
			alpha = alpha >= 0.0 ? alpha : level;
			if ( alpha != 1.0 )
			{
				int32_t m = alpha * ( 1 << 16 );
				int i = *width * *height + 1;

				if ( *format == mlt_image_rgb24a ) {
					uint8_t *p = *image + 3;
					for ( ; --i; p += 4 )
						p[0] = ( p[0] * m ) >> 16;
				} else {
					// Non-RGBA formats carry the alpha channel in a separate mask.
					uint8_t *p = mlt_frame_get_alpha_mask( frame );
					for ( ; p && --i; p++ )
						p[0] = ( p[0] * m ) >> 16;
				}
			}
		}
	}

	return error;
}
Example #20
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position pos = mlt_filter_get_position( filter, frame );
	mlt_position len = mlt_filter_get_length2( filter, frame );

	*format = mlt_image_yuv422;
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	if (  error == 0 && *image )
	{
		int h = *height;
		int w = *width;

		int x = 0;
		int y = 0;

		double position = mlt_filter_get_progress( filter, frame );
		srand( position * 10000);

		int delta = mlt_properties_anim_get_int( properties, "delta", pos, len );
		int every = mlt_properties_anim_get_int( properties, "every", pos, len );

		int bdu = mlt_properties_anim_get_int( properties, "brightnessdelta_up", pos, len );
		int bdd = mlt_properties_anim_get_int( properties, "brightnessdelta_down", pos, len );
		int bevery = mlt_properties_anim_get_int( properties, "brightnessdelta_every", pos, len );

		int udu = mlt_properties_anim_get_int( properties, "unevendevelop_up", pos, len );
		int udd = mlt_properties_anim_get_int( properties, "unevendevelop_down", pos, len );
		int uduration = mlt_properties_anim_get_int( properties, "unevendevelop_duration", pos, len );

		int diffpic = 0;
		if ( delta )
			diffpic = rand() % delta * 2 - delta;
		int brightdelta = 0;
		if (( bdu + bdd ) != 0 )
			brightdelta = rand() % (bdu + bdd) - bdd;
		if ( rand() % 100 > every )
			diffpic = 0;
		if ( rand() % 100 > bevery)
			brightdelta = 0;
		int yend, ydiff;
		int unevendevelop_delta = 0;
		if ( uduration > 0 )
		{
			float uval = sinarr[ ( ((int)position) % uduration) * 100 / uduration ];
			unevendevelop_delta = uval * ( uval > 0 ? udu : udd );
		}
		if ( diffpic <= 0 )
		{
			y = h;
			yend = 0;
			ydiff = -1;
		}
		else
		{
			y = 0;
			yend = h;
			ydiff = 1;
		}

		while( y != yend )
		{
			for ( x = 0; x < w; x++ )
			{
				uint8_t* pic = ( *image + y * w * 2 + x * 2 );
				int newy = y + diffpic;
				if ( newy > 0 && newy < h )
				{
					uint8_t oldval = *( pic + diffpic * w * 2 );
					if ( ( (int) oldval + brightdelta + unevendevelop_delta ) > 255 )
					{
						*pic = 255;
					}
					else if ( ( (int) oldval + brightdelta + unevendevelop_delta ) < 0 )
					{
						*pic = 0;
					}
					else
					{
						*pic = oldval + brightdelta + unevendevelop_delta;
					}
					*( pic + 1 ) = *( pic + diffpic * w * 2 + 1 );
				}
				else
				{
					*pic = 0;
				}
			}
			y += ydiff;
		}
	}

	return error;
}
Example #21
static void refresh_lut( mlt_filter filter, mlt_frame frame )
{
	private_data* self = (private_data*)filter->child;
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );
	double rlift = mlt_properties_anim_get_double( properties, "lift_r", position, length );
	double glift = mlt_properties_anim_get_double( properties, "lift_g", position, length );
	double blift = mlt_properties_anim_get_double( properties, "lift_b", position, length );
	double rgamma = mlt_properties_anim_get_double( properties, "gamma_r", position, length );
	double ggamma = mlt_properties_anim_get_double( properties, "gamma_g", position, length );
	double bgamma = mlt_properties_anim_get_double( properties, "gamma_b", position, length );
	double rgain = mlt_properties_anim_get_double( properties, "gain_r", position, length );
	double ggain = mlt_properties_anim_get_double( properties, "gain_g", position, length );
	double bgain = mlt_properties_anim_get_double( properties, "gain_b", position, length );

	// Only regenerate the LUT if something changed.
	if( self->rlift != rlift || self->glift != glift || self->blift != blift ||
		self->rgamma != rgamma || self->ggamma != ggamma || self->bgamma != bgamma ||
		self->rgain != rgain || self->ggain != ggain || self->bgain != bgain )
	{
		int i = 0;
		for( i = 0; i < 256; i++ )
		{
			// Convert to gamma 2.2
			double gamma22 = pow( (double)i / 255.0, 1.0 / 2.2 );
			double r = gamma22;
			double g = gamma22;
			double b = gamma22;

			// Apply lift
			r += rlift * ( 1.0 - r );
			g += glift * ( 1.0 - g );
			b += blift * ( 1.0 - b );

			// Clamp negative values
			r = MAX( r, 0.0 );
			g = MAX( g, 0.0 );
			b = MAX( b, 0.0 );

			// Apply gamma
			r = pow( r, 2.2 / rgamma );
			g = pow( g, 2.2 / ggamma );
			b = pow( b, 2.2 / bgamma );

			// Apply gain
			r *= pow( rgain, 1.0 / rgamma );
			g *= pow( ggain, 1.0 / ggamma );
			b *= pow( bgain, 1.0 / bgamma );

			// Clamp values
			r = CLAMP( r, 0.0, 1.0 );
			g = CLAMP( g, 0.0, 1.0 );
			b = CLAMP( b, 0.0, 1.0 );

			// Update LUT
			self->rlut[ i ] = (int)(r * 255.0);
			self->glut[ i ] = (int)(g * 255.0);
			self->blut[ i ] = (int)(b * 255.0);
		}

		// Store the values that created the LUT so that
		// changes can be detected.
		self->rlift = rlift;
		self->glift = glift;
		self->blift = blift;
		self->rgamma = rgamma;
		self->ggamma = ggamma;
		self->bgamma = bgamma;
		self->rgain = rgain;
		self->ggain = ggain;
		self->bgain = bgain;
	}
}
Example #22
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain &&
		 mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio
		*channels = jackrack->channels;
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < *channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers
		LADSPA_Data **input_buffers  = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );

		for ( i = 0; i < *channels; i++ )
		{
			input_buffers[i]  = (LADSPA_Data*) *buffer + i * *samples;
			output_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
		}

		// Do LADSPA processing
		error = process_ladspa( jackrack->procinfo, *samples, input_buffers, output_buffers );

		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
Example #23
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Error we will return
	int error = 0;

	// Get the watermark filter object
	mlt_filter this = mlt_frame_pop_service( frame );

	// Get the properties of the filter
	mlt_properties properties = MLT_FILTER_PROPERTIES( this );

	mlt_service_lock( MLT_FILTER_SERVICE( this ) );

	// Get the producer from the filter
	mlt_producer producer = mlt_properties_get_data( properties, "producer", NULL );

	// Get the composite from the filter
	mlt_transition composite = mlt_properties_get_data( properties, "composite", NULL );

	// Get the resource to use
	char *resource = mlt_properties_get( properties, "resource" );

	// Get the old resource
	char *old_resource = mlt_properties_get( properties, "_old_resource" );

	// Create a composite if we don't have one
	if ( composite == NULL )
	{
		// Create composite via the factory
		mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( this ) );
		composite = mlt_factory_transition( profile, "composite", NULL );

		// Register the composite for reuse/destruction
		if ( composite != NULL )
			mlt_properties_set_data( properties, "composite", composite, 0, ( mlt_destructor )mlt_transition_close, NULL );
	}

	// If we have one
	if ( composite != NULL )
	{
		// Get the properties
		mlt_properties composite_properties = MLT_TRANSITION_PROPERTIES( composite );

		// Pass all the composite. properties on the filter down
		mlt_properties_pass( composite_properties, properties, "composite." );

		if ( mlt_properties_get( properties, "composite.out" ) == NULL )
			mlt_properties_set_int( composite_properties, "out", mlt_properties_get_int( properties, "_out" ) );

		// Force a refresh
		mlt_properties_set_int( composite_properties, "refresh", 1 );
	}

	// Create a producer if we don't have one
	if ( producer == NULL || ( old_resource != NULL && strcmp( resource, old_resource ) ) )
	{
		// Get the factory producer service
		char *factory = mlt_properties_get( properties, "factory" );

		// Create the producer
		mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( this ) );
		producer = mlt_factory_producer( profile, factory, resource );

		// If we have one
		if ( producer != NULL )
		{
			// Register the producer for reuse/destruction
			mlt_properties_set_data( properties, "producer", producer, 0, ( mlt_destructor )mlt_producer_close, NULL );

			// Ensure that we loop
			mlt_properties_set( MLT_PRODUCER_PROPERTIES( producer ), "eof", "loop" );

			// Set the old resource
			mlt_properties_set( properties, "_old_resource", resource );
		}
	}

	if ( producer != NULL )
	{
		// Get the producer properties
		mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );

		// Now pass all producer. properties on the filter down
		mlt_properties_pass( producer_properties, properties, "producer." );
	}

	mlt_service_unlock( MLT_FILTER_SERVICE( this ) );

	// Only continue if we have both producer and composite
	if ( composite != NULL && producer != NULL )
	{
		// Get the service of the producer
		mlt_service service = MLT_PRODUCER_SERVICE( producer );

		// We will get the 'b frame' from the producer
		mlt_frame b_frame = NULL;

		// Get the original producer position
		mlt_position position = mlt_filter_get_position( this, frame );

		// Make sure the producer is in the correct position
		mlt_producer_seek( producer, position );

		// Resetting position to appease the composite transition
		mlt_frame_set_position( frame, position );

		// Get the b frame and process with composite if successful
		if ( mlt_service_get_frame( service, &b_frame, 0 ) == 0 )
		{
			// Get the a and b frame properties
			mlt_properties a_props = MLT_FRAME_PROPERTIES( frame );
			mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );
			mlt_profile profile = mlt_service_profile( service );

			// Set the b frame to be in the same position and have same consumer requirements
			mlt_frame_set_position( b_frame, position );
			mlt_properties_set_int( b_props, "consumer_deinterlace", mlt_properties_get_int( a_props, "consumer_deinterlace" ) || mlt_properties_get_int( properties, "deinterlace" ) );

			// Check for the special case - no aspect ratio means no problem :-)
			if ( mlt_frame_get_aspect_ratio( b_frame ) == 0 )
				mlt_frame_set_aspect_ratio( b_frame, mlt_profile_sar( profile ) );
			if ( mlt_frame_get_aspect_ratio( frame ) == 0 )
				mlt_frame_set_aspect_ratio( frame, mlt_profile_sar( profile ) );

			if ( mlt_properties_get_int( properties, "distort" ) )
			{
				mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( composite ), "distort", 1 );
				mlt_properties_set_int( a_props, "distort", 1 );
				mlt_properties_set_int( b_props, "distort", 1 );
			}

			*format = mlt_image_yuv422;
			if ( mlt_properties_get_int( properties, "reverse" ) == 0 )
			{
				// Apply all filters that are attached to this filter to the b frame
				mlt_service_apply_filters( MLT_FILTER_SERVICE( this ), b_frame, 0 );

				// Process the frame
				mlt_transition_process( composite, frame, b_frame );

				// Get the image
				error = mlt_frame_get_image( frame, image, format, width, height, 1 );
			}
			else
			{
				char temp[ 132 ];
				int count = 0;
				uint8_t *alpha = NULL;
				const char *rescale = mlt_properties_get( a_props, "rescale.interp" );
				if ( rescale == NULL || !strcmp( rescale, "none" ) )
					rescale = "hyper";
				mlt_transition_process( composite, b_frame, frame );
				mlt_properties_set_int( a_props, "consumer_deinterlace", 1 );
				mlt_properties_set_int( b_props, "consumer_deinterlace", 1 );
				mlt_properties_set( a_props, "rescale.interp", rescale );
				mlt_properties_set( b_props, "rescale.interp", rescale );
				mlt_service_apply_filters( MLT_FILTER_SERVICE( this ), b_frame, 0 );
				error = mlt_frame_get_image( b_frame, image, format, width, height, 1 );
				alpha = mlt_frame_get_alpha_mask( b_frame );
				mlt_frame_set_image( frame, *image, *width * *height * 2, NULL );
				mlt_frame_set_alpha( frame, alpha, *width * *height, NULL );
				mlt_properties_set_int( a_props, "width", *width );
				mlt_properties_set_int( a_props, "height", *height );
				mlt_properties_set_int( a_props, "progressive", 1 );
				mlt_properties_inc_ref( b_props );
				strcpy( temp, "_b_frame" );
				while( mlt_properties_get_data( a_props, temp, NULL ) != NULL )
					sprintf( temp, "_b_frame%d", count ++ );
				mlt_properties_set_data( a_props, temp, b_frame, 0, ( mlt_destructor )mlt_frame_close, NULL );
			}
		}

		// Close the b frame
		mlt_frame_close( b_frame );
	}
	else
	{
		// Get the image from the frame without running fx
		error = mlt_frame_get_image( frame, image, format, width, height, 1 );
	}

	return error;
}
Example #24
double mlt_filter_get_progress( mlt_filter self, mlt_frame frame )
{
	double position = mlt_filter_get_position( self, frame );
	double length = mlt_filter_get_length2( self, frame );
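	// Simple ratio; assumes the filter length is non-zero.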
	return position / length;
}
Example #25
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position pos = mlt_filter_get_position( filter, frame );
	mlt_position len = mlt_filter_get_length2( filter, frame );
	
	int maxdia = mlt_properties_anim_get_int( properties, "maxdiameter", pos, len );
	int maxcount = mlt_properties_anim_get_int( properties, "maxcount", pos, len );

	*format = mlt_image_yuv422;
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	// Load svg
	char *factory = mlt_properties_get( properties, "factory" );
	char temp[1204] = "";
	sprintf( temp, "%s/oldfilm/", mlt_environment( "MLT_DATA" ) );
	
	mlt_properties direntries = mlt_properties_new();
	mlt_properties_dir_list( direntries, temp,"dust*.svg",1 );
	
	if (!maxcount)
		return 0;

	double position = mlt_filter_get_progress( filter, frame );
	srand( position * 10000 );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	int im = rand() % maxcount;
	int piccount = mlt_properties_count( direntries );
	while ( im-- && piccount )
	{
		int picnum = rand() % piccount;
		
		int y1 = rand() % *height;
		int x1 = rand() % *width;
		char resource[1024] = "";
		char savename[1024] = "", savename1[1024] = "", cachedy[100];
		int dx = ( *width * maxdia / 100);
		int luma_width, luma_height;
		uint8_t *luma_image = NULL;
		uint8_t *alpha = NULL;
		int updown = rand() % 2;
		int mirror = rand() % 2;
		
		sprintf( resource, "%s", mlt_properties_get_value(direntries,picnum) );
		sprintf( savename, "cache-%d-%d", picnum,dx );
		sprintf( savename1, "cache-alpha-%d-%d", picnum, dx );
		sprintf( cachedy, "cache-dy-%d-%d", picnum,dx );
		
		luma_image = mlt_properties_get_data( properties , savename , NULL );
		alpha = mlt_properties_get_data( properties , savename1 , NULL );
		
		if ( luma_image == NULL || alpha == NULL )
		{
			mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
			mlt_producer producer = mlt_factory_producer( profile, factory, resource );
		
			if ( producer != NULL )
			{
				mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );
				
				mlt_properties_set( producer_properties, "eof", "loop" );
				mlt_frame luma_frame = NULL;
				
				if ( mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &luma_frame, 0 ) == 0 )
				{
					mlt_image_format luma_format = mlt_image_yuv422;
					luma_width = dx;
					luma_height = luma_width * mlt_properties_get_int( MLT_FRAME_PROPERTIES ( luma_frame ) , "height" ) / mlt_properties_get_int( MLT_FRAME_PROPERTIES ( luma_frame ) , "width" );

					mlt_properties_set( MLT_FRAME_PROPERTIES( luma_frame ), "rescale.interp", "best" );// none/nearest/tiles/hyper
					
					mlt_frame_get_image( luma_frame, &luma_image, &luma_format, &luma_width, &luma_height, 0 );
					alpha = mlt_frame_get_alpha_mask (luma_frame );
					
					uint8_t* savealpha = mlt_pool_alloc( luma_width * luma_height );
					uint8_t* savepic = mlt_pool_alloc( luma_width * luma_height * 2);
					
					if ( savealpha && savepic )
					{
						memcpy( savealpha, alpha , luma_width * luma_height );
						memcpy( savepic, luma_image , luma_width * luma_height * 2 );
						
						mlt_properties_set_data( properties, savename, savepic, luma_width * luma_height * 2, mlt_pool_release, NULL );
						mlt_properties_set_data( properties, savename1, savealpha, luma_width * luma_height,  mlt_pool_release, NULL );
						mlt_properties_set_int( properties, cachedy, luma_height );
						
						overlay_image( *image, *width, *height, luma_image, luma_width, luma_height, alpha, x1, y1, updown, mirror );
					}
					else
					{
						if ( savealpha )
							mlt_pool_release( savealpha );
						if ( savepic )
							mlt_pool_release( savepic );
					}
					mlt_frame_close( luma_frame );	
				}
				mlt_producer_close( producer );	
			}
		}
		else 
		{
			overlay_image ( *image, *width, *height, luma_image, dx, mlt_properties_get_int ( properties, cachedy ), alpha, x1, y1, updown, mirror );
		}
	}

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	if (piccount>0 )
		return 0;
	if ( error == 0 && *image )
	{

		int h = *height;
		int w = *width;
		int im = rand() % maxcount;
		
		while ( im-- )
		{
			int type = im % 2;
			int y1 = rand() % h;
			int x1 = rand() % w;
			int dx = rand() % maxdia;
			int dy = rand() % maxdia;
			int x=0, y=0;
			double v = 0.0;
			for ( x = -dx ; x < dx ; x++ )
			{
				for ( y = -dy ; y < dy ; y++ ) 
				{
					if ( x1 + x < w && x1 + x > 0 && y1 + y < h && y1 + y > 0 ){
						uint8_t *pix = *image + (y+y1) * w * 2 + (x + x1) * 2;

						v=pow((double) x /(double)dx * 5.0, 2.0) + pow((double)y / (double)dy * 5.0, 2.0);
						if (v>10)
							v=10;
						v = 1.0 - ( v / 10.0 );

						switch(type)
						{
							case 0:
								*pix -= (*pix) * v;
								break;
							case 1:
								*pix += ( 255-*pix ) * v;
								break;
						}
					}
				}
			}
		}
	}

	return error;
}
Example #26
0
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Get the filter
	mlt_filter filter = mlt_frame_pop_service( frame );

	// Get the properties
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );

	// Get the image
	int error = 0;
	*format = mlt_image_rgb24a;

	// Only process if we have no error and a valid colour space
	if ( error == 0 )
	{
		mlt_service_lock( MLT_FILTER_SERVICE( filter ) );
		mlt_producer producer = mlt_properties_get_data( properties, "producer", NULL );
		mlt_transition transition = mlt_properties_get_data( properties, "transition", NULL );
		mlt_frame a_frame = NULL;
		mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );

		if ( producer == NULL )
		{
			char *background = mlt_properties_get( properties, "background" );
			producer = mlt_factory_producer( profile, NULL, background );
			mlt_properties_set_data( properties, "producer", producer, 0, (mlt_destructor)mlt_producer_close, NULL );
		}

		if ( transition == NULL )
		{
			transition = mlt_factory_transition( profile, "qtblend", NULL );
			mlt_properties_set_data( properties, "transition", transition, 0, (mlt_destructor)mlt_transition_close, NULL );
			if ( transition )
				mlt_properties_set_int( MLT_TRANSITION_PROPERTIES( transition ), "b_alpha", 1 );
		}

		if ( producer != NULL && transition != NULL )
		{
			mlt_position position = mlt_filter_get_position( filter, frame );
			mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
			mlt_position in = mlt_filter_get_in( filter );
			mlt_position out = mlt_filter_get_out( filter );
			double consumer_ar = mlt_profile_sar( profile );
			mlt_transition_set_in_and_out( transition, in, out );
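			// Constrain the background producer to the filter's in/out range when an out point is set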
			if ( out > 0 ) {
				mlt_properties_set_position( MLT_PRODUCER_PROPERTIES( producer ), "length", out - in + 1 );
				mlt_producer_set_in_and_out( producer, in, out );
			}
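			// Seek the background to the filter-relative position and pass through any "producer." / "transition." prefixed properties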
			mlt_producer_seek( producer, in + position );
			mlt_frame_set_position( frame, position );
			mlt_properties_pass( MLT_PRODUCER_PROPERTIES( producer ), properties, "producer." );
			mlt_properties_pass( MLT_TRANSITION_PROPERTIES( transition ), properties, "transition." );
			mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &a_frame, 0 );
			mlt_frame_set_position( a_frame, in + position );

			// Set the rescale interpolation to match the frame
			mlt_properties_set( MLT_FRAME_PROPERTIES( a_frame ), "rescale.interp", mlt_properties_get( frame_properties, "rescale.interp" ) );

			// Special case - aspect_ratio = 0
			if ( mlt_frame_get_aspect_ratio( frame ) == 0 )
				mlt_frame_set_aspect_ratio( frame, consumer_ar );
			if ( mlt_frame_get_aspect_ratio( a_frame ) == 0 )
				mlt_frame_set_aspect_ratio( a_frame, consumer_ar );

			// Add the qtblend transition onto the frame stack
			mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
			mlt_transition_process( transition, a_frame, frame );

			if ( mlt_properties_get_int( properties, "use_normalised" ) )
			{
				// Use the normalised width & height
				mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
				*width = profile->width;
				*height = profile->height;
			}
			
			mlt_frame_get_image( a_frame, image, format, width, height, writable );
			mlt_properties_set_data( frame_properties, "affine_frame", a_frame, 0, (mlt_destructor)mlt_frame_close, NULL );
			mlt_frame_set_image( frame, *image, *width * *height * 4, NULL );
			//mlt_frame_set_alpha( frame, mlt_frame_get_alpha_mask( a_frame ), *width * *height, NULL );
		}
		else
		{
			mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
		}
	}

	return error;
}
Example #27
0
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
    // Get the filter
    mlt_filter filter = mlt_frame_pop_service( frame );
    mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
    mlt_position position = mlt_filter_get_position( filter, frame );
    mlt_position length = mlt_filter_get_length2( filter, frame );

    // Get the image
    *format = mlt_image_yuv422;
    int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

    // Only process if we have no error and a valid colour space
    if ( error == 0 )
    {
        // Get the charcoal scatter value
        int x_scatter = mlt_properties_anim_get_double( properties, "x_scatter", position, length );
        int y_scatter = mlt_properties_anim_get_double( properties, "y_scatter", position, length );
        float scale = mlt_properties_anim_get_double( properties, "scale", position, length );
        float mix = mlt_properties_anim_get_double( properties, "mix", position, length );
        int invert = mlt_properties_anim_get_int( properties, "invert", position, length );

        // We'll process pixel by pixel
        int x = 0;
        int y = 0;

        // We need to create a new frame as this effect modifies the input
        uint8_t *temp = mlt_pool_alloc( *width * *height * 2 );
        uint8_t *p = temp;
        uint8_t *q = *image;

        // Calculations are carried out on a 3x3 matrix
        int matrix[ 3 ][ 3 ];

        // Used to carry out the matrix calculations
        int sum1;
        int sum2;
        float sum;
        int val;

        // Loop for each row
        for ( y = 0; y < *height; y ++ )
        {
            // Loop for each pixel
            for ( x = 0; x < *width; x ++ )
            {
                // Populate the matrix
                matrix[ 0 ][ 0 ] = get_Y( *image, *width, *height, x - x_scatter, y - y_scatter );
                matrix[ 0 ][ 1 ] = get_Y( *image, *width, *height, x            , y - y_scatter );
                matrix[ 0 ][ 2 ] = get_Y( *image, *width, *height, x + x_scatter, y - y_scatter );
                matrix[ 1 ][ 0 ] = get_Y( *image, *width, *height, x - x_scatter, y             );
                matrix[ 1 ][ 2 ] = get_Y( *image, *width, *height, x + x_scatter, y             );
                matrix[ 2 ][ 0 ] = get_Y( *image, *width, *height, x - x_scatter, y + y_scatter );
                matrix[ 2 ][ 1 ] = get_Y( *image, *width, *height, x            , y + y_scatter );
                matrix[ 2 ][ 2 ] = get_Y( *image, *width, *height, x + x_scatter, y + y_scatter );

                // Do calculations: vertical and horizontal Sobel gradients of the sampled luma
                sum1 = (matrix[2][0] - matrix[0][0]) + ( (matrix[2][1] - matrix[0][1]) << 1 ) + (matrix[2][2] - matrix[0][2]);
                sum2 = (matrix[0][2] - matrix[0][0]) + ( (matrix[1][2] - matrix[1][0]) << 1 ) + (matrix[2][2] - matrix[2][0]);
                sum = scale * sqrti( sum1 * sum1 + sum2 * sum2 );

                // Assign value
                *p ++ = !invert ? ( sum >= 16 && sum <= 235 ? 251 - sum : sum < 16 ? 235 : 16 ) :
                        ( sum >= 16 && sum <= 235 ? sum : sum < 16 ? 16 : 235 );
                q ++;
                val = 128 + mix * ( *q ++ - 128 );
                val = val < 16 ? 16 : val > 240 ? 240 : val;
                *p ++ = val;
            }
        }

        // Return the created image
        *image = temp;

        // Store new and destroy old
        mlt_frame_set_image( frame, *image, *width * *height * 2, mlt_pool_release );
    }

    return error;
}
Example #28
0
static int filter_get_image( mlt_frame this, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	int error = 0;
	mlt_filter filter = mlt_frame_pop_service( this );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	mlt_transition luma = mlt_properties_get_data( properties, "luma", NULL );
	mlt_frame b_frame = mlt_properties_get_data( properties, "frame", NULL );
	mlt_properties b_frame_props = b_frame ? MLT_FRAME_PROPERTIES( b_frame ) : NULL;
	int out = mlt_properties_get_int( properties, "period" );
	int cycle = mlt_properties_get_int( properties, "cycle" );
	int duration = mlt_properties_get_int( properties, "duration" );
	mlt_position position = mlt_filter_get_position( filter, this );

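	// Default the period to 25 frames, let "cycle" override it and clamp "duration" to the period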
	out = out ? out + 1 : 25;
	if ( cycle )
		out = cycle;
	if ( duration < 1 || duration > out )
		duration = out;
	*format = mlt_image_yuv422;

	if ( b_frame == NULL || mlt_properties_get_int( b_frame_props, "width" ) != *width || mlt_properties_get_int( b_frame_props, "height" ) != *height )
	{
		b_frame = mlt_frame_init( MLT_FILTER_SERVICE( filter ) );
		mlt_properties_set_data( properties, "frame", b_frame, 0, ( mlt_destructor )mlt_frame_close, NULL );
	}

	if ( luma == NULL )
Example #29
0
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = mlt_frame_pop_service( frame );
	*format = mlt_image_rgb24;
	mlt_properties_set_int( MLT_FRAME_PROPERTIES(frame), "consumer_deinterlace", 1 );
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	if ( !error && *image )
	{
		videostab self = filter->child;
		mlt_position length = mlt_filter_get_length2( filter, frame );
		int h = *height;
		int w = *width;

		// Service locks are for concurrency control
		mlt_service_lock( MLT_FILTER_SERVICE( filter ) );
		if ( !self->initialized )
		{
			// Initialize our context
			self->initialized = 1;
			self->es = es_init( w, h );
			self->pos_i = (vc*) malloc( length * sizeof(vc) );
			self->pos_h = (vc*) malloc( length * sizeof(vc) );
			self->pos_y = (vc*) malloc( h * sizeof(vc) );
			self->rs = rs_init( w, h );
		}
		char *vectors = mlt_properties_get( MLT_FILTER_PROPERTIES(filter), "vectors" );
		if ( !vectors )
		{
			// Analyse
			int pos = (int) mlt_filter_get_position( filter, frame );
			self->pos_i[pos] = vc_add( pos == 0 ? vc_zero() : self->pos_i[pos - 1], es_estimate( self->es, *image ) );

			// On last frame
			if ( pos == length - 1 )
			{
				mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE(filter) );
				double fps = mlt_profile_fps( profile );

				// Filter and store the results
				hipass( self->pos_i, self->pos_h, length, fps );
				serialize_vectors( self, length );
			}
		} else {
			// Apply
			if ( self->initialized != 2 )
			{
				// Load analysis results from property
				self->initialized = 2;
				deserialize_vectors( self, vectors, length );
			}
			if ( self->initialized == 2 )
			{
				// Stabilize
				float shutter_angle = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame) , "shutterangle" );
				float pos = mlt_filter_get_position( filter, frame );
				int i;

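				// Interpolate a smoothed position for each scanline (offset by the shutter angle to compensate rolling shutter), then resample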
				for ( i = 0; i < h; i++ )
					self->pos_y[i] = interp( self->lanc_kernels, self->pos_h, length, pos + ( i - h / 2.0 ) * shutter_angle / ( h * 360.0 ) );
				rs_resample( self->lanc_kernels, self->rs, *image, self->pos_y );
			}
		}
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
	}
	return error;
}
Example #30
0
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter from the frame
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the properties from the filter
	mlt_properties filter_props = MLT_FILTER_PROPERTIES( filter );

	// Get the frame's filter instance properties
	mlt_properties instance_props = mlt_frame_unique_properties( frame, MLT_FILTER_SERVICE( filter ) );

	// Get the parameters
	double gain = mlt_properties_get_double( instance_props, "gain" );
	double max_gain = mlt_properties_get_double( instance_props, "max_gain" );
	double limiter_level = 0.5; /* -6 dBFS */
	int normalise =  mlt_properties_get_int( instance_props, "normalise" );
	double amplitude =  mlt_properties_get_double( instance_props, "amplitude" );
	int i, j;
	double sample;
	int16_t peak;
	
	// Use animated value for gain if "level" property is set 
	char* level_property = mlt_properties_get( filter_props, "level" );
	if ( level_property != NULL )
	{
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );
		gain = mlt_properties_anim_get_double( filter_props, "level", position, length );
		gain = DBFSTOAMP( gain );
	}

	if ( mlt_properties_get( instance_props, "limiter" ) != NULL )
		limiter_level = mlt_properties_get_double( instance_props, "limiter" );
	
	// Get the producer's audio
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	// Determine numeric limits
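	// samp_width is the sample width in bits (16 for the s16 samples requested above)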
	int bytes_per_samp = (samp_width - 1) / 8 + 1;
	int samplemax = (1 << (bytes_per_samp * 8 - 1)) - 1;
	int samplemin = -samplemax - 1;

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	if ( normalise )
	{
		int window = mlt_properties_get_int( filter_props, "window" );
		double *smooth_buffer = mlt_properties_get_data( filter_props, "smooth_buffer", NULL );

		if ( window > 0 && smooth_buffer != NULL )
		{
			int smooth_index = mlt_properties_get_int( filter_props, "_smooth_index" );
			
			// Compute the signal power and put into smoothing buffer
			smooth_buffer[ smooth_index ] = signal_max_power( *buffer, *channels, *samples, &peak );

			if ( smooth_buffer[ smooth_index ] > EPSILON )
			{
				mlt_properties_set_int( filter_props, "_smooth_index", ( smooth_index + 1 ) % window );

				// Smooth the data and compute the gain
				gain *= amplitude / get_smoothed_data( smooth_buffer, window );
			}
		}
		else
		{
			gain *= amplitude / signal_max_power( (int16_t*) *buffer, *channels, *samples, &peak );
		}
	}

	if ( max_gain > 0 && gain > max_gain )
		gain = max_gain;

	// Initialise filter's previous gain value to prevent an inadvertent jump from 0
	mlt_position last_position = mlt_properties_get_position( filter_props, "_last_position" );
	mlt_position current_position = mlt_frame_get_position( frame );
	if ( mlt_properties_get( filter_props, "_previous_gain" ) == NULL
	     || current_position != last_position + 1 )
		mlt_properties_set_double( filter_props, "_previous_gain", gain );

	// Start the gain out at the previous
	double previous_gain = mlt_properties_get_double( filter_props, "_previous_gain" );

	// Determine ramp increment
	double gain_step = ( gain - previous_gain ) / *samples;

	// Save the current gain for the next iteration
	mlt_properties_set_double( filter_props, "_previous_gain", gain );
	mlt_properties_set_position( filter_props, "_last_position", current_position );

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	// Ramp from the previous gain to the current
	gain = previous_gain;

	int16_t *p = (int16_t*) *buffer;

	// Apply the gain
	for ( i = 0; i < *samples; i++ )
	{
		for ( j = 0; j < *channels; j++ )
		{
			sample = *p * gain;
			*p = ROUND( sample );
		
			if ( gain > 1.0 )
			{
				/* use limiter function instead of clipping */
				if ( normalise )
					*p = ROUND( samplemax * limiter( sample / (double) samplemax, limiter_level ) );
				
				/* perform clipping */
				else if ( sample > samplemax )
					*p = samplemax;
				else if ( sample < samplemin )
					*p = samplemin;
			}
			p++;
		}
		gain += gain_step;
	}
	
	return 0;
}