Example #1
static void pango_cached_image_destroy( void* p )
{
	struct pango_cached_image_s* i = p;

	if ( !i )
		return;
	if ( i->image )
		mlt_pool_release( i->image );
	if ( i->alpha )
		mlt_pool_release( i->alpha );
	mlt_pool_release( i );
}
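The destructor above pairs with buffers obtained from mlt_pool_alloc and is meant to be passed as an mlt_destructor so the owning properties list releases everything in one call. A minimal sketch of that registration pattern, assuming the struct layout implied by the destructor (the helper name, the "pango.image" key, and the 4-bytes-per-pixel image size are illustrative assumptions, not taken from the original source):

#include <framework/mlt.h>
#include <string.h>

/* Sketch only: build a cache entry from the MLT pool and register
 * pango_cached_image_destroy so the properties list disposes of it later. */
static void cache_pango_image( mlt_properties properties, int width, int height )
{
	struct pango_cached_image_s* i = mlt_pool_alloc( sizeof( *i ) );

	if ( !i )
		return;
	memset( i, 0, sizeof( *i ) );
	i->image = mlt_pool_alloc( width * height * 4 ); /* assumed 4 bytes per pixel */
	i->alpha = mlt_pool_alloc( width * height );
	mlt_properties_set_data( properties, "pango.image", i, sizeof( *i ),
		pango_cached_image_destroy, NULL );
}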
Example #2
File: common.c Project: bmatherly/mlt
void free_vsimage( uint8_t* vs_img, VSPixelFormat format )
{
	if( format != PF_YUV420P )
	{
		mlt_pool_release( vs_img );
	}
}
Example #3
File: win32.c Project: elfring/mlt
static int iconv_from_utf8( mlt_properties properties, const char *prop_name, const char *prop_name_out, const char* encoding )
{
	const char *text = mlt_properties_get( properties, prop_name );
	int result = -1;

	iconv_t cd = iconv_open( encoding, "UTF-8" );
	if ( text && ( cd != ( iconv_t )-1 ) ) {
		size_t inbuf_n = strlen( text );
		size_t outbuf_n = inbuf_n * 6;
		char *outbuf = mlt_pool_alloc( outbuf_n );
		char *outbuf_p = outbuf;

		memset( outbuf, 0, outbuf_n );

		if ( text != NULL && strcmp( text, "" ) && iconv( cd, &text, &inbuf_n, &outbuf_p, &outbuf_n ) != -1 )
			mlt_properties_set( properties, prop_name_out, outbuf );
		else
			mlt_properties_set( properties, prop_name_out, "" );

		mlt_pool_release( outbuf );
		result = 0;
	}
	if ( cd != ( iconv_t )-1 )
		iconv_close( cd );
	return result;
}
Example #4
static void draw_spectrum( mlt_filter filter, mlt_frame frame, QImage* qimg )
{
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );
	mlt_rect rect = mlt_properties_anim_get_rect( filter_properties, "rect", position, length );
	if ( strchr( mlt_properties_get( filter_properties, "rect" ), '%' ) ) {
		rect.x *= qimg->width();
		rect.w *= qimg->width();
		rect.y *= qimg->height();
		rect.h *= qimg->height();
	}
	char* graph_type = mlt_properties_get( filter_properties, "type" );
	int mirror = mlt_properties_get_int( filter_properties, "mirror" );
	int fill = mlt_properties_get_int( filter_properties, "fill" );
	double tension = mlt_properties_get_double( filter_properties, "tension" );

	QRectF r( rect.x, rect.y, rect.w, rect.h );
	QPainter p( qimg );

	if( mirror ) {
		// Draw two half rectangles instead of one full rectangle.
		r.setHeight( r.height() / 2.0 );
	}

	setup_graph_painter( p, r, filter_properties );
	setup_graph_pen( p, r, filter_properties );

	int bands = mlt_properties_get_int( filter_properties, "bands" );
	if ( bands == 0 ) {
		// "0" means match rectangle width
		bands = r.width();
	}
	float* spectrum = (float*)mlt_pool_alloc( bands * sizeof(float) );

	convert_fft_to_spectrum( filter, frame, bands, spectrum );

	if( graph_type && graph_type[0] == 'b' ) {
		paint_bar_graph( p, r, bands, spectrum );
	} else {
		paint_line_graph( p, r, bands, spectrum, tension, fill );
	}

	if( mirror ) {
		// Second rectangle is mirrored.
		p.translate( 0, r.y() * 2 + r.height() * 2 );
		p.scale( 1, -1 );

		if( graph_type && graph_type[0] == 'b' ) {
			paint_bar_graph( p, r, bands, spectrum );
		} else {
			paint_line_graph( p, r, bands, spectrum, tension, fill );
		}
	}

	mlt_pool_release( spectrum );

	p.end();
}
Example #5
void frame_cache_close( frame_cache self )
{
	if ( self == NULL )
		return;

	frame_cache_purge( self );
	mlt_pool_release( self->frames );
	free( self );
}
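frame_cache_close() releases the frames array back to the pool but frees the struct itself with free(), which implies the struct was allocated with malloc/calloc and the array with mlt_pool_alloc. A minimal sketch of a matching constructor under that assumption (the function name, the capacity parameter, and the element type of frames are hypothetical):

#include <framework/mlt.h>
#include <stdlib.h>

/* Sketch only: the struct is released with free() in frame_cache_close(), so it
 * comes from calloc(); the frames array is released with mlt_pool_release(), so
 * it comes from mlt_pool_alloc(). */
frame_cache frame_cache_init( int capacity )
{
	frame_cache self = calloc( 1, sizeof( *self ) );

	if ( self )
	{
		self->frames = mlt_pool_alloc( capacity * sizeof( mlt_frame ) );
		if ( !self->frames )
		{
			free( self );
			self = NULL;
		}
	}
	return self;
}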
Example #6
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Get the producer's audio
	*format = mlt_audio_float;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	
	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}
		
	// Allocate arrays of LADSPA buffer pointers, one per channel
	LADSPA_Data **input_buffers  = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
	LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );

	int i;
	for ( i = 0; i < *channels; i++ )
	{
		input_buffers[i]  = (LADSPA_Data*) *buffer + i * *samples;
		output_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
	}

	// Do LADSPA processing
	int error = jackrack && process_ladspa( jackrack->procinfo, *samples, input_buffers, output_buffers );

	mlt_pool_release( input_buffers );
	mlt_pool_release( output_buffers );

	return error;
}
Example #7
File: producer_ladspa.c Project: aib/mlt
static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the producer service
	mlt_producer producer = mlt_properties_get_data( MLT_FRAME_PROPERTIES( frame ), "_producer_ladspa", NULL );
	mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );
	int size = 0;
	LADSPA_Data** output_buffers = NULL;
	int i = 0;

	// Initialize LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( producer_properties, "_jackrack", NULL );
	if ( !jackrack )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( producer_properties, *channels );
	}

	if( jackrack )
	{
		// Correct the returns if necessary
		*samples = *samples <= 0 ? 1920 : *samples;
		*channels = *channels <= 0 ? 2 : *channels;
		*frequency = *frequency <= 0 ? 48000 : *frequency;
		*format = mlt_audio_float;

		// Calculate the size of the buffer
		size = *samples * *channels * sizeof( float );

		// Allocate the buffer
		*buffer = mlt_pool_alloc( size );

		// Initialize the LADSPA output buffer.
		output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
		for ( i = 0; i < *channels; i++ )
		{
			output_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
		}

		// Do LADSPA processing
		process_ladspa( jackrack->procinfo, *samples, NULL, output_buffers );
		mlt_pool_release( output_buffers );

		// Set the buffer for destruction
		mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
	}

	return 0;
}
Example #8
void read_xml(mlt_properties properties)
{
	FILE *f = fopen( mlt_properties_get( properties, "resource" ), "r");
	if ( f != NULL )
	{
		int size = 0;
		long lSize;
 
		fseek (f , 0 , SEEK_END);
		lSize = ftell (f);
		rewind (f);

		char *infile = (char*) mlt_pool_alloc(lSize + 1);
		size=fread(infile,1,lSize,f);
		infile[size] = '\0';
		fclose(f);
		mlt_properties_set(properties, "_xmldata", infile);
		mlt_pool_release( infile );
	}
}
Example #9
File: producer_kino.c Project: Enlik/mlt
static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int index )
{
	producer_kino this = producer->child;
	uint8_t *data = mlt_pool_alloc( FRAME_SIZE_625_50 );
	
	// Obtain the current frame number
	uint64_t position = mlt_producer_frame( producer );
	
	// Create an empty frame
	*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( producer ) );

	// Seek and fetch
	if ( kino_wrapper_get_frame( this->wrapper, data, position ) )
	{
		// Get the frame's properties
		mlt_properties properties = MLT_FRAME_PROPERTIES( *frame );

		// Determine if we're PAL or NTSC
		int is_pal = kino_wrapper_is_pal( this->wrapper );

		// Pass the dv data
		mlt_properties_set_data( properties, "dv_data", data, FRAME_SIZE_625_50, ( mlt_destructor )mlt_pool_release, NULL );

		// Update other info on the frame
		mlt_properties_set_int( properties, "width", 720 );
		mlt_properties_set_int( properties, "height", is_pal ? 576 : 480 );
		mlt_properties_set_int( properties, "top_field_first", is_pal ? 0 : ( data[ 5 ] & 0x07 ) == 0 ? 0 : 1 );
	}
	else
	{
		mlt_pool_release( data );
	}

	// Update timecode on the frame we're creating
	mlt_frame_set_position( *frame, mlt_producer_position( producer ) );

	// Calculate the next timecode
	mlt_producer_prepare_next( producer );

	return 0;
}
Example #10
File: vdpau.c Project: aib/mlt
static void vdpau_producer_close( producer_avformat self )
{
	if ( self->vdpau )
	{
		mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "vdpau_producer_close\n" );
		int i;
		for ( i = 0; i < MAX_VDPAU_SURFACES; i++ )
		{
			if ( self->vdpau->render_states[i].surface != VDP_INVALID_HANDLE )
				vdp_surface_destroy( self->vdpau->render_states[i].surface );
			self->vdpau->render_states[i].surface = VDP_INVALID_HANDLE;
		}

		mlt_deque_close( self->vdpau->deque );
		if ( self->vdpau->buffer )
			mlt_pool_release( self->vdpau->buffer );
		self->vdpau->buffer = NULL;

		vdpau_fini( self );
	}
}
Example #11
static void consumer_close( mlt_consumer consumer )
{
	mlt_properties consumer_properties = MLT_CONSUMER_PROPERTIES( consumer );
	avsync_stats* stats = mlt_properties_get_data( consumer_properties, "_stats", NULL );

	// Stop the consumer
	mlt_consumer_stop( consumer );

	// Close the file
	if( stats->out_file != stdout )
	{
		fclose( stats->out_file );
	}

	// Clean up memory
	mlt_pool_release( stats );

	// Close the parent
	mlt_consumer_close( consumer );

	// Free the memory
	free( consumer );
}
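consumer_close() releases the stats block explicitly instead of relying on a destructor attached to the "_stats" property, so the matching setup presumably stored it with a NULL destructor. A minimal sketch of such an initialisation, assuming an avsync_stats struct with the out_file member used above (the consumer_init_stats helper name and the stdout default are illustrative assumptions):

#include <framework/mlt.h>
#include <stdio.h>
#include <string.h>

/* Sketch only: allocate the stats block from the MLT pool and attach it with a
 * NULL destructor, because consumer_close() releases it with mlt_pool_release(). */
static void consumer_init_stats( mlt_consumer consumer )
{
	avsync_stats* stats = mlt_pool_alloc( sizeof( *stats ) );

	if ( !stats )
		return;
	memset( stats, 0, sizeof( *stats ) );
	stats->out_file = stdout; /* assumed default; the real consumer may open a file */
	mlt_properties_set_data( MLT_CONSUMER_PROPERTIES( consumer ), "_stats", stats,
		sizeof( *stats ), NULL, NULL );
}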
Example #12
void read_xml(mlt_properties properties)
{
	// Convert file name string encoding.
	const char *resource = mlt_properties_get( properties, "resource" );
	mlt_properties_set( properties, "_resource_utf8", resource );
	mlt_properties_from_utf8( properties, "_resource_utf8", "_resource_local8" );
	resource = mlt_properties_get( properties, "_resource_local8" );

	FILE *f = fopen( resource, "r" );
	if ( f != NULL )
	{
		int size = 0;
		long lSize;
 
		if ( fseek (f , 0 , SEEK_END) < 0 )
			goto error;
		lSize = ftell (f);
		if ( lSize <= 0 )
			goto error;
		rewind (f);

		char *infile = (char*) mlt_pool_alloc(lSize + 1);
		if ( infile )
		{
			size = fread(infile,1,lSize,f);
			if ( size )
			{
				infile[size] = '\0';
				mlt_properties_set(properties, "_xmldata", infile);
			}
			mlt_pool_release( infile );
		}
error:
		fclose(f);
	}
}
Example #13
static void foreach_consumer_put( mlt_consumer consumer, mlt_frame frame )
{
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
    mlt_consumer nested = NULL;
    char key[30];
    int index = 0;

    do {
        snprintf( key, sizeof(key), "%d.consumer", index++ );
        nested = mlt_properties_get_data( properties, key, NULL );
        if ( nested )
        {
            mlt_properties nested_props = MLT_CONSUMER_PROPERTIES(nested);
            double self_fps = mlt_properties_get_double( properties, "fps" );
            double nested_fps = mlt_properties_get_double( nested_props, "fps" );
            mlt_position nested_pos = mlt_properties_get_position( nested_props, "_multi_position" );
            mlt_position self_pos = mlt_frame_get_position( frame );
            double self_time = self_pos / self_fps;
            double nested_time = nested_pos / nested_fps;

            // get the audio for the current frame
            uint8_t *buffer = NULL;
            mlt_audio_format format = mlt_audio_s16;
            int channels = mlt_properties_get_int( properties, "channels" );
            int frequency = mlt_properties_get_int( properties, "frequency" );
            int current_samples = mlt_sample_calculator( self_fps, frequency, self_pos );
            mlt_frame_get_audio( frame, (void**) &buffer, &format, &frequency, &channels, &current_samples );
            int current_size = mlt_audio_format_size( format, current_samples, channels );

            // get any leftover audio
            int prev_size = 0;
            uint8_t *prev_buffer = mlt_properties_get_data( nested_props, "_multi_audio", &prev_size );
            uint8_t *new_buffer = NULL;
            if ( prev_size > 0 )
            {
                new_buffer = mlt_pool_alloc( prev_size + current_size );
                memcpy( new_buffer, prev_buffer, prev_size );
                memcpy( new_buffer + prev_size, buffer, current_size );
                buffer = new_buffer;
            }
            current_size += prev_size;
            current_samples += mlt_properties_get_int( nested_props, "_multi_samples" );

            while ( nested_time <= self_time )
            {
                // put ideal number of samples into cloned frame
                int deeply = index > 1 ? 1 : 0;
                mlt_frame clone_frame = mlt_frame_clone( frame, deeply );
                int nested_samples = mlt_sample_calculator( nested_fps, frequency, nested_pos );
                // -10 is an optimization to avoid tiny amounts of leftover samples
                nested_samples = nested_samples > current_samples - 10 ? current_samples : nested_samples;
                int nested_size = mlt_audio_format_size( format, nested_samples, channels );
                if ( nested_size > 0 )
                {
                    prev_buffer = mlt_pool_alloc( nested_size );
                    memcpy( prev_buffer, buffer, nested_size );
                }
                else
                {
                    prev_buffer = NULL;
                    nested_size = 0;
                }
                mlt_frame_set_audio( clone_frame, prev_buffer, format, nested_size, mlt_pool_release );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_samples", nested_samples );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_frequency", frequency );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_channels", channels );

                // chomp the audio
                current_samples -= nested_samples;
                current_size -= nested_size;
                buffer += nested_size;

                // send frame to nested consumer
                mlt_consumer_put_frame( nested, clone_frame );
                mlt_properties_set_position( nested_props, "_multi_position", ++nested_pos );
                nested_time = nested_pos / nested_fps;
            }

            // save any remaining audio
            if ( current_size > 0 )
            {
                prev_buffer = mlt_pool_alloc( current_size );
                memcpy( prev_buffer, buffer, current_size );
            }
            else
            {
                prev_buffer = NULL;
                current_size = 0;
            }
            mlt_pool_release( new_buffer );
            mlt_properties_set_data( nested_props, "_multi_audio", prev_buffer, current_size, mlt_pool_release, NULL );
            mlt_properties_set_int( nested_props, "_multi_samples", current_samples );
        }
    } while ( nested );
}
Example #14
File: filter_dust.c Project: aib/mlt
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = (mlt_filter) mlt_frame_pop_service( frame );
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_position pos = mlt_filter_get_position( filter, frame );
	mlt_position len = mlt_filter_get_length2( filter, frame );
	
	int maxdia = mlt_properties_anim_get_int( properties, "maxdiameter", pos, len );
	int maxcount = mlt_properties_anim_get_int( properties, "maxcount", pos, len );

	*format = mlt_image_yuv422;
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );

	// Load svg
	char *factory = mlt_properties_get( properties, "factory" );
	char temp[1204] = "";
	sprintf( temp, "%s/oldfilm/", mlt_environment( "MLT_DATA" ) );
	
	mlt_properties direntries = mlt_properties_new();
	mlt_properties_dir_list( direntries, temp,"dust*.svg",1 );
	
	if (!maxcount)
	{
		mlt_properties_close( direntries );
		return 0;
	}

	double position = mlt_filter_get_progress( filter, frame );
	srand( position * 10000 );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	int im = rand() % maxcount;
	int piccount = mlt_properties_count( direntries );
	while ( im-- && piccount )
	{
		int picnum = rand() % piccount;
		
		int y1 = rand() % *height;
		int x1 = rand() % *width;
		char resource[1024] = "";
		char savename[1024] = "", savename1[1024] = "", cachedy[100];
		int dx = ( *width * maxdia / 100);
		int luma_width, luma_height;
		uint8_t *luma_image = NULL;
		uint8_t *alpha = NULL;
		int updown = rand() % 2;
		int mirror = rand() % 2;
		
		sprintf( resource, "%s", mlt_properties_get_value(direntries,picnum) );
		sprintf( savename, "cache-%d-%d", picnum,dx );
		sprintf( savename1, "cache-alpha-%d-%d", picnum, dx );
		sprintf( cachedy, "cache-dy-%d-%d", picnum,dx );
		
		luma_image = mlt_properties_get_data( properties , savename , NULL );
		alpha = mlt_properties_get_data( properties , savename1 , NULL );
		
		if ( luma_image == NULL || alpha == NULL )
		{
			mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
			mlt_producer producer = mlt_factory_producer( profile, factory, resource );
		
			if ( producer != NULL )
			{
				mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );
				
				mlt_properties_set( producer_properties, "eof", "loop" );
				mlt_frame luma_frame = NULL;
				
				if ( mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &luma_frame, 0 ) == 0 )
				{
					mlt_image_format luma_format = mlt_image_yuv422;
					luma_width = dx;
					luma_height = luma_width * mlt_properties_get_int( MLT_FRAME_PROPERTIES ( luma_frame ) , "height" ) / mlt_properties_get_int( MLT_FRAME_PROPERTIES ( luma_frame ) , "width" );

					mlt_properties_set( MLT_FRAME_PROPERTIES( luma_frame ), "rescale.interp", "best" );// none/nearest/tiles/hyper
					
					mlt_frame_get_image( luma_frame, &luma_image, &luma_format, &luma_width, &luma_height, 0 );
					alpha = mlt_frame_get_alpha_mask (luma_frame );
					
					uint8_t* savealpha = mlt_pool_alloc( luma_width * luma_height );
					uint8_t* savepic = mlt_pool_alloc( luma_width * luma_height * 2);
					
					if ( savealpha && savepic )
					{
						memcpy( savealpha, alpha , luma_width * luma_height );
						memcpy( savepic, luma_image , luma_width * luma_height * 2 );
						
						mlt_properties_set_data( properties, savename, savepic, luma_width * luma_height * 2, mlt_pool_release, NULL );
						mlt_properties_set_data( properties, savename1, savealpha, luma_width * luma_height,  mlt_pool_release, NULL );
						mlt_properties_set_int( properties, cachedy, luma_height );
						
						overlay_image( *image, *width, *height, luma_image, luma_width, luma_height, alpha, x1, y1, updown, mirror );
					}
					else
					{
						if ( savealpha )
							mlt_pool_release( savealpha );
						if ( savepic )
							mlt_pool_release( savepic );
					}
					mlt_frame_close( luma_frame );	
				}
				mlt_producer_close( producer );	
			}
		}
		else 
		{
			overlay_image ( *image, *width, *height, luma_image, dx, mlt_properties_get_int ( properties, cachedy ), alpha, x1, y1, updown, mirror );
		}
	}

	mlt_properties_close( direntries );

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	if ( piccount > 0 )
		return 0;
	if ( error == 0 && *image )
	{

		int h = *height;
		int w = *width;
		int im = rand() % maxcount;
		
		while ( im-- )
		{
			int type = im % 2;
			int y1 = rand() % h;
			int x1 = rand() % w;
			int dx = rand() % maxdia;
			int dy = rand() % maxdia;
			int x=0, y=0;
			double v = 0.0;
			for ( x = -dx ; x < dx ; x++ )
			{
				for ( y = -dy ; y < dy ; y++ ) 
				{
					if ( x1 + x < w && x1 + x > 0 && y1 + y < h && y1 + y > 0 ){
						uint8_t *pix = *image + (y+y1) * w * 2 + (x + x1) * 2;

						v=pow((double) x /(double)dx * 5.0, 2.0) + pow((double)y / (double)dy * 5.0, 2.0);
						if (v>10)
							v=10;
						v = 1.0 - ( v / 10.0 );

						switch(type)
						{
							case 0:
								*pix -= (*pix) * v;
								break;
							case 1:
								*pix += ( 255-*pix ) * v;
								break;
						}
					}
				}
			}
		}
	}

	return error;
}
Example #15
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Check if the channel configuration has changed
	int prev_channels = mlt_properties_get_int( filter_properties, "_prev_channels" );
	if ( prev_channels != *channels )
	{
		if( prev_channels )
		{
			mlt_log_info( MLT_FILTER_SERVICE(filter), "Channel configuration changed. Old: %d New: %d.\n", prev_channels, *channels );
			mlt_properties_set_data( filter_properties, "jackrack", NULL, 0, (mlt_destructor) NULL, NULL );
		}
		mlt_properties_set_int( filter_properties, "_prev_channels", *channels );
	}

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain &&
		 mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		// Resize the buffer if necessary.
		if ( *channels < jackrack->channels )
		{
			// Add extra channels to satisfy the plugin.
			// Extra channels in the buffer will be ignored by downstream services.
			int old_size = mlt_audio_format_size( *format, *samples, *channels );
			int new_size = mlt_audio_format_size( *format, *samples, jackrack->channels );
			uint8_t* new_buffer = mlt_pool_alloc( new_size );
			memcpy( new_buffer, *buffer, old_size );
			// Put silence in extra channels.
			memset( new_buffer + old_size, 0, new_size - old_size );
			mlt_frame_set_audio( frame, new_buffer, *format, new_size, mlt_pool_release );
			*buffer = new_buffer;
		}

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < jackrack->channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers
		LADSPA_Data **input_buffers  = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		
		// Some plugins crash with too many frames (samples).
		// So, feed the plugin with N samples per loop iteration.
		int samples_offset = 0;
		int sample_count = MIN(*samples, MAX_SAMPLE_COUNT);
		for (i = 0; samples_offset < *samples; i++) {
			int j = 0;
			for (; j < jackrack->channels; j++)
				output_buffers[j] = input_buffers[j] = (LADSPA_Data*) *buffer + j * (*samples) + samples_offset;
			sample_count = MIN(*samples - samples_offset, MAX_SAMPLE_COUNT);
			// Do LADSPA processing
			error = process_ladspa( jackrack->procinfo, sample_count, input_buffers, output_buffers );
			samples_offset += MAX_SAMPLE_COUNT;
		}

		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
Example #16
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain &&
		 mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio
		*channels = jackrack->channels;
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < *channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers
		LADSPA_Data **input_buffers  = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );

		for ( i = 0; i < *channels; i++ )
		{
			input_buffers[i]  = (LADSPA_Data*) *buffer + i * *samples;
			output_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
		}

		// Do LADSPA processing
		error = process_ladspa( jackrack->procinfo, *samples, input_buffers, output_buffers );

		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
Example #17
    virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(
        IDeckLinkVideoInputFrame* video,
        IDeckLinkAudioInputPacket* audio )
    {
        if ( mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "preview" ) &&
                mlt_producer_get_speed( getProducer() ) == 0.0 && !mlt_deque_count( m_queue ))
        {
            pthread_cond_broadcast( &m_condition );
            return S_OK;
        }

        // Create mlt_frame
        mlt_frame frame = mlt_frame_init( MLT_PRODUCER_SERVICE( getProducer() ) );

        // Copy video
        if ( video )
        {
            if ( !( video->GetFlags() & bmdFrameHasNoInputSource ) )
            {
                int size = video->GetRowBytes() * ( video->GetHeight() + m_vancLines );
                void* image = mlt_pool_alloc( size );
                void* buffer = 0;
                unsigned char* p = (unsigned char*) image;
                int n = size / 2;
                // Initialize VANC lines to nominal black
                while ( --n )
                {
                    *p ++ = 16;
                    *p ++ = 128;
                }

                // Capture VANC
                if ( m_vancLines > 0 )
                {
                    IDeckLinkVideoFrameAncillary* vanc = 0;
                    if ( video->GetAncillaryData( &vanc ) == S_OK && vanc )
                    {
                        for ( int i = 1; i < m_vancLines + 1; i++ )
                        {
                            if ( vanc->GetBufferForVerticalBlankingLine( i, &buffer ) == S_OK )
                                swab( (char*) buffer, (char*) image + ( i - 1 ) * video->GetRowBytes(), video->GetRowBytes() );
                            else
                                mlt_log_debug( getProducer(), "failed capture vanc line %d\n", i );
                        }
                        SAFE_RELEASE(vanc);
                    }
                }

                // Capture image
                video->GetBytes( &buffer );
                if ( image && buffer )
                {
                    size =  video->GetRowBytes() * video->GetHeight();
                    swab( (char*) buffer, (char*) image + m_vancLines * video->GetRowBytes(), size );
                    mlt_frame_set_image( frame, (uint8_t*) image, size, mlt_pool_release );
                }
                else if ( image )
                {
                    mlt_log_verbose( getProducer(), "no video\n" );
                    mlt_pool_release( image );
                }
            }
            else
            {
                mlt_log_verbose( getProducer(), "no signal\n" );
                mlt_frame_close( frame );
                frame = 0;
            }

            // Get timecode
            IDeckLinkTimecode* timecode = 0;
            if ( video->GetTimecode( bmdTimecodeVITC, &timecode ) == S_OK && timecode )
            {
                DLString timecodeString = 0;

                if ( timecode->GetString( &timecodeString ) == S_OK )
                {
                    char* s = getCString( timecodeString );
                    mlt_properties_set( MLT_FRAME_PROPERTIES( frame ), "meta.attr.vitc.markup", s );
                    mlt_log_debug( getProducer(), "timecode %s\n", s );
                    freeCString( s );
                }
                freeDLString( timecodeString );
                SAFE_RELEASE( timecode );
            }
        }
        else
        {
            mlt_log_verbose( getProducer(), "no video\n" );
            mlt_frame_close( frame );
            frame = 0;
        }

        // Copy audio
        if ( frame && audio )
        {
            int channels = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" );
            int size = audio->GetSampleFrameCount() * channels * sizeof(int16_t);
            mlt_audio_format format = mlt_audio_s16;
            void* pcm = mlt_pool_alloc( size );
            void* buffer = 0;

            audio->GetBytes( &buffer );
            if ( buffer )
            {
                memcpy( pcm, buffer, size );
                mlt_frame_set_audio( frame, pcm, format, size, mlt_pool_release );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(frame), "audio_samples", audio->GetSampleFrameCount() );
            }
            else
            {
                mlt_log_verbose( getProducer(), "no audio\n" );
                mlt_pool_release( pcm );
            }
        }
        else
        {
            mlt_log_verbose( getProducer(), "no audio\n" );
        }

        // Put frame in queue
        if ( frame )
        {
            int queueMax = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "buffer" );
            pthread_mutex_lock( &m_mutex );
            if ( mlt_deque_count( m_queue ) < queueMax )
            {
                mlt_deque_push_back( m_queue, frame );
                pthread_cond_broadcast( &m_condition );
            }
            else
            {
                mlt_frame_close( frame );
                mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "dropped", ++m_dropped );
                mlt_log_warning( getProducer(), "frame dropped %d\n", m_dropped );
            }
            pthread_mutex_unlock( &m_mutex );
        }

        return S_OK;
    }