Example #1
static void *consumer_thread( void *arg )
{
    mlt_consumer consumer = arg;
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
    mlt_frame frame = NULL;

    // Determine whether to stop at end-of-media
    int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
    int terminated = 0;

    // Loop while running
    while ( !terminated && !is_stopped( consumer ) )
    {
        // Get the next frame
        frame = mlt_consumer_rt_frame( consumer );

        // Check for termination
        if ( terminate_on_pause && frame )
            terminated = mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame ), "_speed" ) == 0.0;

        // Check that we have a frame to work with
        if ( frame && !terminated && !is_stopped( consumer ) )
        {
            if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES(frame), "rendered" ) )
            {
                if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES(frame), "_speed" ) == 0 )
                    foreach_consumer_refresh( consumer );
                foreach_consumer_put( consumer, frame );
            }
            else
            {
                int dropped = mlt_properties_get_int( properties, "_dropped" );
                mlt_log_info( MLT_CONSUMER_SERVICE(consumer), "dropped frame %d\n", ++dropped );
                mlt_properties_set_int( properties, "_dropped", dropped );
            }
            mlt_frame_close( frame );
        }
        else
        {
            if ( frame && terminated )
            {
                // Send this termination frame to nested consumers for their cancellation
                foreach_consumer_put( consumer, frame );
            }
            if ( frame )
                mlt_frame_close( frame );
            terminated = 1;
        }
    }

    // Indicate that the consumer is stopped
    mlt_consumer_stopped( consumer );

    return NULL;
}
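
A consumer implementation typically spawns consumer_thread() from its start handler and joins it from its stop handler. A minimal sketch, assuming a consumer struct whose first member is the parent mlt_consumer_s (the thread and running fields here are hypothetical, not MLT's actual layout):

#include <framework/mlt.h>
#include <pthread.h>

typedef struct
{
    struct mlt_consumer_s parent; // first member, so casts from mlt_consumer work
    pthread_t thread;
    int running;
} consumer_multi_s;

static int consumer_start( mlt_consumer parent )
{
    consumer_multi_s *self = (consumer_multi_s*) parent;
    if ( !self->running )
    {
        self->running = 1; // is_stopped() above is assumed to observe this flag
        pthread_create( &self->thread, NULL, consumer_thread, parent );
    }
    return 0;
}

static int consumer_stop( mlt_consumer parent )
{
    consumer_multi_s *self = (consumer_multi_s*) parent;
    if ( self->running )
    {
        self->running = 0;
        pthread_join( self->thread, NULL ); // returns after mlt_consumer_stopped()
    }
    return 0;
}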
Example #2
static void analyze_image( mlt_filter filter, mlt_frame frame, uint8_t* vs_image, VSPixelFormat vs_format, int width, int height )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	vs_data* data = (vs_data*)filter->child;
	mlt_position pos = mlt_filter_get_position( filter, frame );

	// If any frames are skipped, analysis data will be incomplete.
	if( data->analyze_data && pos != data->analyze_data->last_position + 1 )
	{
		mlt_log_error( MLT_FILTER_SERVICE(filter), "Bad frame sequence\n" );
		destory_analyze_data( data->analyze_data );
		data->analyze_data = NULL;
	}

	if ( !data->analyze_data && pos == 0 )
	{
		// Analysis must start on the first frame
		init_analyze_data( filter, frame, vs_format, width, height );
	}

	if( data->analyze_data )
	{
		// Initialize the VSFrame to be analyzed.
		VSMotionDetect* md = &data->analyze_data->md;
		LocalMotions localmotions;
		VSFrame vsFrame;
		vsFrameFillFromBuffer( &vsFrame, vs_image, &md->fi );

		// Detect and save motions.
		if( vsMotionDetection( md, &localmotions, &vsFrame ) == VS_OK )
		{
			vsWriteToFile( md, data->analyze_data->results, &localmotions);
			vs_vector_del( &localmotions );
		}
		else
		{
			mlt_log_error( MLT_FILTER_SERVICE(filter), "Motion detection failed\n" );
			destory_analyze_data( data->analyze_data );
			data->analyze_data = NULL;
		}

		// Publish the motions if this is the last frame.
		if ( pos + 1 == mlt_filter_get_length2( filter, frame ) )
		{
			mlt_log_info( MLT_FILTER_SERVICE(filter), "Analysis complete\n" );
			destory_analyze_data( data->analyze_data );
			data->analyze_data = NULL;
			mlt_properties_set( properties, "results", mlt_properties_get( properties, "filename" ) );
		}
		else
		{
			data->analyze_data->last_position = pos;
		}
	}
}
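
For context, analyze_image() relies on per-filter state defined elsewhere in the vid.stab module. A sketch of that state, inferred only from the fields referenced above (the layout is an assumption, not the module's actual definition):

typedef struct
{
	VSMotionDetect md;          // vid.stab motion-detection context
	FILE* results;              // open file handle passed to vsWriteToFile()
	mlt_position last_position; // used above to detect skipped frames
} vs_analyze;

typedef struct
{
	vs_analyze* analyze_data; // non-NULL while an analysis pass is in progress
	vs_apply* apply_data;     // used by the apply path (see Example #10)
} vs_data;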
Example #3
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	mlt_filter filter = mlt_frame_pop_audio( frame );
	private_data* pdata = (private_data*)filter->child;
	mlt_position o_pos = mlt_frame_original_position( frame );

	// Get the producer's audio
	*format = mlt_audio_f32le;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	if( abs( o_pos - pdata->prev_o_pos ) > 1 )
	{
		// Assume this is a new clip and restart
		// Use original position so that transitions between clips are detected.
		pdata->reset = 1;
		mlt_log_info( MLT_FILTER_SERVICE( filter ), "Reset. Old Pos: %d\tNew Pos: %d\n", pdata->prev_o_pos, o_pos );
	}

	check_for_reset( filter, *channels, *frequency );

	if( o_pos != pdata->prev_o_pos )
	{
		// Only analyze the audio if the producer is not paused.
		analyze_audio( filter, *buffer, *samples, *frequency );
	}

	double start_coeff = pdata->start_gain > -90.0 ? pow(10.0, pdata->start_gain / 20.0) : 0.0;
	double end_coeff = pdata->end_gain > -90.0 ? pow(10.0, pdata->end_gain / 20.0) : 0.0;
	double coeff_factor = pow( (end_coeff / start_coeff), 1.0 / (double)*samples );
	double coeff = start_coeff;
	float* p = *buffer;
	int s = 0;
	int c = 0;
	for( s = 0; s < *samples; s++ )
	{
		coeff = coeff * coeff_factor;
		for ( c = 0; c < *channels; c++ )
		{
			*p = *p * coeff;
			p++;
		}
	}

	pdata->prev_o_pos = o_pos;

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	return 0;
}
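
The ramp above converts the start and end gains from dB to linear coefficients (pow(10, dB/20), with anything at or below -90 dB treated as silence) and multiplies each successive sample frame by a constant factor, so the gain moves exponentially between the two coefficients, i.e. linearly in dB. A standalone sketch of the same arithmetic (compile with -lm):

#include <math.h>
#include <stdio.h>

int main( void )
{
	double start_gain = -20.0, end_gain = 0.0; // dB
	int samples = 4800;                        // e.g. 0.1 s at 48 kHz
	double start_coeff = pow( 10.0, start_gain / 20.0 );
	double end_coeff = pow( 10.0, end_gain / 20.0 );
	// After 'samples' steps, start_coeff * factor^samples == end_coeff.
	double factor = pow( end_coeff / start_coeff, 1.0 / (double) samples );
	double coeff = start_coeff;
	int s;
	for ( s = 0; s < samples; s++ )
		coeff *= factor;
	printf( "final coeff %f (expected %f)\n", coeff, end_coeff );
	return 0;
}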
Example #4
File: jack_rack.c Project: mcfrisk/mlt
plugin_t *
jack_rack_instantiate_plugin (jack_rack_t * jack_rack, plugin_desc_t * desc)
{
  plugin_t * plugin;
  
  /* check whether or not the plugin is RT capable and confirm with the user if it isn't */
  if (!LADSPA_IS_HARD_RT_CAPABLE(desc->properties)) {
    mlt_log_info( NULL, "Plugin not RT capable. The plugin '%s' does not describe itself as being capable of real-time operation. You may experience drop outs or jack may even kick us out if you use it.\n",
               desc->name);
  }

  /* create the plugin */
  plugin = plugin_new (desc, jack_rack);

  if (!plugin) {
    mlt_log_error( NULL, "Error loading file plugin '%s' from file '%s'\n",
               desc->name, desc->object_file);
  }
  
  return plugin;
}
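
The RT-capability test is a plain bitmask check; for reference, the relevant definitions from ladspa.h:

/* From ladspa.h */
#define LADSPA_PROPERTY_HARD_RT_CAPABLE 0x4
#define LADSPA_IS_HARD_RT_CAPABLE(x) ((x) & LADSPA_PROPERTY_HARD_RT_CAPABLE)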
Example #5
static void *video_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Obtain time of thread start
	struct timeval now;
	int64_t start = 0;
	int64_t elapsed = 0;
	struct timespec tm;
	mlt_frame next = NULL;
	mlt_properties properties = NULL;
	double speed = 0;

	// Get real time flag
	int real_time = mlt_properties_get_int( self->properties, "real_time" );

#if !defined(__APPLE__) && !defined(_WIN32)
	if ( setup_sdl_video(self) )
		self->running = 0;
#endif

	// Determine start time
	gettimeofday( &now, NULL );
	start = ( int64_t )now.tv_sec * 1000000 + now.tv_usec;

	while ( self->running )
	{
		// Pop the next frame
		pthread_mutex_lock( &self->video_mutex );
		next = mlt_deque_pop_front( self->queue );
		while ( next == NULL && self->running )
		{
			pthread_cond_wait( &self->video_cond, &self->video_mutex );
			next = mlt_deque_pop_front( self->queue );
		}
		pthread_mutex_unlock( &self->video_mutex );

		if ( !self->running || next == NULL ) break;

		// Get the properties
		properties = MLT_FRAME_PROPERTIES( next );

		// Get the speed of the frame
		speed = mlt_properties_get_double( properties, "_speed" );

		// Get the current time
		gettimeofday( &now, NULL );

		// Get the elapsed time
		elapsed = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - start;

		// See if we have to delay the display of the current frame
		if ( mlt_properties_get_int( properties, "rendered" ) == 1 && self->running )
		{
			// Obtain the scheduled playout time
			int64_t scheduled = mlt_properties_get_int( properties, "playtime" );

			// Determine the difference between the elapsed time and the scheduled playout time
			int64_t difference = scheduled - elapsed;

			// Smooth playback a bit
			if ( real_time && ( difference > 20000 && speed == 1.0 ) )
			{
				tm.tv_sec = difference / 1000000;
				tm.tv_nsec = ( difference % 1000000 ) * 500;
				nanosleep( &tm, NULL );
			}

			// Show current frame if not too old
			if ( !real_time || ( difference > -10000 || speed != 1.0 || mlt_deque_count( self->queue ) < 2 ) )
				consumer_play_video( self, next );

			// If the queue is empty, recalculate start to allow build up again
			if ( real_time && ( mlt_deque_count( self->queue ) == 0 && speed == 1.0 ) )
			{
				gettimeofday( &now, NULL );
				start = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - scheduled + 20000;
			}
		}
		else
		{
			static int dropped = 0;
			mlt_log_info( MLT_CONSUMER_SERVICE(&self->parent), "dropped video frame %d\n", ++dropped );
		}

		// This frame can now be closed
		mlt_frame_close( next );
		next = NULL;
	}

	if ( next != NULL )
		mlt_frame_close( next );

	mlt_consumer_stopped( &self->parent );

	return NULL;
}
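
The loop above compares microseconds elapsed since thread start against each frame's "playtime" property and shows the frame unless it is more than 10 ms late (or playback is scrubbing, or the queue is nearly empty). The stamp itself is set by the consumer's frame loop; in outline, frame n at f frames per second plays at n * 1000000 / f microseconds. A sketch of that arithmetic (an assumption drawn from the comparison above, not the consumer's actual stamping code):

static int64_t scheduled_playtime_us( int64_t frame_number, double fps )
{
	// Same microsecond units that video_thread() compares against 'elapsed'
	return (int64_t)( (double) frame_number * 1000000.0 / fps );
}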
Example #6
File: vdpau.c Project: aib/mlt
static int vdpau_decoder_init( producer_avformat self )
{
	mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "vdpau_decoder_init\n" );
	int success = 1;
	
	self->video_codec->opaque = self;
	self->video_codec->get_format = vdpau_get_format;
	self->video_codec->get_buffer = vdpau_get_buffer;
	self->video_codec->release_buffer = vdpau_release_buffer;
	self->video_codec->draw_horiz_band = vdpau_draw_horiz;
	self->video_codec->slice_flags = SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD;
	self->video_codec->pix_fmt = PIX_FMT_VDPAU_H264;
	
	VdpDecoderProfile profile = VDP_DECODER_PROFILE_H264_HIGH;
	uint32_t max_references = self->video_codec->refs;
	pthread_mutex_lock( &mlt_sdl_mutex );
	VdpStatus status = vdp_decoder_create( self->vdpau->device,
		profile, self->video_codec->width, self->video_codec->height, max_references, &self->vdpau->decoder );
	pthread_mutex_unlock( &mlt_sdl_mutex );
	
	if ( status == VDP_STATUS_OK )
	{
		int i, n = FFMIN( self->video_codec->refs + 2, MAX_VDPAU_SURFACES );

		self->vdpau->deque = mlt_deque_init();
		for ( i = 0; i < n; i++ )
		{
			if ( VDP_STATUS_OK == vdp_surface_create( self->vdpau->device, VDP_CHROMA_TYPE_420,
				self->video_codec->width, self->video_codec->height, &self->vdpau->render_states[i].surface ) )
			{
				mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "successfully created VDPAU surface %x\n",
					self->vdpau->render_states[i].surface );
				mlt_deque_push_back( self->vdpau->deque, &self->vdpau->render_states[i] );
			}
			else
			{
				mlt_log_info( MLT_PRODUCER_SERVICE(self->parent), "failed to create VDPAU surface %dx%d\n",
					self->video_codec->width, self->video_codec->height );
				while ( mlt_deque_count( self->vdpau->deque ) )
				{
					struct vdpau_render_state *render = mlt_deque_pop_front( self->vdpau->deque );
					vdp_surface_destroy( render->surface );
				}
				mlt_deque_close( self->vdpau->deque );
				success = 0;
				break;
			}
		}
		if ( self->vdpau )
			self->vdpau->b_age = self->vdpau->ip_age[0] = self->vdpau->ip_age[1] = 256*256*256*64; // magic from Avidemux
	}
	else
	{
		success = 0;
		self->vdpau->decoder = VDP_INVALID_HANDLE;
		mlt_log_error( MLT_PRODUCER_SERVICE(self->parent), "VDPAU failed to initialize decoder (%s)\n",
			vdp_get_error_string( status ) );
	}
	
	return success;
}
Example #7
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	int error = 0;
	mlt_filter filter = (mlt_filter)mlt_frame_pop_service( frame );
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	char* rect_str = mlt_properties_get( filter_properties, "rect" );
	if ( !rect_str )
	{
		mlt_log_warning( MLT_FILTER_SERVICE(filter), "rect property not set\n" );
		return mlt_frame_get_image( frame, image, format, width, height, writable );
	}
	mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
	mlt_position position = mlt_filter_get_position( filter, frame );
	mlt_position length = mlt_filter_get_length2( filter, frame );
	mlt_rect rect = mlt_properties_anim_get_rect( filter_properties, "rect", position, length );
	if ( strchr( rect_str, '%' ) )
	{
		rect.x *= profile->width;
		rect.w *= profile->width;
		rect.y *= profile->height;
		rect.h *= profile->height;
	}
	rect = constrain_rect( rect, profile->width, profile->height );
	if ( rect.w < 1 || rect.h < 1 )
	{
		mlt_log_info( MLT_FILTER_SERVICE(filter), "rect invalid\n" );
		return mlt_frame_get_image( frame, image, format, width, height, writable );
	}

	switch( *format )
	{
		case mlt_image_rgb24a:
		case mlt_image_rgb24:
		case mlt_image_yuv422:
		case mlt_image_yuv420p:
			// These formats are all supported
			break;
		default:
			*format = mlt_image_rgb24a;
			break;
	}
	error = mlt_frame_get_image( frame, image, format, width, height, 1 );
	if (error) return error;

	int i;
	switch( *format )
	{
		case mlt_image_rgb24a:
			for ( i = 0; i < 4; i++ )
			{
				remove_spot_channel( *image + i, *width, 4, rect );
			}
			break;
		case mlt_image_rgb24:
			for ( i = 0; i < 3; i++ )
			{
				remove_spot_channel( *image + i, *width, 3, rect );
			}
			break;
		case mlt_image_yuv422:
			// Y
			remove_spot_channel( *image, *width, 2, rect );
			// U
			remove_spot_channel( *image + 1, *width / 2, 4,
								 constrain_rect( scale_rect( rect, 2, 1 ), *width / 2, *height ) );
			// V
			remove_spot_channel( *image + 3, *width / 2, 4,
								 constrain_rect( scale_rect( rect, 2, 1 ), *width / 2, *height ) );
			break;
		case mlt_image_yuv420p:
			// Y
			remove_spot_channel( *image, *width, 1, rect );
			// U
			remove_spot_channel( *image + (*width * *height), *width / 2, 1,
								 constrain_rect( scale_rect( rect, 2, 2 ), *width / 2, *height / 2 ) );
			// V
			remove_spot_channel( *image + (*width * *height * 5 / 4), *width / 2, 1,
								 constrain_rect( scale_rect( rect, 2, 2 ), *width / 2, *height / 2 ) );
			break;
		default:
			return 1;
	}

	uint8_t *alpha = mlt_frame_get_alpha( frame );
	if ( alpha && *format != mlt_image_rgb24a )
	{
		remove_spot_channel( alpha, *width, 1, rect );
	}

	return error;
}
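
constrain_rect() and scale_rect() are helpers defined elsewhere in this filter. Plausible sketches inferred from how they are called above, not the filter's actual definitions: scale_rect() shrinks a rect down to subsampled chroma-plane coordinates, and constrain_rect() clamps it to the plane bounds.

static mlt_rect scale_rect( mlt_rect rect, int x_scale, int y_scale )
{
	// Assumed behavior: divide the rect down to chroma-plane coordinates.
	rect.x /= x_scale;
	rect.w /= x_scale;
	rect.y /= y_scale;
	rect.h /= y_scale;
	return rect;
}

static mlt_rect constrain_rect( mlt_rect rect, int width, int height )
{
	// Assumed behavior: clamp the rect so it lies entirely within the plane.
	if ( rect.x < 0 ) { rect.w += rect.x; rect.x = 0; }
	if ( rect.y < 0 ) { rect.h += rect.y; rect.y = 0; }
	if ( rect.x + rect.w > width ) rect.w = width - rect.x;
	if ( rect.y + rect.h > height ) rect.h = height - rect.y;
	return rect;
}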
Example #8
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Check if the channel configuration has changed
	int prev_channels = mlt_properties_get_int( filter_properties, "_prev_channels" );
	if ( prev_channels != *channels )
	{
		if( prev_channels )
		{
			mlt_log_info( MLT_FILTER_SERVICE(filter), "Channel configuration changed. Old: %d New: %d.\n", prev_channels, *channels );
			mlt_properties_set_data( filter_properties, "jackrack", NULL, 0, (mlt_destructor) NULL, NULL );
		}
		mlt_properties_set_int( filter_properties, "_prev_channels", *channels );
	}

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain &&
		 mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		// Resize the buffer if necessary.
		if ( *channels < jackrack->channels )
		{
			// Add extra channels to satisfy the plugin.
			// Extra channels in the buffer will be ignored by downstream services.
			int old_size = mlt_audio_format_size( *format, *samples, *channels );
			int new_size = mlt_audio_format_size( *format, *samples, jackrack->channels );
			uint8_t* new_buffer = mlt_pool_alloc( new_size );
			memcpy( new_buffer, *buffer, old_size );
			// Put silence in extra channels.
			memset( new_buffer + old_size, 0, new_size - old_size );
			mlt_frame_set_audio( frame, new_buffer, *format, new_size, mlt_pool_release );
			*buffer = new_buffer;
		}

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < jackrack->channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers
		LADSPA_Data **input_buffers  = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		
		// Some plugins crash with too many frames (samples).
		// So, feed the plugin with N samples per loop iteration.
		int samples_offset = 0;
		int sample_count = MIN(*samples, MAX_SAMPLE_COUNT);
		for (i = 0; samples_offset < *samples; i++) {
			int j = 0;
			for (; j < jackrack->channels; j++)
				output_buffers[j] = input_buffers[j] = (LADSPA_Data*) *buffer + j * (*samples) + samples_offset;
			sample_count = MIN(*samples - samples_offset, MAX_SAMPLE_COUNT);
			// Do LADSPA processing
			error = process_ladspa( jackrack->procinfo, sample_count, input_buffers, output_buffers );
			samples_offset += MAX_SAMPLE_COUNT;
		}

		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
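
Note the buffers handed to the plugin are planar (channel-major): mlt_audio_float is non-interleaved, so channel j of the chunk starting at sample offset off begins at buffer + j * total_samples + off, which is exactly the indexing used in the loop above. A one-line restatement of that offset math:

static float* chunk_channel( float* buffer, int total_samples, int channel, int off )
{
	// Planar layout: each channel occupies a contiguous run of total_samples floats.
	return buffer + channel * total_samples + off;
}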
Example #9
    bool start( mlt_profile profile = 0 )
    {
        if ( m_started )
            return false;
        try
        {
            // Initialize some members
            if ( !profile )
                profile = mlt_service_profile( MLT_PRODUCER_SERVICE( getProducer() ) );

            m_vancLines = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "vanc" );
            if ( m_vancLines == -1 )
                m_vancLines = profile->height <= 512 ? 26 : 32;

            // Get the display mode
            BMDDisplayMode displayMode = getDisplayMode( profile, m_vancLines );
            if ( displayMode == (BMDDisplayMode) bmdDisplayModeNotSupported )
            {
                mlt_log_info( getProducer(), "profile = %dx%d %f fps %s\n", profile->width, profile->height,
                              mlt_profile_fps( profile ), profile->progressive? "progressive" : "interlace" );
                throw "Profile is not compatible with decklink.";
            }

            // Determine if supports input format detection
#ifdef WIN32
            BOOL doesDetectFormat = FALSE;
#else
            bool doesDetectFormat = false;
#endif
            IDeckLinkAttributes *decklinkAttributes = 0;
            if ( m_decklink->QueryInterface( IID_IDeckLinkAttributes, (void**) &decklinkAttributes ) == S_OK )
            {
                if ( decklinkAttributes->GetFlag( BMDDeckLinkSupportsInputFormatDetection, &doesDetectFormat ) != S_OK )
                    doesDetectFormat = false;
                SAFE_RELEASE( decklinkAttributes );
            }
            mlt_log_verbose( getProducer(), "%s format detection\n", doesDetectFormat ? "supports" : "does not support" );

            // Enable video capture
            BMDPixelFormat pixelFormat = bmdFormat8BitYUV;
            BMDVideoInputFlags flags = doesDetectFormat ? bmdVideoInputEnableFormatDetection : bmdVideoInputFlagDefault;
            if ( S_OK != m_decklinkInput->EnableVideoInput( displayMode, pixelFormat, flags ) )
                throw "Failed to enable video capture.";

            // Enable audio capture
            BMDAudioSampleRate sampleRate = bmdAudioSampleRate48kHz;
            BMDAudioSampleType sampleType = bmdAudioSampleType16bitInteger;
            int channels = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" );
            if ( S_OK != m_decklinkInput->EnableAudioInput( sampleRate, sampleType, channels ) )
                throw "Failed to enable audio capture.";

            // Start capture
            m_dropped = 0;
            mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "dropped", m_dropped );
            m_started = m_decklinkInput->StartStreams() == S_OK;
            if ( !m_started )
                throw "Failed to start capture.";
        }
        catch ( const char *error )
        {
            m_decklinkInput->DisableVideoInput();
            mlt_log_error( getProducer(), "%s\n", error );
            return false;
        }
        return true;
    }
Example #10
static void init_apply_data( mlt_filter filter, mlt_frame frame, VSPixelFormat vs_format, int width, int height )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	vs_data* data = (vs_data*)filter->child;
	vs_apply* apply_data = (vs_apply*)calloc( 1, sizeof(vs_apply) ); // calloc() already zero-fills
	char* filename = mlt_properties_get( properties, "results" );

	mlt_log_info( MLT_FILTER_SERVICE(filter), "Load results from %s\n", filename );

	// Initialize the VSTransformConfig
	get_transform_config( &apply_data->conf, filter, frame );

	// Initialize VSTransformData
	VSFrameInfo fi_src, fi_dst;
	vsFrameInfoInit( &fi_src, width, height, vs_format );
	vsFrameInfoInit( &fi_dst, width, height, vs_format );
	vsTransformDataInit( &apply_data->td, &apply_data->conf, &fi_src, &fi_dst );

	// Initialize VSTransformations
	vsTransformationsInit( &apply_data->trans );

	// Convert file name string encoding.
	mlt_properties_from_utf8( properties, "results", "_results" );
	filename = mlt_properties_get( properties, "_results" );

	// Load the motions from the analyze step and convert them to VSTransformations
	FILE* f = fopen( filename, "r" );
	VSManyLocalMotions mlms;

	if( f && vsReadLocalMotionsFile( f, &mlms ) == VS_OK )
	{
		int i = 0;
		mlt_log_info( MLT_FILTER_SERVICE(filter), "Successfully loaded %d motions\n", vs_vector_size( &mlms ) );
		vsLocalmotions2Transforms( &apply_data->td, &mlms, &apply_data->trans );
		vsPreprocessTransforms( &apply_data->td, &apply_data->trans );

		// Free the MultipleLocalMotions
		for( i = 0; i < vs_vector_size( &mlms ); i++ )
		{
			LocalMotions* lms = (LocalMotions*)vs_vector_get( &mlms, i );
			if( lms )
			{
				vs_vector_del( lms );
			}
		}
		vs_vector_del( &mlms );

		data->apply_data = apply_data;
	}
	else
	{
		mlt_log_error( MLT_FILTER_SERVICE(filter), "Cannot read results file: %s\n", filename );
		destory_apply_data( apply_data );
		data->apply_data = NULL;
	}

	if( f )
	{
		fclose( f );
	}
}
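
As with the analysis path in Example #2, the apply path keeps its state in a small struct defined elsewhere in the module. A sketch inferred from the fields initialized above (the layout is an assumption):

typedef struct
{
	VSTransformConfig conf;  // filled by get_transform_config()
	VSTransformData td;      // per-frame transform context
	VSTransformations trans; // transforms loaded from the results file
} vs_apply;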
Example #11
static int consumer_play_audio( consumer_sdl self, mlt_frame frame, int init_audio, int *duration )
{
	// Get the properties of self consumer
	mlt_properties properties = self->properties;
	mlt_audio_format afmt = mlt_audio_s16;

	// Set the preferred params of the test card signal
	int channels = mlt_properties_get_int( properties, "channels" );
	int frequency = mlt_properties_get_int( properties, "frequency" );
	int scrub = mlt_properties_get_int( properties, "scrub_audio" );
	static int counter = 0;

	int samples = mlt_sample_calculator( mlt_properties_get_double( self->properties, "fps" ), frequency, counter++ );
	int16_t *pcm;
	mlt_frame_get_audio( frame, (void**) &pcm, &afmt, &frequency, &channels, &samples );
	*duration = ( ( samples * 1000 ) / frequency );
	pcm += mlt_properties_get_int( properties, "audio_offset" );

	if ( mlt_properties_get_int( properties, "audio_off" ) )
	{
		self->playing = 1;
		init_audio = 1;
		return init_audio;
	}

	if ( init_audio == 1 )
	{
		SDL_AudioSpec request;
		SDL_AudioSpec got;
		SDL_AudioDeviceID dev;
		int audio_buffer = mlt_properties_get_int( properties, "audio_buffer" );

		// specify audio format
		memset( &request, 0, sizeof( SDL_AudioSpec ) );
		self->playing = 0;
		request.freq = frequency;
		request.format = AUDIO_S16SYS;
		request.channels = mlt_properties_get_int( properties, "channels" );
		request.samples = audio_buffer;
		request.callback = sdl_fill_audio;
		request.userdata = (void *)self;

		dev = sdl2_open_audio( &request, &got );
		if( dev == 0 )
		{
			mlt_log_error( MLT_CONSUMER_SERVICE( &self->parent ), "SDL failed to open audio\n" );
			init_audio = 2;
		}
		else
		{
			if( got.channels != request.channels )
			{
				mlt_log_info( MLT_CONSUMER_SERVICE( &self->parent ), "Unable to output %d channels. Changing to %d\n", request.channels, got.channels );
			}
			mlt_log_info( MLT_CONSUMER_SERVICE( &self->parent ), "Audio Opened: driver=%s channels=%d frequency=%d\n", SDL_GetCurrentAudioDriver(), got.channels, got.freq );
			SDL_PauseAudioDevice( dev, 0 );
			init_audio = 0;
			self->out_channels = got.channels;
		}
	}

	if ( init_audio == 0 )
	{
		mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
		int samples_copied = 0;
		int dst_stride = self->out_channels * sizeof( *pcm );

		pthread_mutex_lock( &self->audio_mutex );

		while ( self->running && samples_copied < samples )
		{
			int sample_space = ( sizeof( self->audio_buffer ) - self->audio_avail ) / dst_stride;
			while ( self->running && sample_space == 0 )
			{
				pthread_cond_wait( &self->audio_cond, &self->audio_mutex );
				sample_space = ( sizeof( self->audio_buffer ) - self->audio_avail ) / dst_stride;
			}
			if ( self->running )
			{
				int samples_to_copy = samples - samples_copied;
				if ( samples_to_copy > sample_space )
				{
					samples_to_copy = sample_space;
				}
				int dst_bytes = samples_to_copy * dst_stride;

				if ( scrub || mlt_properties_get_double( properties, "_speed" ) == 1 )
				{
					if ( channels == self->out_channels )
					{
						memcpy( &self->audio_buffer[ self->audio_avail ], pcm, dst_bytes );
						pcm += samples_to_copy * channels;
					}
					else
					{
						int16_t *dest = (int16_t*) &self->audio_buffer[ self->audio_avail ];
						int i = samples_to_copy + 1;
						while ( --i )
						{
							memcpy( dest, pcm, dst_stride );
							pcm += channels;
							dest += self->out_channels;
						}
					}
				}
				else
				{
					memset( &self->audio_buffer[ self->audio_avail ], 0, dst_bytes );
					pcm += samples_to_copy * channels;
				}
				self->audio_avail += dst_bytes;
				samples_copied += samples_to_copy;
			}
			pthread_cond_broadcast( &self->audio_cond );
		}
		pthread_mutex_unlock( &self->audio_mutex );
	}
	else
	{
		self->playing = 1;
	}

	return init_audio;
}
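
The copy loop above is the producing half of a bounded-buffer handoff: it blocks on audio_cond whenever the ring buffer is full, and the SDL callback installed as sdl_fill_audio drains audio_buffer under the same mutex and signals the same condition. A minimal sketch of that draining side, under those assumptions (this is not the consumer's actual callback):

static void sdl_fill_audio_sketch( void *udata, uint8_t *stream, int len )
{
	consumer_sdl self = udata;
	pthread_mutex_lock( &self->audio_mutex );
	// Copy out as much as is buffered, then compact the buffer.
	int bytes = len < self->audio_avail ? len : self->audio_avail;
	memcpy( stream, self->audio_buffer, bytes );
	self->audio_avail -= bytes;
	memmove( self->audio_buffer, self->audio_buffer + bytes, self->audio_avail );
	// Pad with silence on underrun.
	if ( bytes < len )
		memset( stream + bytes, 0, len - bytes );
	// Wake the producer loop waiting for space in consumer_play_audio().
	pthread_cond_broadcast( &self->audio_cond );
	pthread_mutex_unlock( &self->audio_mutex );
}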
Example #12
	HRESULT render( mlt_frame frame )
	{
		HRESULT result = S_OK;
		// Get the audio		
		double speed = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame), "_speed" );
		if ( speed == 1.0 )
		{
			mlt_audio_format format = mlt_audio_s16;
			int frequency = bmdAudioSampleRate48kHz;
			int samples = mlt_sample_calculator( m_fps, frequency, m_count );
			int16_t *pcm = 0;
			
			if ( !mlt_frame_get_audio( frame, (void**) &pcm, &format, &frequency, &m_channels, &samples ) )
			{
				int count = samples;
				
				if ( !m_isPrerolling )
				{
					uint32_t audioCount = 0;
					uint32_t videoCount = 0;
					
					// Check for resync
					m_deckLinkOutput->GetBufferedAudioSampleFrameCount( &audioCount );
					m_deckLinkOutput->GetBufferedVideoFrameCount( &videoCount );
					
					// Underflow typically occurs during non-normal speed playback.
					if ( audioCount < 1 || videoCount < 1 )
					{
						// Upon switching to normal playback, buffer some frames faster than realtime.
						mlt_log_info( &m_consumer, "buffer underrun: audio buf %u video buf %u frames\n", audioCount, videoCount );
						m_prerollCounter = 0;
					}
					
					// While rebuffering
					if ( isBuffering() )
					{
						// Only append audio to reach the ideal level and not overbuffer.
						int ideal = ( m_preroll - 1 ) * bmdAudioSampleRate48kHz / m_fps;
						int actual = m_fifo->used / m_channels + audioCount;
						int diff = ideal / 2 - actual;
						count = diff < 0 ? 0 : diff < count ? diff : count;
					}
				}
				if ( count > 0 )
					sample_fifo_append( m_fifo, pcm, count * m_channels );
			}
		}
		
		// Create video frames while pre-rolling
		if ( m_isPrerolling )
		{
			createFrame();
			if ( !m_videoFrame )
			{
				mlt_log_error( &m_consumer, "failed to create video frame\n" );
				return S_FALSE;
			}
		}
		
		// Get the video
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered") )
		{
			mlt_image_format format = mlt_image_yuv422;
			uint8_t* image = 0;
			uint8_t* buffer = 0;

			if ( !mlt_frame_get_image( frame, &image, &format, &m_width, &m_height, 0 ) )
			{
				m_videoFrame = (IDeckLinkMutableVideoFrame*) mlt_deque_pop_back( m_videoFrameQ );
				m_videoFrame->GetBytes( (void**) &buffer );
				if ( m_displayMode->GetFieldDominance() == bmdUpperFieldFirst )
					// convert lower field first to top field first
					swab( image, buffer + m_width * 2, m_width * ( m_height - 1 ) * 2 );
				else
					swab( image, buffer, m_width * m_height * 2 );
				m_deckLinkOutput->ScheduleVideoFrame( m_videoFrame, m_count * m_duration, m_duration, m_timescale );
				mlt_deque_push_front( m_videoFrameQ, m_videoFrame );
			}
		}
		else
		{
			mlt_log_verbose( &m_consumer, "dropped video frame\n" );
		}
		++m_count;

		// Check for end of pre-roll
		if ( ++m_prerollCounter > m_preroll && m_isPrerolling )
		{
			// Start audio and video output
			m_deckLinkOutput->EndAudioPreroll();
			m_deckLinkOutput->StartScheduledPlayback( 0, m_timescale, 1.0 );
			m_isPrerolling = false;
		}

		return result;
	}
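
The rebuffering branch above targets half of the ideal audio level: with a preroll of p frames at fps f, ideal = (p - 1) * 48000 / f samples per channel, and the append count is clamped into [0, count]. The clamp, restated on its own:

static int samples_to_append( int ideal, int actual, int available )
{
	// Append only enough to reach half the ideal buffered level,
	// never more than the samples available from this frame.
	int diff = ideal / 2 - actual;
	return diff < 0 ? 0 : ( diff < available ? diff : available );
}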
Example #13
File: plugin_mgr.c Project: aib/mlt
static void
plugin_mgr_get_object_file_plugins (plugin_mgr_t * plugin_mgr, const char * filename)
{
  const char * dlerr;
  void * dl_handle;
  LADSPA_Descriptor_Function get_descriptor;
  const LADSPA_Descriptor * descriptor;
  unsigned long plugin_index;
  plugin_desc_t * desc, * other_desc = NULL;
  GSList * list;
  gboolean exists;
  int err;
  
  /* open the object file */
  dl_handle = dlopen (filename, RTLD_NOW);
  if (!dl_handle)
    {
      mlt_log_info( NULL, "%s: error opening shared object file '%s': %s\n",
               __FUNCTION__, filename, dlerror());
      return;
    }
  
  
  /* get the get_descriptor function */
  dlerror (); /* clear the error report */
  
  get_descriptor = (LADSPA_Descriptor_Function)
    dlsym (dl_handle, "ladspa_descriptor");
  
  dlerr = dlerror();
  if (dlerr) {
    mlt_log_info( NULL, "%s: error finding ladspa_descriptor symbol in object file '%s': %s\n",
             __FUNCTION__, filename, dlerr);
    dlclose (dl_handle);
    return;
  }
  
#ifdef __DARWIN__
  if (!get_descriptor (0)) {
    void (*constructor)(void) = dlsym (dl_handle, "_init");
    if (constructor) constructor();
  }
#endif

  plugin_index = 0;
  while ( (descriptor = get_descriptor (plugin_index)) )
    {
      if (!plugin_is_valid (descriptor))
        {
          plugin_index++;
          continue;
        }

      
      /* check it doesn't already exist */
      exists = FALSE;
      for (list = plugin_mgr->all_plugins; list; list = g_slist_next (list))
        {
          other_desc = (plugin_desc_t *) list->data;
          
          if (other_desc->id == descriptor->UniqueID)
            {
              exists = TRUE;
              break;
            }
        }
      
      if (exists)
        {
          mlt_log_info( NULL, "Plugin %ld exists in both '%s' and '%s'; using version in '%s'\n",
                  descriptor->UniqueID, other_desc->object_file, filename, other_desc->object_file);
          plugin_index++;
          continue;
        }

      
      desc = plugin_desc_new_with_descriptor (filename, plugin_index, descriptor);
      plugin_mgr->all_plugins = g_slist_append (plugin_mgr->all_plugins, desc);
      plugin_index++;
      plugin_mgr->plugin_count++;
      
      /* print in the splash screen */
      /* mlt_log_verbose( NULL, "Loaded plugin '%s'\n", desc->name); */
    }
  
  err = dlclose (dl_handle);
  if (err)
    {
      mlt_log_warning( NULL, "%s: error closing object file '%s': %s\n",
               __FUNCTION__, filename, dlerror ());
    }
}