Example 1
static int get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	context cx = mlt_frame_pop_audio( frame );
	mlt_frame nested_frame = mlt_frame_pop_audio( frame );
	int result = 0;

	// if not repeating last frame
	if ( mlt_frame_get_position( nested_frame ) != cx->audio_position )
	{
		double fps = mlt_profile_fps( cx->profile );
		if ( mlt_producer_get_fps( cx->self ) < fps )
			fps = mlt_producer_get_fps( cx->self );
		*samples = mlt_sample_calculator( fps, *frequency, cx->audio_counter++ );
		result = mlt_frame_get_audio( nested_frame, buffer, format, frequency, channels, samples );
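		// Copy into a new buffer owned by this frame (released with mlt_pool_release)
		// rather than keeping a reference to the nested frame's buffer.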
		int size = mlt_audio_format_size( *format, *samples, *channels );
		int16_t *new_buffer = mlt_pool_alloc( size );

		mlt_frame_set_audio( frame, new_buffer, *format, size, mlt_pool_release );
		memcpy( new_buffer, *buffer, size );
		*buffer = new_buffer;
		cx->audio_position = mlt_frame_get_position( nested_frame );
	}
	else
	{
		// otherwise return no samples
		*samples = 0;
		*buffer = NULL;
	}

	return result;
}
Example 2
static int producer_get_audio( mlt_frame self, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	mlt_properties properties = MLT_FRAME_PROPERTIES( self );
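	// Pop the nested frame from the audio stack and get its audio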
	mlt_frame frame = mlt_frame_pop_audio( self );
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
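	// Attach the nested buffer to this frame with no destructor; this frame does not take ownership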
	mlt_frame_set_audio( self, *buffer, *format, mlt_audio_format_size( *format, *samples, *channels ), NULL );
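	// Record the negotiated audio parameters on the frame properties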
	mlt_properties_set_int( properties, "audio_frequency", *frequency );
	mlt_properties_set_int( properties, "audio_channels", *channels );
	mlt_properties_set_int( properties, "audio_samples", *samples );
	return 0;
}
Example 3
static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the producer service
	mlt_producer producer = mlt_properties_get_data( MLT_FRAME_PROPERTIES( frame ), "_producer_ladspa", NULL );
	mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );
	int size = 0;
	LADSPA_Data** output_buffers = NULL;
	int i = 0;

	// Initialize LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( producer_properties, "_jackrack", NULL );
	if ( !jackrack )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( producer_properties, *channels );
	}

	if( jackrack )
	{
		// Correct the returns if necessary
		*samples = *samples <= 0 ? 1920 : *samples;
		*channels = *channels <= 0 ? 2 : *channels;
		*frequency = *frequency <= 0 ? 48000 : *frequency;
		*format = mlt_audio_float;

		// Calculate the size of the buffer
		size = *samples * *channels * sizeof( float );

		// Allocate the buffer
		*buffer = mlt_pool_alloc( size );

		// Initialize the LADSPA output buffer.
		output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
		for ( i = 0; i < *channels; i++ )
		{
			output_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
		}

		// Do LADSPA processing
		process_ladspa( jackrack->procinfo, *samples, NULL, output_buffers );
		mlt_pool_release( output_buffers );

		// Set the buffer for destruction
		mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
	}

	return 0;
}
Example 4
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Used to return number of channels in the source
	int channels_avail = *channels;

	// Get the producer's audio
	int error = mlt_frame_get_audio( frame, buffer, format, frequency, &channels_avail, samples );
	if ( error ) return error;

	if ( channels_avail < *channels )
	{
		int size = mlt_audio_format_size( *format, *samples, *channels );
		int16_t *new_buffer = mlt_pool_alloc( size );

		// Duplicate the existing channels
		if ( *format == mlt_audio_s16 )
		{
			int i, j, k = 0;
			for ( i = 0; i < *samples; i++ )
			{
				for ( j = 0; j < *channels; j++ )
				{
					new_buffer[ ( i * *channels ) + j ] = ((int16_t*)(*buffer))[ ( i * channels_avail ) + k ];
					k = ( k + 1 ) % channels_avail;
				}
			}
		}
		else if ( *format == mlt_audio_s32le || *format == mlt_audio_f32le )
		{
			int32_t *p = (int32_t*) new_buffer;
			int i, j, k = 0;
			for ( i = 0; i < *samples; i++ )
			{
				for ( j = 0; j < *channels; j++ )
				{
					p[ ( i * *channels ) + j ] = ((int32_t*)(*buffer))[ ( i * channels_avail ) + k ];
					k = ( k + 1 ) % channels_avail;
				}
			}
		}
		else
		{
			// non-interleaved - s32 or float
			int size_avail = mlt_audio_format_size( *format, *samples, channels_avail );
			int32_t *p = (int32_t*) new_buffer;
			int i = *channels / channels_avail;
			while ( i-- )
			{
				memcpy( p, *buffer, size_avail );
				p += size_avail / sizeof(*p);
			}
			i = *channels % channels_avail;
			if ( i )
			{
				size_avail = mlt_audio_format_size( *format, *samples, i );
				memcpy( p, *buffer, size_avail );
			}
		}
		// Update the audio buffer now - destroys the old
		mlt_frame_set_audio( frame, new_buffer, *format, size, mlt_pool_release );
		*buffer = new_buffer;
	}
	else if ( channels_avail > *channels )
	{
		int size = mlt_audio_format_size( *format, *samples, *channels );
		int16_t *new_buffer = mlt_pool_alloc( size );

		// Drop all but the first *channels
		if ( *format == mlt_audio_s16 )
		{
			int i, j;
			for ( i = 0; i < *samples; i++ )
				for ( j = 0; j < *channels; j++ )
					new_buffer[ ( i * *channels ) + j ]	= ((int16_t*)(*buffer))[ ( i * channels_avail ) + j ];
		}
		else
		{
			// non-interleaved
			memcpy( new_buffer, *buffer, size );
		}
		// Update the audio buffer now - destroys the old
		mlt_frame_set_audio( frame, new_buffer, *format, size, mlt_pool_release );
		*buffer = new_buffer;
	}
	return error;
}
Example 5
static int transition_get_audio( mlt_frame frame_a, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the b frame from the stack
	mlt_frame frame_b = mlt_frame_pop_audio( frame_a );

	// Get the effect
	mlt_transition transition = mlt_frame_pop_audio( frame_a );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( frame_b );

	transition_mix self = transition->child;
	int16_t *buffer_b, *buffer_a;
	int frequency_b = *frequency, frequency_a = *frequency;
	int channels_b = *channels, channels_a = *channels;
	int samples_b = *samples, samples_a = *samples;

	// We can only mix s16
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame_b, (void**) &buffer_b, format, &frequency_b, &channels_b, &samples_b );
	mlt_frame_get_audio( frame_a, (void**) &buffer_a, format, &frequency_a, &channels_a, &samples_a );

	if ( buffer_b == buffer_a )
	{
		*samples = samples_b;
		*channels = channels_b;
		*buffer = buffer_b;
		*frequency = frequency_b;
		return error;
	}

	int silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio" );
	mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio", 0 );
	if ( silent )
		memset( buffer_a, 0, samples_a * channels_a * sizeof( int16_t ) );

	silent = mlt_properties_get_int( b_props, "silent_audio" );
	mlt_properties_set_int( b_props, "silent_audio", 0 );
	if ( silent )
		memset( buffer_b, 0, samples_b * channels_b * sizeof( int16_t ) );

	// determine number of samples to process
	*samples = MIN( self->src_buffer_count + samples_b, self->dest_buffer_count + samples_a );
	*channels = MIN( MIN( channels_b, channels_a ), MAX_CHANNELS );
	*frequency = frequency_a;

	// Prevent src buffer overflow by discarding oldest samples.
	samples_b = MIN( samples_b, MAX_SAMPLES * MAX_CHANNELS / channels_b );
	size_t bytes = PCM16_BYTES( samples_b, channels_b );
	if ( PCM16_BYTES( self->src_buffer_count + samples_b, channels_b ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: src_buffer_count %d\n",
					  self->src_buffer_count );
		self->src_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_b - samples_b;
		memmove( self->src_buffer, &self->src_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_b * channels_b],
				 PCM16_BYTES( samples_b, channels_b ) );
	}
	// Buffer new src samples.
	memcpy( &self->src_buffer[self->src_buffer_count * channels_b], buffer_b, bytes );
	self->src_buffer_count += samples_b;
	buffer_b = self->src_buffer;

	// Prevent dest buffer overflow by discarding oldest samples.
	samples_a = MIN( samples_a, MAX_SAMPLES * MAX_CHANNELS / channels_a );
	bytes = PCM16_BYTES( samples_a, channels_a );
	if ( PCM16_BYTES( self->dest_buffer_count + samples_a, channels_a ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: dest_buffer_count %d\n",
					  self->dest_buffer_count );
		self->dest_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_a - samples_a;
		memmove( self->dest_buffer, &self->dest_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_a * channels_a],
				 PCM16_BYTES( samples_a, channels_a ) );
	}
	// Buffer the new dest samples.
	memcpy( &self->dest_buffer[self->dest_buffer_count * channels_a], buffer_a, bytes );
	self->dest_buffer_count += samples_a;
	buffer_a = self->dest_buffer;

	// Do the mixing.
	if ( mlt_properties_get_int( MLT_TRANSITION_PROPERTIES(transition), "combine" ) )
	{
		double weight = 1.0;
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "meta.mixdown" ) )
			weight = 1.0 - mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame_a ), "meta.volume" );
		combine_audio( weight, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}
	else
	{
		double mix_start = 0.5, mix_end = 0.5;
		if ( mlt_properties_get( b_props, "audio.previous_mix" ) )
			mix_start = mlt_properties_get_double( b_props, "audio.previous_mix" );
		if ( mlt_properties_get( b_props, "audio.mix" ) )
			mix_end = mlt_properties_get_double( b_props, "audio.mix" );
		if ( mlt_properties_get_int( b_props, "audio.reverse" ) )
		{
			mix_start = 1.0 - mix_start;
			mix_end = 1.0 - mix_end;
		}
		mix_audio( mix_start, mix_end, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}

	// Copy the audio into the frame.
	bytes = PCM16_BYTES( *samples, *channels );
	*buffer = mlt_pool_alloc( bytes );
	memcpy( *buffer, buffer_a, bytes );
	mlt_frame_set_audio( frame_a, *buffer, *format, bytes, mlt_pool_release );

	if ( mlt_properties_get_int( b_props, "_speed" ) == 0 )
	{
		// Flush the buffer when paused and scrubbing.
		samples_b = self->src_buffer_count;
		samples_a = self->dest_buffer_count;
	}
	else
	{
		// Determine the maximum amount of latency permitted in the buffer.
		int max_latency = CLAMP( *frequency / 1000, 0, MAX_SAMPLES ); // samples in 1ms
		// samples_b becomes the new target src buffer count.
		samples_b = CLAMP( self->src_buffer_count - *samples, 0, max_latency );
		// samples_b becomes the number of samples to consume: difference between actual and the target.
		samples_b = self->src_buffer_count - samples_b;
		// samples_a becomes the new target dest buffer count.
		samples_a = CLAMP( self->dest_buffer_count - *samples, 0, max_latency );
		// samples_a becomes the number of samples to consume: difference between actual and the target.
		samples_a = self->dest_buffer_count - samples_a;
	}

	// Consume the src buffer.
	self->src_buffer_count -= samples_b;
	if ( self->src_buffer_count ) {
		memmove( self->src_buffer, &self->src_buffer[samples_b * channels_b],
			PCM16_BYTES( self->src_buffer_count, channels_b ));
	}
	// Consume the dest buffer.
	self->dest_buffer_count -= samples_a;
	if ( self->dest_buffer_count ) {
		memmove( self->dest_buffer, &self->dest_buffer[samples_a * channels_a],
			PCM16_BYTES( self->dest_buffer_count, channels_a ));
	}

	return error;
}
Example 6
static void foreach_consumer_put( mlt_consumer consumer, mlt_frame frame )
{
    mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
    mlt_consumer nested = NULL;
    char key[30];
    int index = 0;

    do {
        snprintf( key, sizeof(key), "%d.consumer", index++ );
        nested = mlt_properties_get_data( properties, key, NULL );
        if ( nested )
        {
            mlt_properties nested_props = MLT_CONSUMER_PROPERTIES(nested);
            double self_fps = mlt_properties_get_double( properties, "fps" );
            double nested_fps = mlt_properties_get_double( nested_props, "fps" );
            mlt_position nested_pos = mlt_properties_get_position( nested_props, "_multi_position" );
            mlt_position self_pos = mlt_frame_get_position( frame );
            double self_time = self_pos / self_fps;
            double nested_time = nested_pos / nested_fps;

            // get the audio for the current frame
            uint8_t *buffer = NULL;
            mlt_audio_format format = mlt_audio_s16;
            int channels = mlt_properties_get_int( properties, "channels" );
            int frequency = mlt_properties_get_int( properties, "frequency" );
            int current_samples = mlt_sample_calculator( self_fps, frequency, self_pos );
            mlt_frame_get_audio( frame, (void**) &buffer, &format, &frequency, &channels, &current_samples );
            int current_size = mlt_audio_format_size( format, current_samples, channels );

            // get any leftover audio
            int prev_size = 0;
            uint8_t *prev_buffer = mlt_properties_get_data( nested_props, "_multi_audio", &prev_size );
            uint8_t *new_buffer = NULL;
            if ( prev_size > 0 )
            {
                new_buffer = mlt_pool_alloc( prev_size + current_size );
                memcpy( new_buffer, prev_buffer, prev_size );
                memcpy( new_buffer + prev_size, buffer, current_size );
                buffer = new_buffer;
            }
            current_size += prev_size;
            current_samples += mlt_properties_get_int( nested_props, "_multi_samples" );

            while ( nested_time <= self_time )
            {
                // put ideal number of samples into cloned frame
                int deeply = index > 1 ? 1 : 0;
                mlt_frame clone_frame = mlt_frame_clone( frame, deeply );
                int nested_samples = mlt_sample_calculator( nested_fps, frequency, nested_pos );
                // -10 is an optimization to avoid tiny amounts of leftover samples
                nested_samples = nested_samples > current_samples - 10 ? current_samples : nested_samples;
                int nested_size = mlt_audio_format_size( format, nested_samples, channels );
                if ( nested_size > 0 )
                {
                    prev_buffer = mlt_pool_alloc( nested_size );
                    memcpy( prev_buffer, buffer, nested_size );
                }
                else
                {
                    prev_buffer = NULL;
                    nested_size = 0;
                }
                mlt_frame_set_audio( clone_frame, prev_buffer, format, nested_size, mlt_pool_release );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_samples", nested_samples );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_frequency", frequency );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_channels", channels );

                // chomp the audio
                current_samples -= nested_samples;
                current_size -= nested_size;
                buffer += nested_size;

                // send frame to nested consumer
                mlt_consumer_put_frame( nested, clone_frame );
                mlt_properties_set_position( nested_props, "_multi_position", ++nested_pos );
                nested_time = nested_pos / nested_fps;
            }

            // save any remaining audio
            if ( current_size > 0 )
            {
                prev_buffer = mlt_pool_alloc( current_size );
                memcpy( prev_buffer, buffer, current_size );
            }
            else
            {
                prev_buffer = NULL;
                current_size = 0;
            }
            mlt_pool_release( new_buffer );
            mlt_properties_set_data( nested_props, "_multi_audio", prev_buffer, current_size, mlt_pool_release, NULL );
            mlt_properties_set_int( nested_props, "_multi_samples", current_samples );
        }
    } while ( nested );
}
Example 7
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Check if the channel configuration has changed
	int prev_channels = mlt_properties_get_int( filter_properties, "_prev_channels" );
	if ( prev_channels != *channels )
	{
		if( prev_channels )
		{
			mlt_log_info( MLT_FILTER_SERVICE(filter), "Channel configuration changed. Old: %d New: %d.\n", prev_channels, *channels );
			mlt_properties_set_data( filter_properties, "jackrack", NULL, 0, (mlt_destructor) NULL, NULL );
		}
		mlt_properties_set_int( filter_properties, "_prev_channels", *channels );
	}

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain &&
		 mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		// Resize the buffer if necessary.
		if ( *channels < jackrack->channels )
		{
			// Add extra channels to satisfy the plugin.
			// Extra channels in the buffer will be ignored by downstream services.
			int old_size = mlt_audio_format_size( *format, *samples, *channels );
			int new_size = mlt_audio_format_size( *format, *samples, jackrack->channels );
			uint8_t* new_buffer = mlt_pool_alloc( new_size );
			memcpy( new_buffer, *buffer, old_size );
			// Put silence in extra channels.
			memset( new_buffer + old_size, 0, new_size - old_size );
			mlt_frame_set_audio( frame, new_buffer, *format, new_size, mlt_pool_release );
			*buffer = new_buffer;
		}

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < jackrack->channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers
		LADSPA_Data **input_buffers  = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		
		// Some plugins crash with too many frames (samples).
		// So, feed the plugin with N samples per loop iteration.
		int samples_offset = 0;
		int sample_count = MIN(*samples, MAX_SAMPLE_COUNT);
		for (i = 0; samples_offset < *samples; i++) {
			int j = 0;
			for (; j < jackrack->channels; j++)
				output_buffers[j] = input_buffers[j] = (LADSPA_Data*) *buffer + j * (*samples) + samples_offset;
			sample_count = MIN(*samples - samples_offset, MAX_SAMPLE_COUNT);
			// Do LADSPA processing
			error = process_ladspa( jackrack->procinfo, sample_count, input_buffers, output_buffers );
			samples_offset += MAX_SAMPLE_COUNT;
		}

		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
Example 8
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the properties of the a frame
	mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
	int channels_out = mlt_properties_get_int( properties, "mono.channels" );
	int i, j, size;

	// Get the producer's audio
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	if ( channels_out == -1 )
		channels_out = *channels;
	size = *samples * channels_out;

	switch ( *format )
	{
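		// Each case mixes the input channels down to a single value per sample,
		// then writes that value to every output channel.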
		case mlt_audio_s16:
		{
			size *= sizeof( int16_t );
			int16_t *new_buffer = mlt_pool_alloc( size );
			for ( i = 0; i < *samples; i++ )
			{
				int16_t mixdown = 0;
				for ( j = 0; j < *channels; j++ )
					mixdown += ((int16_t*) *buffer)[ ( i * *channels ) + j ] / *channels;
				for ( j = 0; j < channels_out; j++ )
					new_buffer[ ( i * channels_out ) + j ] = mixdown;
			}
			*buffer = new_buffer;
			break;
		}
		case mlt_audio_s32:
		{
			size *= sizeof( int32_t );
			int32_t *new_buffer = mlt_pool_alloc( size );
			for ( i = 0; i < *samples; i++ )
			{
				int32_t mixdown = 0;
				for ( j = 0; j < *channels; j++ )
					mixdown += ((int32_t*) *buffer)[ ( j * *samples ) + i ] / *channels;
				for ( j = 0; j < channels_out; j++ )
					new_buffer[ ( j * *samples ) + i ] = mixdown;
			}
			*buffer = new_buffer;
			break;
		}
		case mlt_audio_float:
		{
			size *= sizeof( float );
			float *new_buffer = mlt_pool_alloc( size );
			for ( i = 0; i < *samples; i++ )
			{
				float mixdown = 0;
				for ( j = 0; j < *channels; j++ )
					mixdown += ((float*) *buffer)[ ( j * *samples ) + i ] / *channels;
				for ( j = 0; j < channels_out; j++ )
					new_buffer[ ( j * *samples ) + i ] = mixdown;
			}
			*buffer = new_buffer;
			break;
		}
		default:
			mlt_log_error( NULL, "[filter mono] Invalid audio format\n" );
			break;
	}
	if ( size > *samples * channels_out )
	{
		mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
		*channels = channels_out;
	}

	return 0;
}
Example 9
    virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(
        IDeckLinkVideoInputFrame* video,
        IDeckLinkAudioInputPacket* audio )
    {
        if ( mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "preview" ) &&
                mlt_producer_get_speed( getProducer() ) == 0.0 && !mlt_deque_count( m_queue ))
        {
            pthread_cond_broadcast( &m_condition );
            return S_OK;
        }

        // Create mlt_frame
        mlt_frame frame = mlt_frame_init( MLT_PRODUCER_SERVICE( getProducer() ) );

        // Copy video
        if ( video )
        {
            if ( !( video->GetFlags() & bmdFrameHasNoInputSource ) )
            {
                int size = video->GetRowBytes() * ( video->GetHeight() + m_vancLines );
                void* image = mlt_pool_alloc( size );
                void* buffer = 0;
                unsigned char* p = (unsigned char*) image;
                int n = size / 2;
                // Initialize VANC lines to nominal black
                while ( --n )
                {
                    *p ++ = 16;
                    *p ++ = 128;
                }

                // Capture VANC
                if ( m_vancLines > 0 )
                {
                    IDeckLinkVideoFrameAncillary* vanc = 0;
                    if ( video->GetAncillaryData( &vanc ) == S_OK && vanc )
                    {
                        for ( int i = 1; i < m_vancLines + 1; i++ )
                        {
                            if ( vanc->GetBufferForVerticalBlankingLine( i, &buffer ) == S_OK )
                                swab( (char*) buffer, (char*) image + ( i - 1 ) * video->GetRowBytes(), video->GetRowBytes() );
                            else
                                mlt_log_debug( getProducer(), "failed capture vanc line %d\n", i );
                        }
                        SAFE_RELEASE(vanc);
                    }
                }

                // Capture image
                video->GetBytes( &buffer );
                if ( image && buffer )
                {
                    size =  video->GetRowBytes() * video->GetHeight();
                    swab( (char*) buffer, (char*) image + m_vancLines * video->GetRowBytes(), size );
                    mlt_frame_set_image( frame, (uint8_t*) image, size, mlt_pool_release );
                }
                else if ( image )
                {
                    mlt_log_verbose( getProducer(), "no video\n" );
                    mlt_pool_release( image );
                }
            }
            else
            {
                mlt_log_verbose( getProducer(), "no signal\n" );
                mlt_frame_close( frame );
                frame = 0;
            }

            // Get timecode
            IDeckLinkTimecode* timecode = 0;
            if ( video->GetTimecode( bmdTimecodeVITC, &timecode ) == S_OK && timecode )
            {
                DLString timecodeString = 0;

                if ( timecode->GetString( &timecodeString ) == S_OK )
                {
                    char* s = getCString( timecodeString );
                    mlt_properties_set( MLT_FRAME_PROPERTIES( frame ), "meta.attr.vitc.markup", s );
                    mlt_log_debug( getProducer(), "timecode %s\n", s );
                    freeCString( s );
                }
                freeDLString( timecodeString );
                SAFE_RELEASE( timecode );
            }
        }
        else
        {
            mlt_log_verbose( getProducer(), "no video\n" );
            mlt_frame_close( frame );
            frame = 0;
        }

        // Copy audio
        if ( frame && audio )
        {
            int channels = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" );
            int size = audio->GetSampleFrameCount() * channels * sizeof(int16_t);
            mlt_audio_format format = mlt_audio_s16;
            void* pcm = mlt_pool_alloc( size );
            void* buffer = 0;

            audio->GetBytes( &buffer );
            if ( buffer )
            {
                memcpy( pcm, buffer, size );
                mlt_frame_set_audio( frame, pcm, format, size, mlt_pool_release );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(frame), "audio_samples", audio->GetSampleFrameCount() );
            }
            else
            {
                mlt_log_verbose( getProducer(), "no audio\n" );
                mlt_pool_release( pcm );
            }
        }
        else
        {
            mlt_log_verbose( getProducer(), "no audio\n" );
        }

        // Put frame in queue
        if ( frame )
        {
            int queueMax = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "buffer" );
            pthread_mutex_lock( &m_mutex );
            if ( mlt_deque_count( m_queue ) < queueMax )
            {
                mlt_deque_push_back( m_queue, frame );
                pthread_cond_broadcast( &m_condition );
            }
            else
            {
                mlt_frame_close( frame );
                mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "dropped", ++m_dropped );
                mlt_log_warning( getProducer(), "frame dropped %d\n", m_dropped );
            }
            pthread_mutex_unlock( &m_mutex );
        }

        return S_OK;
    }
Example 10
static int resample_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	// Get the resample information
	int output_rate = mlt_properties_get_int( filter_properties, "frequency" );
	int16_t *sample_buffer = mlt_properties_get_data( filter_properties, "buffer", NULL );

	// Obtain the resample context if it exists
	ReSampleContext *resample = mlt_properties_get_data( filter_properties, "audio_resample", NULL );

	// If no resample frequency is specified, default to requested value
	if ( output_rate == 0 )
		output_rate = *frequency;

	// Get the producer's audio
	int error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	if ( error ) return error;

	// Only resample if the output rate differs from the source rate
	if ( output_rate != *frequency )
	{
		// Will store number of samples created
		int used = 0;

		mlt_log_debug( MLT_FILTER_SERVICE(filter), "channels %d samples %d frequency %d -> %d\n",
			*channels, *samples, *frequency, output_rate );

		// Do not convert to s16 unless we need to change the rate
		if ( *format != mlt_audio_s16 )
		{
			*format = mlt_audio_s16;
			mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
		}

		// Create a resampler if necessary
		if ( resample == NULL || *frequency != mlt_properties_get_int( filter_properties, "last_frequency" ) )
		{
			// Create the resampler
			resample = av_audio_resample_init( *channels, *channels, output_rate, *frequency,
				AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16, 16, 10, 0, 0.8 );

			// And store it on properties
			mlt_properties_set_data( filter_properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL );

			// And remember what it was created for
			mlt_properties_set_int( filter_properties, "last_frequency", *frequency );
		}

		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

		// Resample the audio
		used = audio_resample( resample, sample_buffer, *buffer, *samples );
		int size = used * *channels * sizeof( int16_t );

		// Resize if necessary
		if ( used > *samples )
		{
			*buffer = mlt_pool_realloc( *buffer, size );
			mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
		}

		// Copy samples
		memcpy( *buffer, sample_buffer, size );

		// Update output variables
		*samples = used;
		*frequency = output_rate;
	}
	else
	{
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
	}

	return error;
}
Example 11
static int producer_get_audio( mlt_frame frame, int16_t** buffer, mlt_audio_format* format, int* frequency, int* channels, int* samples )
{
	mlt_producer producer = (mlt_producer)mlt_frame_pop_audio( frame );
	mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );
	char* sound = mlt_properties_get( producer_properties, "sound" );
	double fps = mlt_producer_get_fps( producer );
	mlt_position position = mlt_frame_original_position( frame );
	int size = 0;
	int do_beep = 0;
	time_info info;

	if( fps == 0 ) fps = 25;

	// Correct the returns if necessary
	*format = mlt_audio_float;
	*frequency = *frequency <= 0 ? 48000 : *frequency;
	*channels = *channels <= 0 ? 2 : *channels;
	*samples = *samples <= 0 ? mlt_sample_calculator( fps, *frequency, position ) : *samples;

	// Allocate the buffer
	size = *samples * *channels * sizeof( float );
	*buffer = mlt_pool_alloc( size );

	mlt_service_lock( MLT_PRODUCER_SERVICE( producer ) );

	get_time_info( producer, frame, &info );

	// Determine if this should be a tone or silence.
	if( strcmp( sound, "none") )
	{
		if( !strcmp( sound, "2pop" ) )
		{
			mlt_position out = mlt_properties_get_int( producer_properties, "out" );
			mlt_position frames = out - position;

			if( frames == ( info.fps * 2 ) )
			{
				do_beep = 1;
			}
		}
		else if( !strcmp( sound, "frame0" ) )
		{
			if( info.frames == 0 )
			{
				do_beep = 1;
			}
		}
	}

	if( do_beep )
	{
		fill_beep( producer_properties, (float*)*buffer, *frequency, *channels, *samples );
	}
	else
	{
		// Fill silence.
		memset( *buffer, 0, size );
	}

	mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );

	// Set the buffer for destruction
	mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
	return 0;
}