/** Mix the audio of two frames (S16 interleaved) with a linear cross-fade.
 *
 * 'that' is the source (incoming) frame, 'frame' the destination; the mix is
 * written in place into the destination buffer, ramping the source weight from
 * weight_start to weight_end across the processed samples.
 *
 * Returns 0. The negotiated parameters are written back through the pointer
 * arguments.
 *
 * NOTE(review): the return values of mlt_frame_get_audio() are ignored and
 * src/dest are used unchecked — presumably the callers guarantee valid
 * buffers; confirm before hardening. Also, if the negotiated *samples is 0
 * the weight_step division is a float divide-by-zero (harmless here since
 * the loop body never runs, but worth a guard).
 */
static int mix_audio( mlt_frame frame, mlt_frame that, float weight_start, float weight_end, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int ret = 0;
	int16_t *src, *dest;
	// Seed each frame's request with the caller's preferred parameters.
	int frequency_src = *frequency, frequency_dest = *frequency;
	int channels_src = *channels, channels_dest = *channels;
	int samples_src = *samples, samples_dest = *samples;
	int i, j;
	double d = 0, s = 0;

	// Fetch both frames' audio; each may come back with different counts.
	mlt_frame_get_audio( that, (void**) &src, format, &frequency_src, &channels_src, &samples_src );
	mlt_frame_get_audio( frame, (void**) &dest, format, &frequency_dest, &channels_dest, &samples_dest );

	// Honor and clear the one-shot "silent_audio" flag on each frame by
	// zeroing that frame's buffer before mixing.
	int silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "silent_audio" );
	mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "silent_audio", 0 );
	if ( silent )
		memset( dest, 0, samples_dest * channels_dest * sizeof( int16_t ) );
	silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( that ), "silent_audio" );
	mlt_properties_set_int( MLT_FRAME_PROPERTIES( that ), "silent_audio", 0 );
	if ( silent )
		memset( src, 0, samples_src * channels_src * sizeof( int16_t ) );

	// determine number of samples to process: the common (minimum) extent
	*samples = samples_src < samples_dest ? samples_src : samples_dest;
	*channels = channels_src < channels_dest ? channels_src : channels_dest;
	*buffer = dest;
	*frequency = frequency_dest;

	// Compute a smooth ramp over start to end
	float weight = weight_start;
	float weight_step = ( weight_end - weight_start ) / *samples;

	// Same buffer on both frames: nothing to mix, return the source as-is.
	if ( src == dest )
	{
		*samples = samples_src;
		*channels = channels_src;
		*buffer = src;
		*frequency = frequency_src;
		return ret;
	}

	// Mixdown: per-sample linear blend, advancing the weight once per frame
	// of interleaved channels. (The j < channels_* guards are always true
	// here because *channels is the minimum of the two, so d and s are
	// refreshed on every iteration.)
	for ( i = 0; i < *samples; i++ )
	{
		for ( j = 0; j < *channels; j++ )
		{
			if ( j < channels_dest )
				d = (double) dest[ i * channels_dest + j ];
			if ( j < channels_src )
				s = (double) src[ i * channels_src + j ];
			dest[ i * channels_dest + j ] = s * weight + d * ( 1.0 - weight );
		}
		weight += weight_step;
	}
	return ret;
}
/** Fetch audio from the wrapped (nested) frame, copying it into a buffer
 * owned by the outer frame. When the nested frame has not advanced since
 * the previous call (frame repetition), no samples are returned.
 *
 * Returns the error code from mlt_frame_get_audio(), or 0.
 */
static int get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// The context and the wrapped frame were pushed onto this frame's audio stack.
	context cx = mlt_frame_pop_audio( frame );
	mlt_frame nested_frame = mlt_frame_pop_audio( frame );
	int error = 0;
	mlt_position nested_pos = mlt_frame_get_position( nested_frame );

	if ( nested_pos == cx->audio_position )
	{
		// Repeating the last frame — deliver no samples.
		*samples = 0;
		*buffer = NULL;
	}
	else
	{
		// Use the slower of the profile fps and the producer fps to size
		// the sample request.
		double fps = mlt_profile_fps( cx->profile );
		double self_fps = mlt_producer_get_fps( cx->self );
		if ( self_fps < fps )
			fps = self_fps;
		*samples = mlt_sample_calculator( fps, *frequency, cx->audio_counter++ );
		error = mlt_frame_get_audio( nested_frame, buffer, format, frequency, channels, samples );

		// Duplicate into a pool buffer owned by the outer frame so its
		// lifetime is decoupled from the nested frame.
		int size = mlt_audio_format_size( *format, *samples, *channels );
		int16_t *copy = mlt_pool_alloc( size );
		mlt_frame_set_audio( frame, copy, *format, size, mlt_pool_release );
		memcpy( copy, *buffer, size );
		*buffer = copy;

		cx->audio_position = nested_pos;
	}
	return error;
}
/** Get the frame's audio as f32le and feed it to the analysis stage,
 * skipping analysis when the frame position has not advanced (paused).
 *
 * Returns 0.
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Retrieve the filter that was pushed onto the frame's audio stack.
	mlt_filter filter = mlt_frame_pop_audio( frame );
	private_data* pdata = (private_data*)filter->child;
	mlt_service service = MLT_FILTER_SERVICE( filter );
	mlt_position position = mlt_frame_get_position( frame );

	// Request the producer's audio in 32-bit little-endian float.
	*format = mlt_audio_f32le;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	// The service must stay locked while touching the private data.
	mlt_service_lock( service );
	check_for_reset( filter, *channels, *frequency );
	// Only analyze when the position moved — i.e. the producer is not paused.
	if( position != pdata->prev_pos )
		analyze_audio( filter, *buffer, *samples );
	pdata->prev_pos = position;
	mlt_service_unlock( service );

	return 0;
}
/** Fetch audio from the nested frame and duplicate it into a buffer owned
 * by the outer frame.
 *
 * Returns the error code from mlt_frame_get_audio().
 *
 * Fix: the switch over *format was missing `break` statements — the s32 and
 * float cases fell through, multiplying the size twice and logging a bogus
 * "Invalid audio format" error for perfectly valid formats. Each case now
 * terminates properly.
 */
static int get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	mlt_frame nested_frame = mlt_frame_pop_audio( frame );
	int result = mlt_frame_get_audio( nested_frame, buffer, format, frequency, channels, samples );

	// Compute the byte size of the interleaved buffer from the sample type.
	int size = *channels * *samples;
	switch ( *format )
	{
		case mlt_audio_s16:
			size *= sizeof( int16_t );
			break;
		case mlt_audio_s32:
			size *= sizeof( int32_t );
			break;
		case mlt_audio_float:
			size *= sizeof( float );
			break;
		default:
			mlt_log_error( NULL, "[producer consumer] Invalid audio format\n" );
			break;
	}

	// Copy into a pool buffer attached to the outer frame so its lifetime
	// is independent of the nested frame.
	int16_t *new_buffer = mlt_pool_alloc( size );
	mlt_properties_set_data( MLT_FRAME_PROPERTIES( frame ), "audio", new_buffer, size, mlt_pool_release, NULL );
	memcpy( new_buffer, *buffer, size );
	*buffer = new_buffer;

	return result;
}
/** Push the frame's float audio through JACK via per-channel ringbuffers and
 * read the processed audio back in place.
 *
 * Returns 0.
 *
 * Fix: the read-back loop advanced `q` with a stray `q++` in addition to
 * indexing with `q + j * *samples`; each channel was therefore read back at
 * an offset of j floats, corrupting channel boundaries and overrunning the
 * buffer by (channels-1) floats on the last channel. The write loop's
 * addressing (`q + j * *samples`, no increment) is the correct layout, and
 * the read loop now mirrors it.
 *
 * NOTE(review): the synchronization phase is a busy-wait spin on the last
 * input ringbuffer (the condition-variable alternative was commented out
 * upstream); this burns CPU until JACK delivers — consider restoring the
 * pthread_cond_wait() approach.
 */
static int jackrack_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	int jack_frequency = mlt_properties_get_int( filter_properties, "_sample_rate" );

	// Get the producer's audio in float, at JACK's sample rate.
	*format = mlt_audio_float;
	mlt_frame_get_audio( frame, buffer, format, &jack_frequency, channels, samples );

	// TODO: Deal with sample rate differences
	if ( *frequency != jack_frequency )
		mlt_log_error( MLT_FILTER_SERVICE( filter ), "mismatching frequencies JACK = %d actual = %d\n",
			jack_frequency, *frequency );
	*frequency = jack_frequency;

	// Initialise Jack ports and connections if needed
	if ( mlt_properties_get_int( filter_properties, "_samples" ) == 0 )
		mlt_properties_set_int( filter_properties, "_samples", *samples );

	// Get the filter-specific properties
	jack_ringbuffer_t **output_buffers = mlt_properties_get_data( filter_properties, "output_buffers", NULL );
	jack_ringbuffer_t **input_buffers = mlt_properties_get_data( filter_properties, "input_buffers", NULL );

	// Process the audio: planar layout, channel j occupies samples [j * *samples, ...)
	float *q = (float*) *buffer;
	size_t size = *samples * sizeof(float);
	int j;

	// Write into output ringbuffer
	for ( j = 0; j < *channels; j++ )
	{
		if ( jack_ringbuffer_write_space( output_buffers[j] ) >= size )
			jack_ringbuffer_write( output_buffers[j], (char*)( q + j * *samples ), size );
	}

	// Synchronization phase - busy-wait until JACK has produced a full
	// period on the last channel's input ringbuffer.
	while ( jack_ringbuffer_read_space( input_buffers[ *channels - 1 ] ) < size ) ;

	// Read from input ringbuffer back into the same planar positions.
	for ( j = 0; j < *channels; j++ )
	{
		if ( jack_ringbuffer_read_space( input_buffers[j] ) >= size )
			jack_ringbuffer_read( input_buffers[j], (char*)( q + j * *samples ), size );
	}

	// help jack_sync() indicate when we are rolling
	mlt_position pos = mlt_frame_get_position( frame );
	mlt_properties_set_position( filter_properties, "_last_pos", pos );

	return 0;
}
/** Render audio waveforms on top of the frame's image.
 *
 * Fetches the image as rgb24a, obtains the frame's audio (preferring audio
 * already preprocessed by the consumer), draws waveforms into a QImage, and
 * converts back into the MLT image buffer.
 *
 * Returns the first error from image/audio retrieval, or 0.
 */
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *image_format, int *width, int *height, int writable )
{
	int error = 0;
	mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
	mlt_filter filter = (mlt_filter)mlt_frame_pop_service( frame );
	int samples = 0;
	int channels = 0;
	int frequency = 0;
	mlt_audio_format audio_format = mlt_audio_s16;
	int16_t* audio = (int16_t*)mlt_properties_get_data( frame_properties, "audio", NULL );

	if ( !audio && !preprocess_warned )
	{
		// This filter depends on the consumer processing the audio before the
		// video. If the audio is not preprocessed, this filter will process it.
		// If this filter processes the audio, it could cause confusion for the
		// consumer if it needs different audio properties.
		mlt_log_warning( MLT_FILTER_SERVICE(filter), "Audio not preprocessed. Potential audio distortion.\n" );
		preprocess_warned = true;
	}

	*image_format = mlt_image_rgb24a;

	// Get the current image
	error = mlt_frame_get_image( frame, image, image_format, width, height, writable );

	// Get the audio
	if( !error )
	{
		// Fall back to conventional defaults when the consumer did not
		// publish audio parameters on the frame (48 kHz stereo).
		frequency = mlt_properties_get_int( frame_properties, "audio_frequency" );
		if (!frequency) {
			frequency = 48000;
		}
		channels = mlt_properties_get_int( frame_properties, "audio_channels" );
		if (!channels) {
			channels = 2;
		}
		samples = mlt_properties_get_int( frame_properties, "audio_samples" );
		if (!samples) {
			// Derive the sample count for this position from the producer fps.
			mlt_producer producer = mlt_frame_get_original_producer( frame );
			double fps = mlt_producer_get_fps( mlt_producer_cut_parent( producer ) );
			samples = mlt_sample_calculator( fps, frequency, mlt_frame_get_position( frame ) );
		}
		error = mlt_frame_get_audio( frame, (void**)&audio, &audio_format, &frequency, &channels, &samples );
	}

	// Draw the waveforms
	if( !error )
	{
		// Round-trip through QImage: convert, draw, convert back in place.
		QImage qimg( *width, *height, QImage::Format_ARGB32 );
		convert_mlt_to_qimage_rgba( *image, &qimg, *width, *height );
		draw_waveforms( filter, frame, &qimg, audio, channels, samples );
		convert_qimage_to_mlt_rgba( &qimg, *image, *width, *height );
	}

	return error;
}
/** Delegate the audio request to the wrapped frame and mirror the result
 * (buffer and negotiated parameters) onto this frame's properties.
 *
 * Returns 0.
 */
static int producer_get_audio( mlt_frame self, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// The wrapped frame was pushed onto this frame's audio stack.
	mlt_frame nested = mlt_frame_pop_audio( self );
	mlt_properties props = MLT_FRAME_PROPERTIES( self );

	// Pull the audio from the wrapped frame, then attach the same buffer to
	// this frame (no destructor — the nested frame retains ownership).
	mlt_frame_get_audio( nested, buffer, format, frequency, channels, samples );
	int size = mlt_audio_format_size( *format, *samples, *channels );
	mlt_frame_set_audio( self, *buffer, *format, size, NULL );

	// Publish the negotiated audio parameters as frame properties.
	mlt_properties_set_int( props, "audio_frequency", *frequency );
	mlt_properties_set_int( props, "audio_channels", *channels );
	mlt_properties_set_int( props, "audio_samples", *samples );

	return 0;
}
/** Run the FFT filter over the frame's audio and stash a copy of the
 * resulting bins on the frame for later use by get_image().
 *
 * Returns 0 on success, 1 if the FFT filter could not be created.
 *
 * Fix: the original set "window_size" on MLT_FILTER_PROPERTIES( pdata->fft )
 * BEFORE checking whether mlt_factory_filter() returned NULL — a NULL
 * dereference when the factory fails. The NULL check now comes first.
 */
static int filter_get_audio( mlt_frame frame, void** buffer, mlt_audio_format* format, int* frequency, int* channels, int* samples )
{
	mlt_filter filter = (mlt_filter)mlt_frame_pop_audio( frame );
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	private_data* pdata = (private_data*)filter->child;

	// Create the FFT filter the first time.
	if( !pdata->fft )
	{
		mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE(filter) );
		pdata->fft = mlt_factory_filter( profile, "fft", NULL );
		if( !pdata->fft )
		{
			mlt_log_warning( MLT_FILTER_SERVICE(filter), "Unable to create FFT.\n" );
			return 1;
		}
		// Forward this filter's configured window size to the FFT filter.
		mlt_properties_set_int( MLT_FILTER_PROPERTIES( pdata->fft ), "window_size",
			mlt_properties_get_int( filter_properties, "window_size" ) );
	}
	mlt_properties fft_properties = MLT_FILTER_PROPERTIES( pdata->fft );

	// The service must stay locked while using the private data
	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	// Perform FFT processing on the frame
	mlt_filter_process( pdata->fft, frame );
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	float* bins = (float*)mlt_properties_get_data( fft_properties, "bins", NULL );
	if( bins )
	{
		double window_level = mlt_properties_get_double( fft_properties, "window_level" );
		int bin_count = mlt_properties_get_int( fft_properties, "bin_count" );
		size_t bins_size = bin_count * sizeof(float);
		float* save_bins = (float*)mlt_pool_alloc( bins_size );

		// Only keep the bins when a complete FFT window was accumulated;
		// otherwise store silence.
		if( window_level == 1.0 )
		{
			memcpy( save_bins, bins, bins_size );
		} else {
			memset( save_bins, 0, bins_size );
		}

		// Save the bin data as a property on the frame to be used in get_image()
		mlt_properties_set_data( MLT_FRAME_PROPERTIES(frame), pdata->fft_prop_name,
			save_bins, bins_size, mlt_pool_release, NULL );
	}

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
	return 0;
}
/** Scan one frame's audio for "blips" (sync pips) and update the detection
 * state machine in stats.
 *
 * A blip begins at the first sample exceeding BLIP_THRESHOLD in magnitude and
 * ends after one millisecond of continuous sub-threshold samples. The start
 * sample time of the two most recent blips is kept in stats->blip_history.
 *
 * Requests mono float audio at SAMPLE_FREQ; silently does nothing if the
 * frame cannot deliver that format.
 */
static void detect_blip( mlt_frame frame, mlt_position pos, double fps, avsync_stats* stats )
{
	int frequency = SAMPLE_FREQ;
	int channels = 1;
	int samples = mlt_sample_calculator( fps, frequency, pos );
	mlt_audio_format format = mlt_audio_float;
	float* buffer = NULL;
	int error = mlt_frame_get_audio( frame, (void**) &buffer, &format, &frequency, &channels, &samples );

	// Only proceed when the request was honored exactly (float, non-NULL).
	if ( !error && format == mlt_audio_float && buffer != NULL )
	{
		int i = 0;
		for( i = 0; i < samples; i++ )
		{
			if( !stats->blip_in_progress )
			{
				if( buffer[i] > BLIP_THRESHOLD || buffer[i] < -BLIP_THRESHOLD )
				{
					// This sample must start a blip
					stats->blip_in_progress = 1;
					stats->samples_since_blip = 0;
					// Shift the history and record the absolute sample time
					// of this blip's first sample.
					stats->blip_history[1] = stats->blip_history[0];
					stats->blip_history[0] = mlt_sample_calculator_to_now( fps, SAMPLE_FREQ, pos );
					stats->blip_history[0] += i;
					if( stats->blip_history_count < 2 )
					{
						stats->blip_history_count++;
					}
					stats->blip = 1;
				}
			}
			else
			{
				if( buffer[i] > -BLIP_THRESHOLD && buffer[i] < BLIP_THRESHOLD )
				{
					if( ++stats->samples_since_blip > frequency / 1000 )
					{
						// One ms of silence means the blip is over
						stats->blip_in_progress = 0;
						stats->samples_since_blip = 0;
					}
				}
				else
				{
					// Loud again: restart the silence counter.
					stats->samples_since_blip = 0;
				}
			}
		}
	}
}
/** Analyze the frame's audio and apply an exponential gain ramp from
 * pdata->start_gain to pdata->end_gain (dB) across the frame's samples.
 *
 * Detects clip changes via the original position and triggers a reset of the
 * analysis state when the position jumps by more than one frame.
 *
 * Returns 0.
 *
 * NOTE(review): when start_gain <= -90 dB, start_coeff is 0.0 and the
 * coeff_factor computation divides by zero (and multiplies 0 by inf in the
 * loop) — confirm intended behavior for fade-ins from silence.
 * NOTE(review): abs() is applied to an mlt_position difference — if
 * mlt_position is wider than int this truncates; likewise the "%d" log
 * format assumes int-sized positions. Verify against the mlt_position
 * typedef.
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	mlt_filter filter = mlt_frame_pop_audio( frame );
	private_data* pdata = (private_data*)filter->child;
	mlt_position o_pos = mlt_frame_original_position( frame );

	// Get the producer's audio as 32-bit little-endian float.
	*format = mlt_audio_f32le;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	if( abs( o_pos - pdata->prev_o_pos ) > 1 )
	{
		// Assume this is a new clip and restart.
		// Use original position so that transitions between clips are detected.
		pdata->reset = 1;
		mlt_log_info( MLT_FILTER_SERVICE( filter ), "Reset. Old Pos: %d\tNew Pos: %d\n", pdata->prev_o_pos, o_pos );
	}

	check_for_reset( filter, *channels, *frequency );

	if( o_pos != pdata->prev_o_pos )
	{
		// Only analyze the audio if the producer is not paused.
		analyze_audio( filter, *buffer, *samples, *frequency );
	}

	// Convert the endpoint gains from dB to linear coefficients; at or below
	// -90 dB treat the gain as full mute (coefficient 0).
	double start_coeff = pdata->start_gain > -90.0 ? pow(10.0, pdata->start_gain / 20.0) : 0.0;
	double end_coeff = pdata->end_gain > -90.0 ? pow(10.0, pdata->end_gain / 20.0) : 0.0;
	// Per-sample multiplicative step producing a geometric (dB-linear) ramp.
	double coeff_factor = pow( (end_coeff / start_coeff), 1.0 / (double)*samples );
	double coeff = start_coeff;
	float* p = *buffer;
	int s = 0;
	int c = 0;
	for( s = 0; s < *samples; s++ )
	{
		coeff = coeff * coeff_factor;
		// Apply the same coefficient to every channel of this sample frame.
		for ( c = 0; c < *channels; c++ )
		{
			*p = *p * coeff;
			p++;
		}
	}

	pdata->prev_o_pos = o_pos;

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	return 0;
}
/** Run the frame's float audio through a LADSPA plugin rack (in place).
 *
 * Lazily initialises the rack on first use, builds per-channel plane
 * pointers into the planar float buffer, and processes in place (input and
 * output planes alias the same memory).
 *
 * Returns nonzero when the rack exists and process_ladspa() reports an
 * error; 0 otherwise.
 *
 * NOTE(review): initialise_jack_rack() and the two mlt_pool_alloc() results
 * are used unchecked — presumably allocation failure aborts elsewhere;
 * confirm. Also note that a NULL rack yields error == 0 (silently skipping
 * processing) — verify that is the intended error convention.
 */
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Get the producer's audio as planar float.
	*format = mlt_audio_float;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	// Build per-channel plane pointers; channel i starts at i * *samples
	// floats into the planar buffer. Input and output alias the same
	// memory, so processing is in place.
	LADSPA_Data **input_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
	LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
	int i;
	for ( i = 0; i < *channels; i++ )
	{
		input_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
		output_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
	}

	// Do LADSPA processing
	int error = jackrack && process_ladspa( jackrack->procinfo, *samples, input_buffers, output_buffers );
	mlt_pool_release( input_buffers );
	mlt_pool_release( output_buffers );

	return error;
}
/** Mix or combine the audio of the A and B frames of a transition.
 *
 * Incoming samples from both frames are appended to persistent src/dest
 * ring-style buffers on the transition instance so that A and B can have
 * mismatched sample counts per frame; the common extent is then mixed (or
 * combined) and copied into a fresh buffer attached to frame_a. Consumed
 * samples are shifted out of the persistent buffers, keeping at most ~1 ms
 * of latency (or flushing entirely while scrubbing/paused).
 *
 * Returns 0. The statement order here (silent handling, buffering, mixing,
 * consumption) is load-bearing — modify with care.
 */
static int transition_get_audio( mlt_frame frame_a, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the b frame from the stack
	mlt_frame frame_b = mlt_frame_pop_audio( frame_a );

	// Get the effect
	mlt_transition transition = mlt_frame_pop_audio( frame_a );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( frame_b );

	transition_mix self = transition->child;
	int16_t *buffer_b, *buffer_a;
	int frequency_b = *frequency, frequency_a = *frequency;
	int channels_b = *channels, channels_a = *channels;
	int samples_b = *samples, samples_a = *samples;

	// We can only mix s16
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame_b, (void**) &buffer_b, format, &frequency_b, &channels_b, &samples_b );
	mlt_frame_get_audio( frame_a, (void**) &buffer_a, format, &frequency_a, &channels_a, &samples_a );

	// Both frames share one buffer: nothing to mix, pass it through.
	if ( buffer_b == buffer_a )
	{
		*samples = samples_b;
		*channels = channels_b;
		*buffer = buffer_b;
		*frequency = frequency_b;
		return error;
	}

	// Honor and clear each frame's one-shot "silent_audio" flag by zeroing
	// its buffer before mixing.
	int silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio" );
	mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio", 0 );
	if ( silent )
		memset( buffer_a, 0, samples_a * channels_a * sizeof( int16_t ) );
	silent = mlt_properties_get_int( b_props, "silent_audio" );
	mlt_properties_set_int( b_props, "silent_audio", 0 );
	if ( silent )
		memset( buffer_b, 0, samples_b * channels_b * sizeof( int16_t ) );

	// determine number of samples to process: what both sides can supply,
	// counting previously buffered samples.
	*samples = MIN( self->src_buffer_count + samples_b, self->dest_buffer_count + samples_a );
	*channels = MIN( MIN( channels_b, channels_a ), MAX_CHANNELS );
	*frequency = frequency_a;

	// Prevent src buffer overflow by discarding oldest samples.
	samples_b = MIN( samples_b, MAX_SAMPLES * MAX_CHANNELS / channels_b );
	size_t bytes = PCM16_BYTES( samples_b, channels_b );
	if ( PCM16_BYTES( self->src_buffer_count + samples_b, channels_b ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: src_buffer_count %d\n", self->src_buffer_count );
		self->src_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_b - samples_b;
		memmove( self->src_buffer, &self->src_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_b * channels_b],
				 PCM16_BYTES( samples_b, channels_b ) );
	}
	// Buffer new src samples.
	memcpy( &self->src_buffer[self->src_buffer_count * channels_b], buffer_b, bytes );
	self->src_buffer_count += samples_b;
	buffer_b = self->src_buffer;

	// Prevent dest buffer overflow by discarding oldest samples.
	samples_a = MIN( samples_a, MAX_SAMPLES * MAX_CHANNELS / channels_a );
	bytes = PCM16_BYTES( samples_a, channels_a );
	if ( PCM16_BYTES( self->dest_buffer_count + samples_a, channels_a ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: dest_buffer_count %d\n", self->dest_buffer_count );
		self->dest_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_a - samples_a;
		memmove( self->dest_buffer, &self->dest_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_a * channels_a],
				 PCM16_BYTES( samples_a, channels_a ) );
	}
	// Buffer the new dest samples.
	memcpy( &self->dest_buffer[self->dest_buffer_count * channels_a], buffer_a, bytes );
	self->dest_buffer_count += samples_a;
	buffer_a = self->dest_buffer;

	// Do the mixing.
	if ( mlt_properties_get_int( MLT_TRANSITION_PROPERTIES(transition), "combine" ) )
	{
		// "combine" mode: sum the tracks, optionally attenuating A when it
		// was already mixed down with a known volume.
		double weight = 1.0;
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "meta.mixdown" ) )
			weight = 1.0 - mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame_a ), "meta.volume" );
		combine_audio( weight, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}
	else
	{
		// Cross-fade mode: ramp from the previous mix level to the current
		// one, optionally reversed.
		double mix_start = 0.5, mix_end = 0.5;

		if ( mlt_properties_get( b_props, "audio.previous_mix" ) )
			mix_start = mlt_properties_get_double( b_props, "audio.previous_mix" );
		if ( mlt_properties_get( b_props, "audio.mix" ) )
			mix_end = mlt_properties_get_double( b_props, "audio.mix" );
		if ( mlt_properties_get_int( b_props, "audio.reverse" ) )
		{
			mix_start = 1.0 - mix_start;
			mix_end = 1.0 - mix_end;
		}

		mix_audio( mix_start, mix_end, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}

	// Copy the audio into the frame.
	bytes = PCM16_BYTES( *samples, *channels );
	*buffer = mlt_pool_alloc( bytes );
	memcpy( *buffer, buffer_a, bytes );
	mlt_frame_set_audio( frame_a, *buffer, *format, bytes, mlt_pool_release );

	if ( mlt_properties_get_int( b_props, "_speed" ) == 0 )
	{
		// Flush the buffer when paused and scrubbing.
		samples_b = self->src_buffer_count;
		samples_a = self->dest_buffer_count;
	}
	else
	{
		// Determine the maximum amount of latency permitted in the buffer.
		int max_latency = CLAMP( *frequency / 1000, 0, MAX_SAMPLES ); // samples in 1ms
		// samples_b becomes the new target src buffer count.
		samples_b = CLAMP( self->src_buffer_count - *samples, 0, max_latency );
		// samples_b becomes the number of samples to consume: difference between actual and the target.
		samples_b = self->src_buffer_count - samples_b;
		// samples_a becomes the new target dest buffer count.
		samples_a = CLAMP( self->dest_buffer_count - *samples, 0, max_latency );
		// samples_a becomes the number of samples to consume: difference between actual and the target.
		samples_a = self->dest_buffer_count - samples_a;
	}

	// Consume the src buffer.
	self->src_buffer_count -= samples_b;
	if ( self->src_buffer_count ) {
		memmove( self->src_buffer, &self->src_buffer[samples_b * channels_b],
			PCM16_BYTES( self->src_buffer_count, channels_b ));
	}
	// Consume the dest buffer.
	self->dest_buffer_count -= samples_a;
	if ( self->dest_buffer_count ) {
		memmove( self->dest_buffer, &self->dest_buffer[samples_a * channels_a],
			PCM16_BYTES( self->dest_buffer_count, channels_a ));
	}

	return error;
}
/** Resample the frame's audio to the filter's configured output rate using
 * libavcodec's ReSampleContext (recreated whenever the input rate changes).
 *
 * Returns the error from mlt_frame_get_audio(), or 0.
 *
 * Fix: the early `return error` after mlt_frame_get_audio() happened while
 * mlt_service_lock() was still held, so one failed audio fetch would
 * deadlock every subsequent call into this filter. The lock is now released
 * before that return.
 */
static int resample_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	// Get the resample information
	int output_rate = mlt_properties_get_int( filter_properties, "frequency" );
	int16_t *sample_buffer = mlt_properties_get_data( filter_properties, "buffer", NULL );

	// Obtain the resample context if it exists
	ReSampleContext *resample = mlt_properties_get_data( filter_properties, "audio_resample", NULL );

	// If no resample frequency is specified, default to requested value
	if ( output_rate == 0 )
		output_rate = *frequency;

	// Get the producer's audio
	int error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	if ( error )
	{
		// Release the service lock before bailing out (the original code
		// returned with the lock held, deadlocking later calls).
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
		return error;
	}

	// Return now if no work to do
	if ( output_rate != *frequency )
	{
		// Will store number of samples created
		int used = 0;

		mlt_log_debug( MLT_FILTER_SERVICE(filter), "channels %d samples %d frequency %d -> %d\n",
			*channels, *samples, *frequency, output_rate );

		// Do not convert to s16 unless we need to change the rate
		if ( *format != mlt_audio_s16 )
		{
			*format = mlt_audio_s16;
			mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
		}

		// Create a resampler if nececessary
		if ( resample == NULL || *frequency != mlt_properties_get_int( filter_properties, "last_frequency" ) )
		{
			// Create the resampler
			resample = av_audio_resample_init( *channels, *channels, output_rate, *frequency,
				AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16, 16, 10, 0, 0.8 );

			// And store it on properties
			mlt_properties_set_data( filter_properties, "audio_resample", resample, 0,
				( mlt_destructor )audio_resample_close, NULL );

			// And remember what it was created for
			mlt_properties_set_int( filter_properties, "last_frequency", *frequency );
		}
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

		// Resample the audio
		used = audio_resample( resample, sample_buffer, *buffer, *samples );
		int size = used * *channels * sizeof( int16_t );

		// Resize if necessary
		if ( used > *samples )
		{
			*buffer = mlt_pool_realloc( *buffer, size );
			mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
		}

		// Copy samples
		memcpy( *buffer, sample_buffer, size );

		// Update output variables
		*samples = used;
		*frequency = output_rate;
	}
	else
	{
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
	}

	return error;
}
/** Apply gain (optionally normalised to a target amplitude) to the frame's
 * S16 audio, ramping smoothly from the previous frame's gain and limiting or
 * clipping samples that would exceed full scale.
 *
 * Returns 0.
 *
 * NOTE(review): gain is divided by signal_max_power() / get_smoothed_data()
 * results — a silent input could make these zero; confirm those helpers
 * guard against division by zero. `samp_width` is a file-scope value not
 * visible here; the numeric limits below assume it describes the sample bit
 * width.
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter from the frame
	mlt_filter this = mlt_frame_pop_audio( frame );

	// Get the properties from the filter
	mlt_properties filter_props = MLT_FILTER_PROPERTIES( this );

	// Get the frame's filter instance properties
	mlt_properties instance_props = mlt_frame_unique_properties( frame, MLT_FILTER_SERVICE( this ) );

	// Get the parameters
	double gain = mlt_properties_get_double( instance_props, "gain" );
	double max_gain = mlt_properties_get_double( instance_props, "max_gain" );
	double limiter_level = 0.5; /* -6 dBFS */
	int normalise = mlt_properties_get_int( instance_props, "normalise" );
	double amplitude = mlt_properties_get_double( instance_props, "amplitude" );
	int i, j;
	double sample;
	int16_t peak;

	if ( mlt_properties_get( instance_props, "limiter" ) != NULL )
		limiter_level = mlt_properties_get_double( instance_props, "limiter" );

	// Get the producer's audio as interleaved s16.
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	// Determine numeric limits from the configured sample width.
	int bytes_per_samp = (samp_width - 1) / 8 + 1;
	int samplemax = (1 << (bytes_per_samp * 8 - 1)) - 1;
	int samplemin = -samplemax - 1;

	mlt_service_lock( MLT_FILTER_SERVICE( this ) );

	if ( normalise )
	{
		int window = mlt_properties_get_int( filter_props, "window" );
		double *smooth_buffer = mlt_properties_get_data( filter_props, "smooth_buffer", NULL );

		if ( window > 0 && smooth_buffer != NULL )
		{
			int smooth_index = mlt_properties_get_int( filter_props, "_smooth_index" );

			// Compute the signal power and put into smoothing buffer
			smooth_buffer[ smooth_index ] = signal_max_power( *buffer, *channels, *samples, &peak );

			if ( smooth_buffer[ smooth_index ] > EPSILON )
			{
				// Advance the circular smoothing index only for non-silent
				// frames, then derive the gain from the smoothed power.
				mlt_properties_set_int( filter_props, "_smooth_index", ( smooth_index + 1 ) % window );

				// Smooth the data and compute the gain
				gain *= amplitude / get_smoothed_data( smooth_buffer, window );
			}
		}
		else
		{
			// No smoothing window: normalise against this frame's peak power.
			gain *= amplitude / signal_max_power( (int16_t*) *buffer, *channels, *samples, &peak );
		}
	}

	if ( max_gain > 0 && gain > max_gain )
		gain = max_gain;

	// Initialise filter's previous gain value to prevent an inadvertant jump from 0
	mlt_position last_position = mlt_properties_get_position( filter_props, "_last_position" );
	mlt_position current_position = mlt_frame_get_position( frame );
	if ( mlt_properties_get( filter_props, "_previous_gain" ) == NULL || current_position != last_position + 1 )
		mlt_properties_set_double( filter_props, "_previous_gain", gain );

	// Start the gain out at the previous
	double previous_gain = mlt_properties_get_double( filter_props, "_previous_gain" );

	// Determine ramp increment
	double gain_step = ( gain - previous_gain ) / *samples;

	// Save the current gain for the next iteration
	mlt_properties_set_double( filter_props, "_previous_gain", gain );
	mlt_properties_set_position( filter_props, "_last_position", current_position );

	mlt_service_unlock( MLT_FILTER_SERVICE( this ) );

	// Ramp from the previous gain to the current
	gain = previous_gain;

	int16_t *p = (int16_t*) *buffer;

	// Apply the gain
	for ( i = 0; i < *samples; i++ )
	{
		for ( j = 0; j < *channels; j++ )
		{
			sample = *p * gain;
			*p = ROUND( sample );

			if ( gain > 1.0 )
			{
				/* use limiter function instead of clipping */
				if ( normalise )
					*p = ROUND( samplemax * limiter( sample / (double) samplemax, limiter_level ) );

				/* perform clipping */
				else if ( sample > samplemax )
					*p = samplemax;
				else if ( sample < samplemin )
					*p = samplemin;
			}
			p++;
		}
		gain += gain_step;
	}
	return 0;
}
/** Fan one frame out to every nested consumer ("0.consumer", "1.consumer",
 * ...), re-slicing its audio so each nested consumer — possibly running at a
 * different fps — receives exactly the sample counts it expects. Leftover
 * samples are carried between calls in the nested consumer's "_multi_audio"
 * / "_multi_samples" properties.
 *
 * Fix: the call to mlt_frame_get_audio() contained the mojibake token
 * `¤t_samples` — an HTML-entity corruption (&curren;) of the intended
 * `&current_samples` — which does not compile. Restored the correct
 * argument.
 */
static void foreach_consumer_put( mlt_consumer consumer, mlt_frame frame )
{
	mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
	mlt_consumer nested = NULL;
	char key[30];
	int index = 0;

	do {
		snprintf( key, sizeof(key), "%d.consumer", index++ );
		nested = mlt_properties_get_data( properties, key, NULL );
		if ( nested )
		{
			mlt_properties nested_props = MLT_CONSUMER_PROPERTIES(nested);
			double self_fps = mlt_properties_get_double( properties, "fps" );
			double nested_fps = mlt_properties_get_double( nested_props, "fps" );
			mlt_position nested_pos = mlt_properties_get_position( nested_props, "_multi_position" );
			mlt_position self_pos = mlt_frame_get_position( frame );
			double self_time = self_pos / self_fps;
			double nested_time = nested_pos / nested_fps;

			// get the audio for the current frame
			uint8_t *buffer = NULL;
			mlt_audio_format format = mlt_audio_s16;
			int channels = mlt_properties_get_int( properties, "channels" );
			int frequency = mlt_properties_get_int( properties, "frequency" );
			int current_samples = mlt_sample_calculator( self_fps, frequency, self_pos );
			mlt_frame_get_audio( frame, (void**) &buffer, &format, &frequency, &channels, &current_samples );
			int current_size = mlt_audio_format_size( format, current_samples, channels );

			// get any leftover audio carried over from the previous call
			int prev_size = 0;
			uint8_t *prev_buffer = mlt_properties_get_data( nested_props, "_multi_audio", &prev_size );
			uint8_t *new_buffer = NULL;
			if ( prev_size > 0 )
			{
				// Prepend the leftover samples to this frame's audio.
				new_buffer = mlt_pool_alloc( prev_size + current_size );
				memcpy( new_buffer, prev_buffer, prev_size );
				memcpy( new_buffer + prev_size, buffer, current_size );
				buffer = new_buffer;
			}
			current_size += prev_size;
			current_samples += mlt_properties_get_int( nested_props, "_multi_samples" );

			// Emit frames to the nested consumer until it catches up to us.
			while ( nested_time <= self_time )
			{
				// put ideal number of samples into cloned frame
				int deeply = index > 1 ? 1 : 0;
				mlt_frame clone_frame = mlt_frame_clone( frame, deeply );
				int nested_samples = mlt_sample_calculator( nested_fps, frequency, nested_pos );
				// -10 is an optimization to avoid tiny amounts of leftover samples
				nested_samples = nested_samples > current_samples - 10 ? current_samples : nested_samples;
				int nested_size = mlt_audio_format_size( format, nested_samples, channels );
				if ( nested_size > 0 )
				{
					prev_buffer = mlt_pool_alloc( nested_size );
					memcpy( prev_buffer, buffer, nested_size );
				}
				else
				{
					prev_buffer = NULL;
					nested_size = 0;
				}
				mlt_frame_set_audio( clone_frame, prev_buffer, format, nested_size, mlt_pool_release );
				mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_samples", nested_samples );
				mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_frequency", frequency );
				mlt_properties_set_int( MLT_FRAME_PROPERTIES(clone_frame), "audio_channels", channels );

				// chomp the audio
				current_samples -= nested_samples;
				current_size -= nested_size;
				buffer += nested_size;

				// send frame to nested consumer
				mlt_consumer_put_frame( nested, clone_frame );
				mlt_properties_set_position( nested_props, "_multi_position", ++nested_pos );
				nested_time = nested_pos / nested_fps;
			}

			// save any remaining audio for the next call
			if ( current_size > 0 )
			{
				prev_buffer = mlt_pool_alloc( current_size );
				memcpy( prev_buffer, buffer, current_size );
			}
			else
			{
				prev_buffer = NULL;
				current_size = 0;
			}
			mlt_pool_release( new_buffer );
			mlt_properties_set_data( nested_props, "_multi_audio", prev_buffer, current_size, mlt_pool_release, NULL );
			mlt_properties_set_int( nested_props, "_multi_samples", current_samples );
		}
	} while ( nested );
}
/**
 * Read-ahead thread body: pre-renders frames into the consumer's queue.
 *
 * Pulls frames from the producer, renders image and audio ahead of the
 * consuming thread, and pushes them onto self->queue (bounded by the
 * "buffer" property). Tracks a running average of per-frame processing
 * cost and, when real-time frame dropping is enabled, skips image
 * rendering for frames it cannot afford — audio is always processed so
 * playback never loses sound.
 *
 * \param arg the consumer (mlt_consumer)
 * \return NULL on exit (thread terminates when self->ahead is cleared)
 */
static void *consumer_read_ahead_thread( void *arg )
{
	// The argument is the consumer
	mlt_consumer self = arg;

	// Get the properties of the consumer
	mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );

	// Get the width and height
	int width = mlt_properties_get_int( properties, "width" );
	int height = mlt_properties_get_int( properties, "height" );

	// See if video is turned off
	int video_off = mlt_properties_get_int( properties, "video_off" );
	int preview_off = mlt_properties_get_int( properties, "preview_off" );
	int preview_format = mlt_properties_get_int( properties, "preview_format" );

	// Get the audio settings: map the "mlt_audio_format" property string
	// onto the corresponding mlt_audio_format enum (default s16).
	mlt_audio_format afmt = mlt_audio_s16;
	const char *format = mlt_properties_get( properties, "mlt_audio_format" );
	if ( format )
	{
		if ( !strcmp( format, "none" ) )
			afmt = mlt_audio_none;
		else if ( !strcmp( format, "s32" ) )
			afmt = mlt_audio_s32;
		else if ( !strcmp( format, "s32le" ) )
			afmt = mlt_audio_s32le;
		else if ( !strcmp( format, "float" ) )
			afmt = mlt_audio_float;
		else if ( !strcmp( format, "f32le" ) )
			afmt = mlt_audio_f32le;
		else if ( !strcmp( format, "u8" ) )
			afmt = mlt_audio_u8;
	}
	int counter = 0;
	double fps = mlt_properties_get_double( properties, "fps" );
	int channels = mlt_properties_get_int( properties, "channels" );
	int frequency = mlt_properties_get_int( properties, "frequency" );
	int samples = 0;
	void *audio = NULL;

	// See if audio is turned off
	int audio_off = mlt_properties_get_int( properties, "audio_off" );

	// Get the maximum size of the buffer (queue depth)
	int buffer = mlt_properties_get_int( properties, "buffer" ) + 1;

	// General frame variable
	mlt_frame frame = NULL;
	uint8_t *image = NULL;

	// Time structures
	struct timeval ante;

	// Average time for get_frame and get_image
	int count = 0;
	int skipped = 0;           // consecutively skipped frames
	int64_t time_process = 0;  // accumulated processing cost (usec)
	int skip_next = 0;         // flag: skip image work on the next frame
	mlt_position pos = 0;
	mlt_position start_pos = 0;
	mlt_position last_pos = 0;
	int frame_duration = mlt_properties_get_int( properties, "frame_duration" );
	int drop_max = mlt_properties_get_int( properties, "drop_max" );

	if ( preview_off && preview_format != 0 )
		self->format = preview_format;

	// Get the first frame
	frame = mlt_consumer_get_frame( self );

	if ( frame )
	{
		// Get the image of the first frame
		if ( !video_off )
		{
			mlt_events_fire( MLT_CONSUMER_PROPERTIES( self ), "consumer-frame-render", frame, NULL );
			mlt_frame_get_image( frame, &image, &self->format, &width, &height, 0 );
		}

		if ( !audio_off )
		{
			samples = mlt_sample_calculator( fps, frequency, counter++ );
			mlt_frame_get_audio( frame, &audio, &afmt, &frequency, &channels, &samples );
		}

		// Mark as rendered
		mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );
		last_pos = start_pos = pos = mlt_frame_get_position( frame );
	}

	// Get the starting time (can ignore the times above)
	gettimeofday( &ante, NULL );

	// Continue to read ahead
	while ( self->ahead )
	{
		// Put the current frame into the queue; block while the queue is full.
		pthread_mutex_lock( &self->queue_mutex );
		while( self->ahead && mlt_deque_count( self->queue ) >= buffer )
			pthread_cond_wait( &self->queue_cond, &self->queue_mutex );
		mlt_deque_push_back( self->queue, frame );
		pthread_cond_broadcast( &self->queue_cond );
		pthread_mutex_unlock( &self->queue_mutex );

		// Get the next frame
		frame = mlt_consumer_get_frame( self );

		// If there's no frame, we're probably stopped...
		if ( frame == NULL )
			continue;
		pos = mlt_frame_get_position( frame );

		// Increment the counter used for averaging processing cost
		count ++;

		// All non-normal playback frames should be shown
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "_speed" ) != 1 )
		{
#ifdef DEINTERLACE_ON_NOT_NORMAL_SPEED
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "consumer_deinterlace", 1 );
#endif
			// Indicate seeking or trick-play
			start_pos = pos;
		}

		// If skip flag not set or frame-dropping disabled
		if ( !skip_next || self->real_time == -1 )
		{
			if ( !video_off )
			{
				// Reset width/height - could have been changed by previous mlt_frame_get_image
				width = mlt_properties_get_int( properties, "width" );
				height = mlt_properties_get_int( properties, "height" );

				// Get the image
				mlt_events_fire( MLT_CONSUMER_PROPERTIES( self ), "consumer-frame-render", frame, NULL );
				mlt_frame_get_image( frame, &image, &self->format, &width, &height, 0 );
			}

			// Indicate the rendered image is available.
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );

			// Reset consecutively-skipped counter
			skipped = 0;
		}
		else // Skip image processing
		{
			// Increment the number of consecutively-skipped frames
			skipped++;

			// If too many (1 sec) consecutively-skipped frames
			if ( skipped > drop_max )
			{
				// Reset cost tracker
				time_process = 0;
				count = 1;
				mlt_log_verbose( self, "too many frames dropped - forcing next frame\n" );
			}
		}

		// Always process audio
		if ( !audio_off )
		{
			samples = mlt_sample_calculator( fps, frequency, counter++ );
			mlt_frame_get_audio( frame, &audio, &afmt, &frequency, &channels, &samples );
		}

		// Get the time to process this frame
		int64_t time_current = time_difference( &ante );

		// If the current time is not suddenly some large amount
		// (20x the running average is treated as an outlier)
		if ( time_current < time_process / count * 20 || !time_process || count < 5 )
		{
			// Accumulate the cost for processing this frame
			time_process += time_current;
		}
		else
		{
			mlt_log_debug( self, "current %"PRId64" threshold %"PRId64" count %d\n",
				time_current, (int64_t) (time_process / count * 20), count );
			// Ignore the cost of this frame's time
			count--;
		}

		// Determine if we started, resumed, or seeked
		if ( pos != last_pos + 1 )
			start_pos = pos;
		last_pos = pos;

		// Do not skip the first 20% of buffer at start, resume, or seek
		if ( pos - start_pos <= buffer / 5 + 1 )
		{
			// Reset cost tracker
			time_process = 0;
			count = 1;
		}

		// Reset skip flag
		skip_next = 0;

		// Only consider skipping if the buffer level is low (or really small)
		if ( mlt_deque_count( self->queue ) <= buffer / 5 + 1 )
		{
			// Skip next frame if average cost exceeds frame duration.
			if ( time_process / count > frame_duration )
				skip_next = 1;
			if ( skip_next )
				mlt_log_debug( self, "avg usec %"PRId64" (%"PRId64"/%d) duration %d\n",
					time_process/count, time_process, count, frame_duration);
		}
	}

	// Remove the last frame (fetched but never queued)
	mlt_frame_close( frame );

	return NULL;
}
/**
 * channelcopy filter: copy or swap one audio channel onto another.
 *
 * Reads "channelcopy.from", "channelcopy.to" and "channelcopy.swap" from the
 * frame properties, fetches the producer's audio, and then either copies the
 * `from` channel over the `to` channel, or (when swap is set) exchanges them.
 * Indexing depends on the sample layout implied by each case below:
 * u8/s16/s32le/f32le are treated as interleaved (stride = *channels),
 * s32/float as planar (each channel is a contiguous run of *samples).
 *
 * \return 0 always
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the properties of the a frame
	mlt_properties properties = MLT_FRAME_PROPERTIES( frame );

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );
	int from = mlt_properties_get_int( properties, "channelcopy.from" );
	int to = mlt_properties_get_int( properties, "channelcopy.to" );
	int swap = mlt_properties_get_int( properties, "channelcopy.swap" );

	// Get the producer's audio
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	// Copy channels as necessary (no-op when source == destination)
	if ( from != to)
	switch ( *format )
	{
		case mlt_audio_u8:
		{
			// Interleaved bytes: step by *channels per sample.
			uint8_t *f = (uint8_t*) *buffer + from;
			uint8_t *t = (uint8_t*) *buffer + to;
			uint8_t x;
			int i;
			if ( swap )
				for ( i = 0; i < *samples; i++, f += *channels, t += *channels )
				{
					x = *t;
					*t = *f;
					*f = x;
				}
			else
				for ( i = 0; i < *samples; i++, f += *channels, t += *channels )
					*t = *f;
			break;
		}
		case mlt_audio_s16:
		{
			// Interleaved 16-bit samples.
			int16_t *f = (int16_t*) *buffer + from;
			int16_t *t = (int16_t*) *buffer + to;
			int16_t x;
			int i;
			if ( swap )
				for ( i = 0; i < *samples; i++, f += *channels, t += *channels )
				{
					x = *t;
					*t = *f;
					*f = x;
				}
			else
				for ( i = 0; i < *samples; i++, f += *channels, t += *channels )
					*t = *f;
			break;
		}
		case mlt_audio_s32:
		{
			// Planar: each channel occupies *samples contiguous int32_t,
			// so whole planes can be moved with memcpy.
			int32_t *f = (int32_t*) *buffer + from * *samples;
			int32_t *t = (int32_t*) *buffer + to * *samples;
			if ( swap )
			{
				int32_t *x = malloc( *samples * sizeof(int32_t) );
				memcpy( x, t, *samples * sizeof(int32_t) );
				memcpy( t, f, *samples * sizeof(int32_t) );
				memcpy( f, x, *samples * sizeof(int32_t) );
				free( x );
			}
			else
			{
				memcpy( t, f, *samples * sizeof(int32_t) );
			}
			break;
		}
		case mlt_audio_s32le:
		case mlt_audio_f32le:
		{
			// Interleaved 32-bit samples (int or float — both moved as int32_t
			// since only whole 4-byte values are copied, never interpreted).
			int32_t *f = (int32_t*) *buffer + from;
			int32_t *t = (int32_t*) *buffer + to;
			int32_t x;
			int i;
			if ( swap )
				for ( i = 0; i < *samples; i++, f += *channels, t += *channels )
				{
					x = *t;
					*t = *f;
					*f = x;
				}
			else
				for ( i = 0; i < *samples; i++, f += *channels,
							 t += *channels )
					*t = *f;
			break;
		}
		case mlt_audio_float:
		{
			// Planar float: same plane-copy strategy as mlt_audio_s32.
			float *f = (float*) *buffer + from * *samples;
			float *t = (float*) *buffer + to * *samples;
			if ( swap )
			{
				float *x = malloc( *samples * sizeof(float) );
				memcpy( x, t, *samples * sizeof(float) );
				memcpy( t, f, *samples * sizeof(float) );
				memcpy( f, x, *samples * sizeof(float) );
				free( x );
			}
			else
			{
				memcpy( t, f, *samples * sizeof(float) );
			}
			break;
		}
		default:
			mlt_log_error( MLT_FILTER_SERVICE( filter ), "Invalid audio format\n" );
			break;
	}

	return 0;
}
/**
 * Thin adapter with an int16_t** buffer signature that simply forwards to the
 * generic mlt_frame_get_audio() entry point (which takes void**).
 *
 * \return the result of mlt_frame_get_audio()
 */
static int get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int result = mlt_frame_get_audio( frame, (void**) buffer, format, frequency, channels, samples );
	return result;
}
/**
 * LADSPA filter audio callback (chunked variant).
 *
 * Lazily initialises a jack_rack plugin chain, fetches the producer's audio
 * as float, widens the buffer with silent channels when the plugin needs more
 * channels than the source provides, applies animated control-port and
 * wet/dry values, then runs the plugin over the buffer in chunks of at most
 * MAX_SAMPLE_COUNT samples (some plugins crash on large frames). Status port
 * values are published back onto the filter properties as "<port>[<copy>]".
 * Falls through to plain mlt_frame_get_audio() when no plugin is active.
 *
 * \return 0 on success, otherwise an error from the audio fetch or processing
 */
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Check if the channel configuration has changed; if so, drop the
	// existing jackrack instance so it is rebuilt for the new layout.
	int prev_channels = mlt_properties_get_int( filter_properties, "_prev_channels" );
	if ( prev_channels != *channels )
	{
		if( prev_channels )
		{
			mlt_log_info( MLT_FILTER_SERVICE(filter), "Channel configuration changed. Old: %d New: %d.\n", prev_channels, *channels );
			mlt_properties_set_data( filter_properties, "jackrack", NULL, 0, (mlt_destructor) NULL, NULL );
		}
		mlt_properties_set_int( filter_properties, "_prev_channels", *channels );
	}

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain && mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio (request float; LADSPA operates on float)
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		// Resize the buffer if necessary.
		if ( *channels < jackrack->channels )
		{
			// Add extra channels to satisfy the plugin.
			// Extra channels in the buffer will be ignored by downstream services.
			int old_size = mlt_audio_format_size( *format, *samples, *channels );
			int new_size = mlt_audio_format_size( *format, *samples, jackrack->channels );
			uint8_t* new_buffer = mlt_pool_alloc( new_size );
			memcpy( new_buffer, *buffer, old_size );
			// Put silence in extra channels.
			memset( new_buffer + old_size, 0, new_size - old_size );
			mlt_frame_set_audio( frame, new_buffer, *format, new_size, mlt_pool_release );
			*buffer = new_buffer;
		}

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values; property key "<i>" overrides the
			// plugin's default, with animation over the filter's duration.
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < jackrack->channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers (one pointer per channel; in-place processing)
		LADSPA_Data **input_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * jackrack->channels );

		// Some plugins crash with too many frames (samples).
		// So, feed the plugin with N samples per loop iteration.
		int samples_offset = 0;
		int sample_count = MIN(*samples, MAX_SAMPLE_COUNT);
		for (i = 0; samples_offset < *samples; i++) {
			int j = 0;
			// Channel j's plane starts at j * (*samples); advance into it by
			// the current chunk offset.
			for (; j < jackrack->channels; j++)
				output_buffers[j] = input_buffers[j] = (LADSPA_Data*) *buffer + j * (*samples) + samples_offset;
			sample_count = MIN(*samples - samples_offset, MAX_SAMPLE_COUNT);
			// Do LADSPA processing
			error = process_ladspa( jackrack->procinfo, sample_count, input_buffers, output_buffers );
			samples_offset += MAX_SAMPLE_COUNT;
		}

		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
/**
 * Volume filter: apply a (possibly animated) gain with optional
 * normalisation and soft limiting.
 *
 * Gain comes from the per-frame instance properties, or from the animated
 * "level" property (in dBFS, converted via DBFSTOAMP). When "normalise" is
 * set, the signal power is measured (optionally smoothed over a "window" of
 * previous frames) and the gain scaled so the output approaches "amplitude".
 * The gain is ramped linearly across the frame from the previous frame's
 * gain to avoid zipper noise; ramp state ("_previous_gain"/"_last_position")
 * lives on the filter and is guarded by the service lock.
 *
 * \return 0 always
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter from the frame
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the properties from the filter
	mlt_properties filter_props = MLT_FILTER_PROPERTIES( filter );

	// Get the frame's filter instance properties
	mlt_properties instance_props = mlt_frame_unique_properties( frame, MLT_FILTER_SERVICE( filter ) );

	// Get the parameters
	double gain = mlt_properties_get_double( instance_props, "gain" );
	double max_gain = mlt_properties_get_double( instance_props, "max_gain" );
	double limiter_level = 0.5; /* -6 dBFS */
	int normalise = mlt_properties_get_int( instance_props, "normalise" );
	double amplitude = mlt_properties_get_double( instance_props, "amplitude" );
	int i, j;
	double sample;
	int16_t peak;

	// Use animated value for gain if "level" property is set
	char* level_property = mlt_properties_get( filter_props, "level" );
	if ( level_property != NULL )
	{
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );
		gain = mlt_properties_anim_get_double( filter_props, "level", position, length );
		gain = DBFSTOAMP( gain );
	}

	if ( mlt_properties_get( instance_props, "limiter" ) != NULL )
		limiter_level = mlt_properties_get_double( instance_props, "limiter" );

	// Get the producer's audio; normalisation path works on s16,
	// plain gain path on interleaved float.
	*format = normalise? mlt_audio_s16 : mlt_audio_f32le;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	if ( normalise )
	{
		int window = mlt_properties_get_int( filter_props, "window" );
		double *smooth_buffer = mlt_properties_get_data( filter_props, "smooth_buffer", NULL );

		if ( window > 0 && smooth_buffer != NULL )
		{
			int smooth_index = mlt_properties_get_int( filter_props, "_smooth_index" );

			// Compute the signal power and put into smoothing buffer
			smooth_buffer[ smooth_index ] = signal_max_power( *buffer, *channels, *samples, &peak );

			// Only advance / apply when the frame is not near-silent
			if ( smooth_buffer[ smooth_index ] > EPSILON )
			{
				mlt_properties_set_int( filter_props, "_smooth_index", ( smooth_index + 1 ) % window );

				// Smooth the data and compute the gain
				gain *= amplitude / get_smoothed_data( smooth_buffer, window );
			}
		}
		else
		{
			// No smoothing window: normalise against this frame's power alone
			gain *= amplitude / signal_max_power( *buffer, *channels, *samples, &peak );
		}
	}

	if ( max_gain > 0 && gain > max_gain )
		gain = max_gain;

	// Initialise filter's previous gain value to prevent an inadvertant jump from 0
	mlt_position last_position = mlt_properties_get_position( filter_props, "_last_position" );
	mlt_position current_position = mlt_frame_get_position( frame );
	if ( mlt_properties_get( filter_props, "_previous_gain" ) == NULL || current_position != last_position + 1 )
		mlt_properties_set_double( filter_props, "_previous_gain", gain );

	// Start the gain out at the previous
	double previous_gain = mlt_properties_get_double( filter_props, "_previous_gain" );

	// Determine ramp increment
	double gain_step = ( gain - previous_gain ) / *samples;

	// Save the current gain for the next iteration
	mlt_properties_set_double( filter_props, "_previous_gain", gain );
	mlt_properties_set_position( filter_props, "_last_position", current_position );

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	// Ramp from the previous gain to the current
	gain = previous_gain;

	// Apply the gain
	if ( normalise )
	{
		int16_t *p = *buffer;
		// Determine numeric limits from the configured sample width
		int bytes_per_samp = (samp_width - 1) / 8 + 1;
		int samplemax = (1 << (bytes_per_samp * 8 - 1)) - 1;

		for ( i = 0; i < *samples; i++, gain += gain_step )
		{
			for ( j = 0; j < *channels; j++ )
			{
				sample = *p * gain;
				*p = ROUND( sample );
				if ( gain > 1.0 && normalise )
				{
					/* use limiter function instead of clipping */
					*p = ROUND( samplemax * limiter( sample / (double) samplemax, limiter_level ) );
				}
				p++;
			}
		}
	}
	else
	{
		// Float path: straight multiply, no limiting
		float *p = *buffer;
		for ( i = 0; i < *samples; i++, gain += gain_step )
		{
			for ( j = 0; j < *channels; j++, p++ )
			{
				p[0] *= gain;
			}
		}
	}
	return 0;
}
/**
 * SDL2 consumer: fetch a frame's audio and feed it to the SDL audio device.
 *
 * On first call (init_audio == 1) opens the SDL audio device with the
 * consumer's requested format; thereafter copies (or silences, during
 * non-1x playback without scrub) the frame's s16 PCM into the shared ring
 * buffer consumed by sdl_fill_audio, blocking on audio_cond when full.
 *
 * \param self the SDL consumer
 * \param frame the frame to play audio from
 * \param init_audio 1 to (re)initialise the device, 0 if already open
 * \param duration receives this frame's audio duration in milliseconds
 * \return the new init_audio state (0 = open and playing, 2 = open failed)
 */
static int consumer_play_audio( consumer_sdl self, mlt_frame frame, int init_audio, int *duration )
{
	// Get the properties of self consumer
	mlt_properties properties = self->properties;

	mlt_audio_format afmt = mlt_audio_s16;

	// Set the preferred params of the test card signal
	int channels = mlt_properties_get_int( properties, "channels" );
	int frequency = mlt_properties_get_int( properties, "frequency" );
	int scrub = mlt_properties_get_int( properties, "scrub_audio" );
	// static: persists across calls so the sample calculator can dither
	// the per-frame sample count (e.g. NTSC rates)
	static int counter = 0;

	int samples = mlt_sample_calculator( mlt_properties_get_double( self->properties, "fps" ), frequency, counter++ );
	int16_t *pcm;

	mlt_frame_get_audio( frame, (void**) &pcm, &afmt, &frequency, &channels, &samples );
	*duration = ( ( samples * 1000 ) / frequency );
	// NOTE(review): offset is applied in int16_t units — presumably samples,
	// not bytes; confirm against producers that set "audio_offset".
	pcm += mlt_properties_get_int( properties, "audio_offset" );

	if ( mlt_properties_get_int( properties, "audio_off" ) )
	{
		self->playing = 1;
		init_audio = 1;
		return init_audio;
	}

	if ( init_audio == 1 )
	{
		SDL_AudioSpec request;
		SDL_AudioSpec got;
		SDL_AudioDeviceID dev;

		int audio_buffer = mlt_properties_get_int( properties, "audio_buffer" );

		// specify audio format
		memset( &request, 0, sizeof( SDL_AudioSpec ) );
		self->playing = 0;
		request.freq = frequency;
		request.format = AUDIO_S16SYS;
		request.channels = mlt_properties_get_int( properties, "channels" );
		request.samples = audio_buffer;
		request.callback = sdl_fill_audio;
		request.userdata = (void *)self;

		dev = sdl2_open_audio( &request, &got );
		if( dev == 0 )
		{
			mlt_log_error( MLT_CONSUMER_SERVICE( self ), "SDL failed to open audio\n" );
			init_audio = 2;
		}
		else
		{
			// Device may have granted a different channel count than requested
			if( got.channels != request.channels )
			{
				mlt_log_info( MLT_CONSUMER_SERVICE( self ), "Unable to output %d channels. Change to %d\n", request.channels, got.channels );
			}
			mlt_log_info( MLT_CONSUMER_SERVICE( self ), "Audio Opened: driver=%s channels=%d frequency=%d\n", SDL_GetCurrentAudioDriver(), got.channels, got.freq );
			SDL_PauseAudioDevice( dev, 0 );
			init_audio = 0;
			self->out_channels = got.channels;
		}
	}

	if ( init_audio == 0 )
	{
		mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
		int samples_copied = 0;
		int dst_stride = self->out_channels * sizeof( *pcm );

		pthread_mutex_lock( &self->audio_mutex );

		// Copy this frame's PCM into the shared buffer, waiting for the
		// SDL callback to drain it whenever it is full.
		while ( self->running && samples_copied < samples )
		{
			int sample_space = ( sizeof( self->audio_buffer ) - self->audio_avail ) / dst_stride;

			while ( self->running && sample_space == 0 )
			{
				pthread_cond_wait( &self->audio_cond, &self->audio_mutex );
				sample_space = ( sizeof( self->audio_buffer ) - self->audio_avail ) / dst_stride;
			}

			if ( self->running )
			{
				int samples_to_copy = samples - samples_copied;
				if ( samples_to_copy > sample_space )
				{
					samples_to_copy = sample_space;
				}
				int dst_bytes = samples_to_copy * dst_stride;

				if ( scrub || mlt_properties_get_double( properties, "_speed" ) == 1 )
				{
					if ( channels == self->out_channels )
					{
						// Same layout: bulk copy
						memcpy( &self->audio_buffer[ self->audio_avail ], pcm, dst_bytes );
						pcm += samples_to_copy * channels;
					}
					else
					{
						// Channel-count mismatch: copy one output frame at a
						// time at the device's stride
						int16_t *dest = (int16_t*) &self->audio_buffer[ self->audio_avail ];
						int i = samples_to_copy + 1;
						while ( --i )
						{
							memcpy( dest, pcm, dst_stride );
							pcm += channels;
							dest += self->out_channels;
						}
					}
				}
				else
				{
					// Trick-play without scrub: emit silence but keep timing
					memset( &self->audio_buffer[ self->audio_avail ], 0, dst_bytes );
					pcm += samples_to_copy * channels;
				}
				self->audio_avail += dst_bytes;
				samples_copied += samples_to_copy;
			}
			pthread_cond_broadcast( &self->audio_cond );
		}

		pthread_mutex_unlock( &self->audio_mutex );
	}
	else
	{
		self->playing = 1;
	}

	return init_audio;
}
/**
 * Panner/balance filter: remix s16 audio through a per-sample 6x6 matrix.
 *
 * The "channel" property selects the behavior: -1/-2 = front/rear L-R
 * balance, -3/-4 = left/right front-rear fade, 0..3 = pan a single channel
 * toward its neighbour; "gang" applies the balance/fade to both pairs. The
 * mix weight ramps from "previous_mix" to "mix" across the frame, and a
 * one-pole low-pass (coefficients A/B) smooths the output to reduce clipping
 * artifacts. A pooled scratch buffer keeps a pristine copy of the input.
 *
 * Fix vs. previous revision: the upper clamp was 32768, which does not fit
 * in int16_t — the subsequent cast wrapped full-scale peaks to -32768 (a
 * loud negative click). Clamp to INT16_MAX (32767) instead.
 *
 * \return 0 always (including on scratch-buffer allocation failure)
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	mlt_properties properties = mlt_frame_pop_audio( frame );
	mlt_filter filter = mlt_frame_pop_audio( frame );
	mlt_properties filter_props = MLT_FILTER_PROPERTIES( filter );
	mlt_properties frame_props = MLT_FRAME_PROPERTIES( frame );

	// We can only mix s16
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame, (void**) buffer, format, frequency, channels, samples );

	// Apply silence
	int silent = mlt_properties_get_int( frame_props, "silent_audio" );
	mlt_properties_set_int( frame_props, "silent_audio", 0 );
	if ( silent )
		memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) );

	int src_size = 0;
	int16_t *src = mlt_properties_get_data( filter_props, "scratch_buffer", &src_size );
	int16_t *dest = *buffer;
	double v; // sample accumulator
	int i, out, in;
	double factors[6][6]; // mixing weights [in][out]
	double mix_start = 0.5, mix_end = 0.5;

	if ( mlt_properties_get( properties, "previous_mix" ) != NULL )
		mix_start = mlt_properties_get_double( properties, "previous_mix" );
	if ( mlt_properties_get( properties, "mix" ) != NULL )
		mix_end = mlt_properties_get_double( properties, "mix" );
	double weight = mix_start;
	double weight_step = ( mix_end - mix_start ) / *samples;
	int active_channel = mlt_properties_get_int( properties, "channel" );
	int gang = mlt_properties_get_int( properties, "gang" ) ? 2 : 1;

	// Use an inline low-pass filter to help avoid clipping
	double Fc = 0.5;
	double B = exp(-2.0 * M_PI * Fc);
	double A = 1.0 - B;
	double vp[6]; // previous filtered value per output channel

	// Setup or resize a scratch buffer
	if ( !src || src_size < *samples * *channels * sizeof(int16_t) )
	{
		// We allocate 4 more samples than we need to deal with jitter in the sample count per frame.
		src_size = ( *samples + 4 ) * *channels * sizeof(int16_t);
		src = mlt_pool_alloc( src_size );
		if ( !src )
			return 0;
		mlt_properties_set_data( filter_props, "scratch_buffer", src, src_size, mlt_pool_release, NULL );
	}

	// We must use a pristine copy as the source
	memcpy( src, *buffer, *samples * *channels * sizeof(int16_t) );

	// Initialize the mix factors
	for ( i = 0; i < 6; i++ )
		for ( out = 0; out < 6; out++ )
			factors[i][out] = 0.0;
	// Seed the low-pass history with the first sample of each channel
	for ( out = 0; out < *channels; out++ )
		vp[out] = (double) dest[out];

	for ( i = 0; i < *samples; i++ )
	{
		// Recompute the mix factors
		switch ( active_channel )
		{
			case -1: // Front L/R balance
			case -2: // Rear L/R balance
			{
				// Gang front/rear balance if requested
				int g, active = active_channel;
				for ( g = 0; g < gang; g++, active-- )
				{
					int left = active == -1 ? 0 : 2;
					int right = left + 1;
					if ( weight < 0.0 )
					{
						factors[left][left] = 1.0;
						factors[right][right] = weight + 1.0 < 0.0 ? 0.0 : weight + 1.0;
					}
					else
					{
						factors[left][left] = 1.0 - weight < 0.0 ? 0.0 : 1.0 - weight;
						factors[right][right] = 1.0;
					}
				}
				break;
			}
			case -3: // Left fade
			case -4: // right fade
			{
				// Gang left/right fade if requested
				int g, active = active_channel;
				for ( g = 0; g < gang; g++, active-- )
				{
					int front = active == -3 ? 0 : 1;
					int rear = front + 2;
					if ( weight < 0.0 )
					{
						factors[front][front] = 1.0;
						factors[rear][rear] = weight + 1.0 < 0.0 ? 0.0 : weight + 1.0;
					}
					else
					{
						factors[front][front] = 1.0 - weight < 0.0 ? 0.0 : 1.0 - weight;
						factors[rear][rear] = 1.0;
					}
				}
				break;
			}
			case 0: // left
			case 2:
			{
				int left = active_channel;
				int right = left + 1;
				factors[right][right] = 1.0;
				if ( weight < 0.0 ) // output left toward left
				{
					factors[left][left] = 0.5 - weight * 0.5;
					factors[left][right] = ( 1.0 + weight ) * 0.5;
				}
				else // output left toward right
				{
					factors[left][left] = ( 1.0 - weight ) * 0.5;
					factors[left][right] = 0.5 + weight * 0.5;
				}
				break;
			}
			case 1: // right
			case 3:
			{
				int right = active_channel;
				int left = right - 1;
				factors[left][left] = 1.0;
				if ( weight < 0.0 ) // output right toward left
				{
					factors[right][left] = 0.5 - weight * 0.5;
					factors[right][right] = ( 1.0 + weight ) * 0.5;
				}
				else // output right toward right
				{
					factors[right][left] = ( 1.0 - weight ) * 0.5;
					factors[right][right] = 0.5 + weight * 0.5;
				}
				break;
			}
		}

		// Do the mixing
		for ( out = 0; out < *channels && out < 6; out++ )
		{
			v = 0;
			for ( in = 0; in < *channels && in < 6; in++ )
				v += factors[in][out] * src[ i * *channels + in ];
			// Clamp to the int16_t range before the cast; the upper bound
			// must be 32767 (INT16_MAX) — 32768 would wrap to -32768.
			v = v < -32767 ? -32767 : v > 32767 ? 32767 : v;
			// One-pole low-pass: blend with previous output, store history
			vp[out] = dest[ i * *channels + out ] = (int16_t) ( v * A + vp[ out ] * B );
		}
		weight += weight_step;
	}

	return 0;
}
/**
 * Resample filter: convert audio to the filter's "frequency" using
 * libsamplerate (SRC).
 *
 * Fetches the producer's audio, converts it to interleaved float only when a
 * rate change is actually needed, then runs src_process() into the pooled
 * "output_buffer". The SRC_STATE is cached on the filter and recreated when
 * the channel count changes. On success, *samples/*frequency/*buffer are
 * updated to describe the resampled data.
 *
 * \return 0 on success, or the error from audio fetch / src_process
 */
static int resample_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Get the resample information
	int output_rate = mlt_properties_get_int( filter_properties, "frequency" );

	// If no resample frequency is specified, default to requested value
	if ( output_rate == 0 )
		output_rate = *frequency;

	// Get the producer's audio
	int error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	if ( error ) return error;

	// Return now if no work to do (rates already match, or degenerate input)
	if ( output_rate != *frequency && *frequency > 0 && *channels > 0 )
	{
		mlt_log_debug( MLT_FILTER_SERVICE(filter), "channels %d samples %d frequency %d -> %d\n",
			*channels, *samples, *frequency, output_rate );

		// Do not convert to float unless we need to change the rate
		if ( *format != mlt_audio_f32le )
			frame->convert_audio( frame, buffer, format, mlt_audio_f32le );

		mlt_service_lock( MLT_FILTER_SERVICE(filter) );

		SRC_DATA data;
		data.data_in = *buffer;
		data.data_out = mlt_properties_get_data( filter_properties, "output_buffer", NULL );
		data.src_ratio = ( float ) output_rate / ( float ) *frequency;
		data.input_frames = *samples;
		// Cap output frames to the shared output buffer capacity
		data.output_frames = BUFFER_LEN / *channels;
		data.end_of_input = 0;

		SRC_STATE *state = mlt_properties_get_data( filter_properties, "state", NULL );
		if ( !state || mlt_properties_get_int( filter_properties, "channels" ) != *channels )
		{
			// Recreate the resampler if the number of channels changed
			state = src_new( RESAMPLE_TYPE, *channels, &error );
			mlt_properties_set_data( filter_properties, "state", state, 0, (mlt_destructor) src_delete, NULL );
			mlt_properties_set_int( filter_properties, "channels", *channels );
		}

		// Resample the audio
		error = src_process( state, &data );
		if ( !error )
		{
			// Update output variables
			*samples = data.output_frames_gen;
			*frequency = output_rate;
			*buffer = data.data_out;
		}
		else
		{
			mlt_log_error( MLT_FILTER_SERVICE( filter ), "%s %d,%d,%d\n",
				src_strerror( error ), *frequency, *samples, output_rate );
		}
		mlt_service_unlock( MLT_FILTER_SERVICE(filter) );
	}
	return error;
}
/**
 * Mono filter: mix all input channels down to one signal, then replicate it
 * across "mono.channels" output channels (-1 = keep the input channel count).
 *
 * Layout follows the per-format convention the write loops already used:
 * s16 is interleaved (index i * channels + j); s32 and float are planar
 * (index j * samples + i).
 *
 * Fix vs. previous revision: the s32 and float READ indices used
 * ( j * *channels ) + i while the writes used ( j * *samples ) + i; for
 * planar data both must be j * *samples + i, otherwise multichannel planar
 * input was mixed from the wrong memory locations.
 *
 * \return 0 always (unsupported formats log an error and pass audio through)
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the properties of the a frame
	mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
	int channels_out = mlt_properties_get_int( properties, "mono.channels" );
	int i, j, size;

	// Get the producer's audio
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	if ( channels_out == -1 )
		channels_out = *channels;
	size = *samples * channels_out;

	switch ( *format )
	{
		case mlt_audio_s16:
		{
			// Interleaved 16-bit
			size *= sizeof( int16_t );
			int16_t *new_buffer = mlt_pool_alloc( size );
			for ( i = 0; i < *samples; i++ )
			{
				int16_t mixdown = 0;
				for ( j = 0; j < *channels; j++ )
					mixdown += ((int16_t*) *buffer)[ ( i * *channels ) + j ] / *channels;
				for ( j = 0; j < channels_out; j++ )
					new_buffer[ ( i * channels_out ) + j ] = mixdown;
			}
			*buffer = new_buffer;
			break;
		}
		case mlt_audio_s32:
		{
			// Planar 32-bit: channel j's plane starts at j * *samples
			size *= sizeof( int32_t );
			int32_t *new_buffer = mlt_pool_alloc( size );
			for ( i = 0; i < *samples; i++ )
			{
				int32_t mixdown = 0;
				for ( j = 0; j < *channels; j++ )
					mixdown += ((int32_t*) *buffer)[ ( j * *samples ) + i ] / *channels;
				for ( j = 0; j < channels_out; j++ )
					new_buffer[ ( j * *samples ) + i ] = mixdown;
			}
			*buffer = new_buffer;
			break;
		}
		case mlt_audio_float:
		{
			// Planar float: same indexing as s32
			size *= sizeof( float );
			float *new_buffer = mlt_pool_alloc( size );
			for ( i = 0; i < *samples; i++ )
			{
				float mixdown = 0;
				for ( j = 0; j < *channels; j++ )
					mixdown += ((float*) *buffer)[ ( j * *samples ) + i ] / *channels;
				for ( j = 0; j < channels_out; j++ )
					new_buffer[ ( j * *samples ) + i ] = mixdown;
			}
			*buffer = new_buffer;
			break;
		}
		default:
			mlt_log_error( NULL, "[filter mono] Invalid audio format\n" );
			break;
	}

	// size was scaled by the sample width in every handled case, so this is
	// true exactly when a new buffer was produced; attach it to the frame
	// (destroying the old) and report the new channel count.
	if ( size > *samples * channels_out )
	{
		mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
		*channels = channels_out;
	}
	return 0;
}
/**
 * Audio-channels filter: coerce the producer's audio to the requested
 * channel count.
 *
 * Fetches audio with the source's native channel count, then either
 * duplicates channels (cycling through the available ones) when the source
 * has too few, or drops the trailing channels when it has too many.
 * Interleaved formats (s16, s32le, f32le) are expanded sample-by-sample;
 * the remaining (non-interleaved) formats are expanded plane-by-plane with
 * memcpy.
 *
 * \return 0 on success, or the error from mlt_frame_get_audio
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Used to return number of channels in the source
	int channels_avail = *channels;

	// Get the producer's audio
	int error = mlt_frame_get_audio( frame, buffer, format, frequency, &channels_avail, samples );
	if ( error ) return error;

	if ( channels_avail < *channels )
	{
		int size = mlt_audio_format_size( *format, *samples, *channels );
		int16_t *new_buffer = mlt_pool_alloc( size );

		// Duplicate the existing channels
		if ( *format == mlt_audio_s16 )
		{
			// Interleaved 16-bit: fill each output slot by cycling k over
			// the available source channels.
			int i, j, k = 0;
			for ( i = 0; i < *samples; i++ )
			{
				for ( j = 0; j < *channels; j++ )
				{
					new_buffer[ ( i * *channels ) + j ] = ((int16_t*)(*buffer))[ ( i * channels_avail ) + k ];
					k = ( k + 1 ) % channels_avail;
				}
			}
		}
		else if ( *format == mlt_audio_s32le || *format == mlt_audio_f32le )
		{
			// Interleaved 32-bit (int or float — moved as opaque int32_t)
			int32_t *p = (int32_t*) new_buffer;
			int i, j, k = 0;
			for ( i = 0; i < *samples; i++ )
			{
				for ( j = 0; j < *channels; j++ )
				{
					p[ ( i * *channels ) + j ] = ((int32_t*)(*buffer))[ ( i * channels_avail ) + k ];
					k = ( k + 1 ) % channels_avail;
				}
			}
		}
		else
		{
			// non-interleaved - s32 or float
			// Replicate the whole block of source planes as many times as it
			// fits, then copy the first (channels % avail) planes for the rest.
			int size_avail = mlt_audio_format_size( *format, *samples, channels_avail );
			int32_t *p = (int32_t*) new_buffer;
			int i = *channels / channels_avail;
			while ( i-- )
			{
				memcpy( p, *buffer, size_avail );
				p += size_avail / sizeof(*p);
			}
			i = *channels % channels_avail;
			if ( i )
			{
				size_avail = mlt_audio_format_size( *format, *samples, i );
				memcpy( p, *buffer, size_avail );
			}
		}

		// Update the audio buffer now - destroys the old
		mlt_frame_set_audio( frame, new_buffer, *format, size, mlt_pool_release );
		*buffer = new_buffer;
	}
	else if ( channels_avail > *channels )
	{
		int size = mlt_audio_format_size( *format, *samples, *channels );
		int16_t *new_buffer = mlt_pool_alloc( size );

		// Drop all but the first *channels
		if ( *format == mlt_audio_s16 )
		{
			// Interleaved: keep the first *channels of every sample frame
			int i, j;
			for ( i = 0; i < *samples; i++ )
				for ( j = 0; j < *channels; j++ )
					new_buffer[ ( i * *channels ) + j ] = ((int16_t*)(*buffer))[ ( i * channels_avail ) + j ];
		}
		else
		{
			// non-interleaved: the first *channels planes are contiguous,
			// so a single truncating memcpy suffices
			memcpy( new_buffer, *buffer, size );
		}

		// Update the audio buffer now - destroys the old
		mlt_frame_set_audio( frame, new_buffer, *format, size, mlt_pool_release );
		*buffer = new_buffer;
	}
	return error;
}
/**
 * Get audio from the producer and run it through a LADSPA plugin chain.
 *
 * Lazily initialises the jack_rack instance on first use (stored as the
 * "jackrack" data property on the filter), pushes animated control-port and
 * wetness values into every plugin copy, processes the audio in place, and
 * publishes the plugin's status-port values back onto the filter properties
 * as "port[copy]" keys.
 *
 * \param frame the frame with the filter service pushed on its audio stack
 * \param buffer in/out audio buffer (processed in place as planar float)
 * \param format in/out audio format (forced to mlt_audio_float when processing)
 * \param frequency in/out sample rate
 * \param channels in/out channel count (forced to the rack's channel count)
 * \param samples in/out sample count per channel
 * \return the error result of process_ladspa or of the pass-through get_audio
 */
static int ladspa_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the filter service
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the filter properties
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );

	// Initialise LADSPA if needed
	jack_rack_t *jackrack = mlt_properties_get_data( filter_properties, "jackrack", NULL );
	if ( jackrack == NULL )
	{
		sample_rate = *frequency; // global inside jack_rack
		jackrack = initialise_jack_rack( filter_properties, *channels );
	}

	// Only process when the rack, its process chain, and a plugin id all exist;
	// otherwise fall through to a plain pass-through get_audio below.
	if ( jackrack && jackrack->procinfo && jackrack->procinfo->chain && mlt_properties_get_int64( filter_properties, "_pluginid" ) )
	{
		plugin_t *plugin = jackrack->procinfo->chain;
		LADSPA_Data value;
		int i, c;
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );

		// Get the producer's audio
		// NOTE(review): channel count and format are forced to what the rack
		// was initialised with; planar float layout is assumed below.
		*channels = jackrack->channels;
		*format = mlt_audio_float;
		mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

		for ( i = 0; i < plugin->desc->control_port_count; i++ )
		{
			// Apply the control port values: default first, then any animated
			// override stored under the numeric property key "i".
			char key[20];
			value = plugin_desc_get_default_control_value( plugin->desc, i, sample_rate );
			snprintf( key, sizeof(key), "%d", i );
			if ( mlt_properties_get( filter_properties, key ) )
				value = mlt_properties_anim_get_double( filter_properties, key, position, length );
			// Every copy of the plugin (one per channel group) gets the value.
			for ( c = 0; c < plugin->copies; c++ )
				plugin->holders[c].control_memory[i] = value;
		}

		// Wet/dry mixing is enabled only when a "wetness" property is present.
		plugin->wet_dry_enabled = mlt_properties_get( filter_properties, "wetness" ) != NULL;
		if ( plugin->wet_dry_enabled )
		{
			value = mlt_properties_anim_get_double( filter_properties, "wetness", position, length );
			for ( c = 0; c < *channels; c++ )
				plugin->wet_dry_values[c] = value;
		}

		// Configure the buffers: input and output alias the same planar
		// channel slices, so processing happens in place.
		LADSPA_Data **input_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
		LADSPA_Data **output_buffers = mlt_pool_alloc( sizeof( LADSPA_Data* ) * *channels );
		for ( i = 0; i < *channels; i++ )
		{
			input_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
			output_buffers[i] = (LADSPA_Data*) *buffer + i * *samples;
		}

		// Do LADSPA processing
		error = process_ladspa( jackrack->procinfo, *samples, input_buffers, output_buffers );
		mlt_pool_release( input_buffers );
		mlt_pool_release( output_buffers );

		// read the status port values and expose them as "port[copy]" properties
		for ( i = 0; i < plugin->desc->status_port_count; i++ )
		{
			char key[20];
			int p = plugin->desc->status_port_indicies[i];
			for ( c = 0; c < plugin->copies; c++ )
			{
				snprintf( key, sizeof(key), "%d[%d]", p, c );
				value = plugin->holders[c].status_memory[i];
				mlt_properties_set_double( filter_properties, key, value );
			}
		}
	}
	else
	{
		// Nothing to do.
		error = mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	}

	return error;
}
/**
 * Render one frame to the DeckLink output: append its audio to the FIFO
 * (normal speed only, with rebuffering logic) and schedule its video frame.
 *
 * Returns S_OK normally, or S_FALSE if a video frame could not be created
 * while pre-rolling.  Side effects: advances m_count and m_prerollCounter,
 * and ends the preroll phase once enough frames have been queued.
 */
HRESULT render( mlt_frame frame )
{
	HRESULT result = S_OK;

	// Get the audio
	double speed = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame), "_speed" );
	if ( speed == 1.0 )
	{
		// Audio is only output during normal-speed playback.
		mlt_audio_format format = mlt_audio_s16;
		int frequency = bmdAudioSampleRate48kHz;
		int samples = mlt_sample_calculator( m_fps, frequency, m_count );
		int16_t *pcm = 0;

		if ( !mlt_frame_get_audio( frame, (void**) &pcm, &format, &frequency, &m_channels, &samples ) )
		{
			int count = samples;
			if ( !m_isPrerolling )
			{
				uint32_t audioCount = 0;
				uint32_t videoCount = 0;

				// Check for resync
				m_deckLinkOutput->GetBufferedAudioSampleFrameCount( &audioCount );
				m_deckLinkOutput->GetBufferedVideoFrameCount( &videoCount );

				// Underflow typically occurs during non-normal speed playback.
				if ( audioCount < 1 || videoCount < 1 )
				{
					// Upon switching to normal playback, buffer some frames faster than realtime.
					mlt_log_info( &m_consumer, "buffer underrun: audio buf %u video buf %u frames\n", audioCount, videoCount );
					m_prerollCounter = 0;
				}

				// While rebuffering
				if ( isBuffering() )
				{
					// Only append audio to reach the ideal level and not overbuffer.
					int ideal = ( m_preroll - 1 ) * bmdAudioSampleRate48kHz / m_fps;
					int actual = m_fifo->used / m_channels + audioCount;
					int diff = ideal / 2 - actual;
					// Clamp the appended sample count to [0, count].
					count = diff < 0 ? 0 : diff < count ? diff : count;
				}
			}
			if ( count > 0 )
				sample_fifo_append( m_fifo, pcm, count * m_channels );
		}
	}

	// Create video frames while pre-rolling
	if ( m_isPrerolling )
	{
		createFrame();
		if ( !m_videoFrame )
		{
			mlt_log_error( &m_consumer, "failed to create video frame\n" );
			return S_FALSE;
		}
	}

	// Get the video
	if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered") )
	{
		mlt_image_format format = mlt_image_yuv422;
		uint8_t* image = 0;
		uint8_t* buffer = 0;
		if ( !mlt_frame_get_image( frame, &image, &format, &m_width, &m_height, 0 ) )
		{
			// Frames are recycled through m_videoFrameQ: pop from the back,
			// schedule, and push to the front.
			m_videoFrame = (IDeckLinkMutableVideoFrame*) mlt_deque_pop_back( m_videoFrameQ );
			m_videoFrame->GetBytes( (void**) &buffer );
			if ( m_displayMode->GetFieldDominance() == bmdUpperFieldFirst )
				// convert lower field first to top field first
				// NOTE(review): swab with a one-line destination offset shifts
				// the field order while swapping bytes — verify against the
				// image's actual byte order for this yuv422 layout.
				swab( image, buffer + m_width * 2, m_width * ( m_height - 1 ) * 2 );
			else
				swab( image, buffer, m_width * m_height * 2 );
			m_deckLinkOutput->ScheduleVideoFrame( m_videoFrame, m_count * m_duration, m_duration, m_timescale );
			mlt_deque_push_front( m_videoFrameQ, m_videoFrame );
		}
	}
	else
	{
		mlt_log_verbose( &m_consumer, "dropped video frame\n" );
	}
	++m_count;

	// Check for end of pre-roll
	if ( ++m_prerollCounter > m_preroll && m_isPrerolling )
	{
		// Start audio and video output
		m_deckLinkOutput->EndAudioPreroll();
		m_deckLinkOutput->StartScheduledPlayback( 0, m_timescale, 1.0 );
		m_isPrerolling = false;
	}

	return result;
}
// Replacement for broken mlt_frame_audio_mix - this filter uses an inline low pass filter // to allow mixing without volume hacking static int combine_audio( mlt_frame frame, mlt_frame that, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples ) { int ret = 0; int16_t *src, *dest; int frequency_src = *frequency, frequency_dest = *frequency; int channels_src = *channels, channels_dest = *channels; int samples_src = *samples, samples_dest = *samples; int i, j; double vp[ 6 ]; double b_weight = 1.0; if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "meta.mixdown" ) ) b_weight = 1.0 - mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame ), "meta.volume" ); mlt_frame_get_audio( that, (void**) &src, format, &frequency_src, &channels_src, &samples_src ); mlt_frame_get_audio( frame, (void**) &dest, format, &frequency_dest, &channels_dest, &samples_dest ); int silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "silent_audio" ); mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "silent_audio", 0 ); if ( silent ) memset( dest, 0, samples_dest * channels_dest * sizeof( int16_t ) ); silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( that ), "silent_audio" ); mlt_properties_set_int( MLT_FRAME_PROPERTIES( that ), "silent_audio", 0 ); if ( silent ) memset( src, 0, samples_src * channels_src * sizeof( int16_t ) ); if ( src == dest ) { *samples = samples_src; *channels = channels_src; *buffer = src; *frequency = frequency_src; return ret; } // determine number of samples to process *samples = samples_src < samples_dest ? samples_src : samples_dest; *channels = channels_src < channels_dest ? 
channels_src : channels_dest; *buffer = dest; *frequency = frequency_dest; for ( j = 0; j < *channels; j++ ) vp[ j ] = ( double )dest[ j ]; double Fc = 0.5; double B = exp(-2.0 * M_PI * Fc); double A = 1.0 - B; double v; for ( i = 0; i < *samples; i++ ) { for ( j = 0; j < *channels; j++ ) { v = ( double )( b_weight * dest[ i * channels_dest + j ] + src[ i * channels_src + j ] ); v = v < -32767 ? -32767 : v > 32768 ? 32768 : v; vp[ j ] = dest[ i * channels_dest + j ] = ( int16_t )( v * A + vp[ j ] * B ); } } return ret; }
/**
 * Pull one frame's audio and queue it into the SDL audio ring buffer.
 *
 * On the first call (init_audio == 1) the SDL audio device is opened with the
 * consumer's preferred parameters; on success init_audio becomes 0 and
 * subsequent calls block (on audio_cond) until the ring buffer has room, then
 * append either the real PCM (at normal speed) or silence.
 *
 * \param self the SDL consumer
 * \param frame the frame whose audio is played
 * \param init_audio 1 to (re)initialise the SDL device, 0 when already open
 * \param duration out: the frame's audio duration in milliseconds
 * \return the updated init_audio state (0 = playing, 1 = off/uninitialised,
 *         2 = device open failed)
 */
static int consumer_play_audio( consumer_sdl self, mlt_frame frame, int init_audio, int *duration )
{
	// Get the properties of self consumer
	mlt_properties properties = self->properties;
	mlt_audio_format afmt = mlt_audio_s16;

	// Set the preferred params of the test card signal
	int channels = mlt_properties_get_int( properties, "channels" );
	int dest_channels = channels;
	int frequency = mlt_properties_get_int( properties, "frequency" );
	static int counter = 0;
	int samples = mlt_sample_calculator( mlt_properties_get_double( self->properties, "fps" ), frequency, counter++ );
	int16_t *pcm;
	int bytes;

	// The producer may change channels/frequency/samples from the requested values.
	mlt_frame_get_audio( frame, (void**) &pcm, &afmt, &frequency, &channels, &samples );
	*duration = ( ( samples * 1000 ) / frequency );
	// NOTE(review): "audio_offset" advances the pcm pointer by int16 units,
	// not by frames — confirm the property's units against its setters.
	pcm += mlt_properties_get_int( properties, "audio_offset" );

	if ( mlt_properties_get_int( properties, "audio_off" ) )
	{
		// Audio disabled: report playing and request no initialisation.
		self->playing = 1;
		init_audio = 1;
		return init_audio;
	}

	if ( init_audio == 1 )
	{
		SDL_AudioSpec request;
		SDL_AudioSpec got;
		int audio_buffer = mlt_properties_get_int( properties, "audio_buffer" );

		// specify audio format
		memset( &request, 0, sizeof( SDL_AudioSpec ) );
		self->playing = 0;
		request.freq = frequency;
		request.format = AUDIO_S16SYS;
		request.channels = dest_channels;
		request.samples = audio_buffer;
		request.callback = sdl_fill_audio;
		request.userdata = (void *)self;
		if ( SDL_OpenAudio( &request, &got ) != 0 )
		{
			mlt_log_error( MLT_CONSUMER_SERVICE( self ), "SDL failed to open audio: %s\n", SDL_GetError() );
			init_audio = 2;
		}
		else if ( got.size != 0 )
		{
			SDL_PauseAudio( 0 );
			init_audio = 0;
		}
	}

	if ( init_audio == 0 )
	{
		mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
		bytes = samples * dest_channels * sizeof(*pcm);
		pthread_mutex_lock( &self->audio_mutex );
		// Block until the ring buffer has room for this frame's audio;
		// sdl_fill_audio signals audio_cond as it drains the buffer.
		while ( self->running && bytes > ( sizeof( self->audio_buffer) - self->audio_avail ) )
			pthread_cond_wait( &self->audio_cond, &self->audio_mutex );
		if ( self->running )
		{
			// Only append audio in the normal position when the speed is 1
			if ( mlt_properties_get_double( properties, "_speed" ) == 1 )
			{
				if ( channels == dest_channels )
				{
					memcpy( &self->audio_buffer[ self->audio_avail ], pcm, bytes );
				}
				else
				{
					// Copy dest_channels samples from each source frame.
					// NOTE(review): when channels < dest_channels this reads
					// past the source frame into the next one — confirm
					// whether producers guarantee channels >= dest_channels.
					int16_t *dest = (int16_t*) &self->audio_buffer[ self->audio_avail ];
					int i = samples + 1;
					while ( --i )
					{
						memcpy( dest, pcm, dest_channels * sizeof(*pcm) );
						pcm += channels;
						dest += dest_channels;
					}
				}
			}
			else
			{
				// Fill with silence during non-normal-speed playback.
				memset( &self->audio_buffer[ self->audio_avail ], 0, bytes );
			}
			self->audio_avail += bytes;
		}
		pthread_cond_broadcast( &self->audio_cond );
		pthread_mutex_unlock( &self->audio_mutex );
	}
	else
	{
		self->playing = 1;
	}

	return init_audio;
}
/**
 * Render the frame's audio as a w x h grayscale waveform bitmap.
 *
 * Audio is fetched as 16-bit stereo at a frequency scaled up until at least
 * one sample exists per pixel column.  Each channel occupies a horizontal
 * band of the bitmap; per sample a vertical line is drawn from the band's
 * centre, upward for positive samples and downward for negative ones, with
 * intensity accumulated when several samples share a column.
 *
 * \param self the frame to render
 * \param w the bitmap width in pixels
 * \param h the bitmap height in pixels
 * \return a w*h 8-bit buffer owned by the frame (released with the frame's
 *         properties), or NULL on allocation failure or non-positive size
 */
unsigned char *mlt_frame_get_waveform( mlt_frame self, int w, int h )
{
	int16_t *pcm = NULL;
	mlt_properties properties = MLT_FRAME_PROPERTIES( self );
	mlt_audio_format format = mlt_audio_s16;
	int frequency = 16000;
	int channels = 2;
	mlt_producer producer = mlt_frame_get_original_producer( self );
	double fps = mlt_producer_get_fps( mlt_producer_cut_parent( producer ) );
	int samples = mlt_sample_calculator( fps, frequency, mlt_frame_get_position( self ) );

	// Increase audio resolution proportional to requested image size
	while ( samples < w )
	{
		frequency += 16000;
		samples = mlt_sample_calculator( fps, frequency, mlt_frame_get_position( self ) );
	}

	// Get the pcm data
	mlt_frame_get_audio( self, (void**)&pcm, &format, &frequency, &channels, &samples );

	// Make an 8-bit buffer large enough to hold rendering
	int size = w * h;
	if ( size <= 0 )
		return NULL;
	unsigned char *bitmap = ( unsigned char* )mlt_pool_alloc( size );
	if ( bitmap != NULL )
		memset( bitmap, 0, size );
	else
		return NULL;
	mlt_properties_set_data( properties, "waveform", bitmap, size, ( mlt_destructor )mlt_pool_release, NULL );

	// Render vertical lines
	int16_t *ubound = pcm + samples * channels;
	int skip = samples / w;
	skip = !skip ? 1 : skip;
	unsigned char gray = 0xFF / skip;
	int i, j, k;

	// Iterate sample stream and along x coordinate
	for ( i = 0; pcm < ubound; i++ )
	{
		// FIX: when samples is not a multiple of w, i / skip can reach w and
		// the trailing samples would be drawn past the right edge (out of
		// bounds).  Stop once the last column is filled.
		int x = i / skip;
		if ( x >= w )
			break;

		// pcm data has channels interleaved
		for ( j = 0; j < channels; j++, pcm++ )
		{
			// Determine sample's magnitude from 2s complement;
			int pcm_magnitude = *pcm < 0 ? ~(*pcm) + 1 : *pcm;
			// FIX: INT16_MIN yields magnitude 32768, which made the lowest
			// channel's line end exactly one row below the bitmap.  Clamp so
			// the drawn line always stays inside the h rows.
			if ( pcm_magnitude > 32767 )
				pcm_magnitude = 32767;
			// The height of a line is the ratio of the magnitude multiplied by
			// the vertical resolution of a single channel
			int height = h * pcm_magnitude / channels / 2 / 32768;
			// Determine the starting y coordinate - left top, right bottom
			int displacement = h * (j * 2 + 1) / channels / 2 - ( *pcm < 0 ? 0 : height );
			// Position buffer pointer using y coordinate, stride, and x coordinate
			unsigned char *p = bitmap + x + displacement * w;
			// Draw vertical line: full intensity at the sample's extremity,
			// accumulated 'gray' elsewhere so dense columns appear brighter.
			for ( k = 0; k < height + 1; k++ )
				if ( *pcm < 0 )
					p[ w * k ] = ( k == 0 ) ? 0xFF : p[ w * k ] + gray;
				else
					p[ w * k ] = ( k == height ) ? 0xFF : p[ w * k ] + gray;
		}
	}

	return bitmap;
}