/** Get the audio for a frame and apply gain / normalisation / limiting.
 *
 * Pops the filter from the frame's audio stack, reads its per-instance
 * parameters (gain, max_gain, normalise, amplitude, limiter), fetches the
 * producer audio as s16, and applies a per-sample gain that ramps linearly
 * from the previous frame's gain to this frame's gain to avoid zipper noise.
 *
 * \param frame the frame whose audio is processed in place
 * \param buffer [out] interleaved s16 samples
 * \param format [in/out] forced to mlt_audio_s16
 * \param frequency [in/out] sample rate
 * \param channels [in/out] channel count
 * \param samples [in/out] samples per channel
 * \return 0 always
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter from the frame
	mlt_filter this = mlt_frame_pop_audio( frame );

	// Get the properties from the filter
	mlt_properties filter_props = MLT_FILTER_PROPERTIES( this );

	// Get the properties of the a frame
	mlt_properties properties = MLT_FRAME_PROPERTIES( frame );

	// Get the frame's filter instance properties
	// (per-frame parameter snapshot stashed under the filter's unique id)
	char *name = mlt_properties_get( filter_props, "_unique_id" );
	mlt_properties instance_props = mlt_properties_get_data( properties, name, NULL );

	// Get the parameters
	double gain = mlt_properties_get_double( instance_props, "gain" );
	double max_gain = mlt_properties_get_double( instance_props, "max_gain" );
	double limiter_level = 0.5; /* -6 dBFS */
	int normalise = mlt_properties_get_int( instance_props, "normalise" );
	double amplitude = mlt_properties_get_double( instance_props, "amplitude" );
	int i, j;
	double sample;
	int16_t peak;

	if ( mlt_properties_get( instance_props, "limiter" ) != NULL )
		limiter_level = mlt_properties_get_double( instance_props, "limiter" );

	// Get the producer's audio (force 16-bit so the fixed-point math below is valid)
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
	// fprintf( stderr, "filter_volume: frequency %d\n", *frequency );

	// Determine numeric limits
	// NOTE(review): samp_width is presumably a file-level #define (16 for s16)
	// not visible in this chunk — confirm it matches the forced format above.
	int bytes_per_samp = (samp_width - 1) / 8 + 1;
	int samplemax = (1 << (bytes_per_samp * 8 - 1)) - 1;
	int samplemin = -samplemax - 1;

	if ( normalise )
	{
		int window = mlt_properties_get_int( filter_props, "window" );
		double *smooth_buffer = mlt_properties_get_data( filter_props, "smooth_buffer", NULL );

		if ( window > 0 && smooth_buffer != NULL )
		{
			int smooth_index = mlt_properties_get_int( filter_props, "_smooth_index" );

			// Compute the signal power and put into smoothing buffer
			smooth_buffer[ smooth_index ] = signal_max_power( *buffer, *channels, *samples, &peak );
			// fprintf( stderr, "filter_volume: raw power %f ", smooth_buffer[ smooth_index ] );
			if ( smooth_buffer[ smooth_index ] > EPSILON )
			{
				// Advance the circular smoothing-window index only for non-silent frames
				mlt_properties_set_int( filter_props, "_smooth_index", ( smooth_index + 1 ) % window );

				// Smooth the data and compute the gain
				// fprintf( stderr, "smoothed %f over %d frames\n", get_smoothed_data( smooth_buffer, window ), window );
				gain *= amplitude / get_smoothed_data( smooth_buffer, window );
			}
		}
		else
		{
			// No smoothing window: normalise against this frame's power alone
			gain *= amplitude / signal_max_power( (int16_t*) *buffer, *channels, *samples, &peak );
		}
	}

	// if ( gain > 1.0 && normalise )
	//	fprintf(stderr, "filter_volume: limiter level %f gain %f\n", limiter_level, gain );

	if ( max_gain > 0 && gain > max_gain )
		gain = max_gain;

	// Initialise filter's previous gain value to prevent an inadvertant jump from 0.
	// A non-consecutive position (seek) also resets the ramp start to the new gain.
	mlt_position last_position = mlt_properties_get_position( filter_props, "_last_position" );
	mlt_position current_position = mlt_frame_get_position( frame );
	if ( mlt_properties_get( filter_props, "_previous_gain" ) == NULL || current_position != last_position + 1 )
		mlt_properties_set_double( filter_props, "_previous_gain", gain );

	// Start the gain out at the previous
	double previous_gain = mlt_properties_get_double( filter_props, "_previous_gain" );

	// Determine ramp increment (linear interpolation across this frame's samples)
	double gain_step = ( gain - previous_gain ) / *samples;
	// fprintf( stderr, "filter_volume: previous gain %f current gain %f step %f\n", previous_gain, gain, gain_step );

	// Save the current gain for the next iteration
	mlt_properties_set_double( filter_props, "_previous_gain", gain );
	mlt_properties_set_position( filter_props, "_last_position", current_position );

	// Ramp from the previous gain to the current
	gain = previous_gain;

	int16_t *p = (int16_t*) *buffer;

	// Apply the gain to every sample of every channel, ramping per sample frame
	for ( i = 0; i < *samples; i++ )
	{
		for ( j = 0; j < *channels; j++ )
		{
			sample = *p * gain;
			*p = ROUND( sample );
			if ( gain > 1.0 )
			{
				/* use limiter function instead of clipping */
				if ( normalise )
					*p = ROUND( samplemax * limiter( sample / (double) samplemax, limiter_level ) );
				/* perform clipping */
				else if ( sample > samplemax )
					*p = samplemax;
				else if ( sample < samplemin )
					*p = samplemin;
			}
			p++;
		}
		gain += gain_step;
	}

	return 0;
}
/** Get the current playback speed of a producer.
 *
 * \param self a producer
 * \return the value of the producer's "_speed" property
 */
double mlt_producer_get_speed( mlt_producer self )
{
	mlt_properties props = MLT_PRODUCER_PROPERTIES( self );
	return mlt_properties_get_double( props, "_speed" );
}
/** Populate the affine transform matrix from the transition's parameters.
 *
 * Two modes, selected by the "keyed" property:
 *  - unkeyed: rotate/shear values are a fixed offset plus a per-position
 *    rate (value * position), and the ox/oy offset is static;
 *  - keyed: every parameter is evaluated from its keyframe track via
 *    composite_calculate_key().
 * In both modes the operations are applied in the same order:
 * rotate x, rotate y, rotate z, shear, offset.
 *
 * \param affine [out] transform whose matrix is updated
 * \param transition the affine transition holding the parameters
 * \param position normalised position used to evaluate the parameters
 */
static inline void get_affine( affine_t *affine, mlt_transition transition, float position )
{
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

	if ( mlt_properties_get_int( properties, "keyed" ) != 0 )
	{
		// Keyframed mode: evaluate each parameter from its keyframe track.
		float rx = composite_calculate_key( transition, "rotate_x", "rotate_x_info", 360, position );
		float ry = composite_calculate_key( transition, "rotate_y", "rotate_y_info", 360, position );
		float rz = composite_calculate_key( transition, "rotate_z", "rotate_z_info", 360, position );
		float sx = composite_calculate_key( transition, "shear_x", "shear_x_info", 360, position );
		float sy = composite_calculate_key( transition, "shear_y", "shear_y_info", 360, position );
		float sz = composite_calculate_key( transition, "shear_z", "shear_z_info", 360, position );
		float off_x = composite_calculate_key( transition, "ox", "ox_info", 0, position );
		float off_y = composite_calculate_key( transition, "oy", "oy_info", 0, position );

		affine_rotate_x( affine->matrix, rx );
		affine_rotate_y( affine->matrix, ry );
		affine_rotate_z( affine->matrix, rz );
		affine_shear( affine->matrix, sx, sy, sz );
		affine_offset( affine->matrix, off_x, off_y );
	}
	else
	{
		// Simple mode: fixed component plus a rate scaled by position.
		float rx = mlt_properties_get_double( properties, "fix_rotate_x" )
			+ mlt_properties_get_double( properties, "rotate_x" ) * position;
		float ry = mlt_properties_get_double( properties, "fix_rotate_y" )
			+ mlt_properties_get_double( properties, "rotate_y" ) * position;
		float rz = mlt_properties_get_double( properties, "fix_rotate_z" )
			+ mlt_properties_get_double( properties, "rotate_z" ) * position;
		float sx = mlt_properties_get_double( properties, "fix_shear_x" )
			+ mlt_properties_get_double( properties, "shear_x" ) * position;
		float sy = mlt_properties_get_double( properties, "fix_shear_y" )
			+ mlt_properties_get_double( properties, "shear_y" ) * position;
		float sz = mlt_properties_get_double( properties, "fix_shear_z" )
			+ mlt_properties_get_double( properties, "shear_z" ) * position;
		float off_x = mlt_properties_get_double( properties, "ox" );
		float off_y = mlt_properties_get_double( properties, "oy" );

		affine_rotate_x( affine->matrix, rx );
		affine_rotate_y( affine->matrix, ry );
		affine_rotate_z( affine->matrix, rz );
		affine_shear( affine->matrix, sx, sy, sz );
		affine_offset( affine->matrix, off_x, off_y );
	}
}
/** Get the audio and remix channels (balance/fade/pan) in place.
 *
 * Pops the per-frame parameter properties and the filter from the frame's
 * audio stack, fetches the audio as s16, then mixes each output channel as a
 * weighted sum of the input channels. The mix weight ramps linearly from
 * "previous_mix" to "mix" across the frame, and a one-pole low-pass filter
 * smooths each output channel to reduce clicks.
 *
 * active_channel ("channel" property) selects the mode:
 *   -1/-2 front/rear L-R balance, -3/-4 left/right front-rear fade
 *   (optionally ganged), 0..3 pan a single channel toward its neighbour.
 *
 * Fixes over the previous revision:
 *  - clamp the mixed sample to 32767 (INT16_MAX), not 32768, which wrapped
 *    to -32768 when cast to int16_t and produced a full-scale click;
 *  - bound the vp[] (low-pass state) initialisation to 6 entries like every
 *    other loop, preventing a stack overwrite with more than 6 channels;
 *  - make the scratch-buffer size comparison signed-consistent.
 *
 * \return 0 always
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	mlt_properties properties = mlt_frame_pop_audio( frame );
	mlt_filter filter = mlt_frame_pop_audio( frame );
	mlt_properties filter_props = MLT_FILTER_PROPERTIES( filter );
	mlt_properties frame_props = MLT_FRAME_PROPERTIES( frame );

	// We can only mix s16
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame, (void**) buffer, format, frequency, channels, samples );

	// Apply silence (one-shot "silent_audio" request on the frame)
	int silent = mlt_properties_get_int( frame_props, "silent_audio" );
	mlt_properties_set_int( frame_props, "silent_audio", 0 );
	if ( silent )
		memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) );

	int src_size = 0;
	int16_t *src = mlt_properties_get_data( filter_props, "scratch_buffer", &src_size );
	int16_t *dest = *buffer;
	double v; // sample accumulator
	int i, out, in;
	double factors[6][6]; // mixing weights [in][out]
	double mix_start = 0.5, mix_end = 0.5;

	// Ramp the mix weight across the frame from previous_mix to mix.
	if ( mlt_properties_get( properties, "previous_mix" ) != NULL )
		mix_start = mlt_properties_get_double( properties, "previous_mix" );
	if ( mlt_properties_get( properties, "mix" ) != NULL )
		mix_end = mlt_properties_get_double( properties, "mix" );
	double weight = mix_start;
	double weight_step = ( mix_end - mix_start ) / *samples;
	int active_channel = mlt_properties_get_int( properties, "channel" );
	int gang = mlt_properties_get_int( properties, "gang" ) ? 2 : 1;

	// Use an inline one-pole low-pass filter to help avoid clipping
	double Fc = 0.5;
	double B = exp( -2.0 * M_PI * Fc );
	double A = 1.0 - B;
	double vp[6]; // low-pass state, one entry per output channel (max 6)

	// Setup or resize a scratch buffer
	if ( !src || src_size < (int)( *samples * *channels * sizeof(int16_t) ) )
	{
		// We allocate 4 more samples than we need to deal with jitter in the sample count per frame.
		src_size = ( *samples + 4 ) * *channels * sizeof(int16_t);
		src = mlt_pool_alloc( src_size );
		if ( !src )
			return 0;
		mlt_properties_set_data( filter_props, "scratch_buffer", src, src_size, mlt_pool_release, NULL );
	}

	// We must use a pristine copy as the source (dest is written in place)
	memcpy( src, *buffer, *samples * *channels * sizeof(int16_t) );

	// Initialize the mix factors
	for ( i = 0; i < 6; i++ )
		for ( out = 0; out < 6; out++ )
			factors[i][out] = 0.0;

	// Seed the low-pass state from the first sample frame.
	// Bounded to 6 like the mixing loops below — vp has only 6 entries.
	for ( out = 0; out < *channels && out < 6; out++ )
		vp[out] = (double) dest[out];

	for ( i = 0; i < *samples; i++ )
	{
		// Recompute the mix factors for the current (ramping) weight
		switch ( active_channel )
		{
			case -1: // Front L/R balance
			case -2: // Rear L/R balance
			{
				// Gang front/rear balance if requested
				int g, active = active_channel;
				for ( g = 0; g < gang; g++, active-- )
				{
					int left = active == -1 ? 0 : 2;
					int right = left + 1;
					if ( weight < 0.0 )
					{
						factors[left][left] = 1.0;
						factors[right][right] = weight + 1.0 < 0.0 ? 0.0 : weight + 1.0;
					}
					else
					{
						factors[left][left] = 1.0 - weight < 0.0 ? 0.0 : 1.0 - weight;
						factors[right][right] = 1.0;
					}
				}
				break;
			}
			case -3: // Left fade
			case -4: // right fade
			{
				// Gang left/right fade if requested
				int g, active = active_channel;
				for ( g = 0; g < gang; g++, active-- )
				{
					int front = active == -3 ? 0 : 1;
					int rear = front + 2;
					if ( weight < 0.0 )
					{
						factors[front][front] = 1.0;
						factors[rear][rear] = weight + 1.0 < 0.0 ? 0.0 : weight + 1.0;
					}
					else
					{
						factors[front][front] = 1.0 - weight < 0.0 ? 0.0 : 1.0 - weight;
						factors[rear][rear] = 1.0;
					}
				}
				break;
			}
			case 0: // left
			case 2:
			{
				int left = active_channel;
				int right = left + 1;
				factors[right][right] = 1.0;
				if ( weight < 0.0 ) // output left toward left
				{
					factors[left][left] = 0.5 - weight * 0.5;
					factors[left][right] = ( 1.0 + weight ) * 0.5;
				}
				else // output left toward right
				{
					factors[left][left] = ( 1.0 - weight ) * 0.5;
					factors[left][right] = 0.5 + weight * 0.5;
				}
				break;
			}
			case 1: // right
			case 3:
			{
				int right = active_channel;
				int left = right - 1;
				factors[left][left] = 1.0;
				if ( weight < 0.0 ) // output right toward left
				{
					factors[right][left] = 0.5 - weight * 0.5;
					factors[right][right] = ( 1.0 + weight ) * 0.5;
				}
				else // output right toward right
				{
					factors[right][left] = ( 1.0 - weight ) * 0.5;
					factors[right][right] = 0.5 + weight * 0.5;
				}
				break;
			}
		}

		// Do the mixing: each output is a weighted sum of the pristine inputs
		for ( out = 0; out < *channels && out < 6; out++ )
		{
			v = 0;
			for ( in = 0; in < *channels && in < 6; in++ )
				v += factors[in][out] * src[ i * *channels + in ];
			// Clamp to the int16_t range: 32768 would wrap to -32768 on the cast.
			v = v < -32767 ? -32767 : v > 32767 ? 32767 : v;
			vp[out] = dest[ i * *channels + out ] = (int16_t) ( v * A + vp[ out ] * B );
		}
		weight += weight_step;
	}

	return 0;
}
/** Fetch a frame's audio and feed it to the SDL audio ring buffer.
 *
 * On the first call (init_audio == 1) it opens the SDL audio device; once
 * open (init_audio == 0) it blocks until the shared audio buffer has room,
 * then appends this frame's PCM (or silence when paused and not scrubbing).
 *
 * \param self the SDL consumer
 * \param frame the frame whose audio is played
 * \param init_audio current init state: 1 = not yet opened, 0 = open,
 *        2 = open failed
 * \param duration [out] this frame's audio duration in milliseconds
 * \return the (possibly updated) init_audio state
 */
static int consumer_play_audio( consumer_sdl self, mlt_frame frame, int init_audio, int *duration )
{
	// Get the properties of this consumer
	mlt_properties properties = self->properties;
	mlt_audio_format afmt = mlt_audio_s16;

	// Set the preferred params of the test card signal
	int channels = mlt_properties_get_int( properties, "channels" );
	int frequency = mlt_properties_get_int( properties, "frequency" );
	int scrub = mlt_properties_get_int( properties, "scrub_audio" );
	// Per-process frame counter used to derive the sample count for this frame
	static int counter = 0;
	int samples = mlt_sample_calculator( mlt_properties_get_double( self->properties, "fps" ), frequency, counter++ );
	int16_t *pcm;
	int bytes;

	mlt_frame_get_audio( frame, (void**) &pcm, &afmt, &frequency, &channels, &samples );
	*duration = ( ( samples * 1000 ) / frequency );

	if ( mlt_properties_get_int( properties, "audio_off" ) )
	{
		// Audio disabled: report playing and keep init state at 1
		self->playing = 1;
		init_audio = 1;
		return init_audio;
	}

	if ( init_audio == 1 )
	{
		SDL_AudioSpec request;
		SDL_AudioSpec got;
		int audio_buffer = mlt_properties_get_int( properties, "audio_buffer" );

		// specify audio format
		memset( &request, 0, sizeof( SDL_AudioSpec ) );
		self->playing = 0;
		request.freq = frequency;
		request.format = AUDIO_S16SYS;
		request.channels = channels;
		request.samples = audio_buffer;
		request.callback = sdl_fill_audio;
		request.userdata = (void *)self;
		if ( SDL_OpenAudio( &request, &got ) != 0 )
		{
			mlt_log_error( MLT_CONSUMER_SERVICE( self ), "SDL failed to open audio: %s\n", SDL_GetError() );
			init_audio = 2; // permanent failure: skip audio from now on
		}
		else if ( got.size != 0 )
		{
			SDL_PauseAudio( 0 );
			init_audio = 0;
		}
	}

	if ( init_audio == 0 )
	{
		mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
		bytes = ( samples * channels * 2 );
		pthread_mutex_lock( &self->audio_mutex );
		// Wait for the SDL callback to drain enough of the ring buffer.
		// NOTE(review): sizeof(...) is unsigned so this comparison is
		// signed/unsigned mixed — fine while bytes >= 0, worth confirming.
		while ( self->running && bytes > ( sizeof( self->audio_buffer) - self->audio_avail ) )
			pthread_cond_wait( &self->audio_cond, &self->audio_mutex );
		if ( self->running )
		{
			// Write silence when paused unless "scrub_audio" requests audible scrubbing
			if ( scrub || mlt_properties_get_double( properties, "_speed" ) == 1 )
				memcpy( &self->audio_buffer[ self->audio_avail ], pcm, bytes );
			else
				memset( &self->audio_buffer[ self->audio_avail ], 0, bytes );
			self->audio_avail += bytes;
		}
		pthread_cond_broadcast( &self->audio_cond );
		pthread_mutex_unlock( &self->audio_mutex );
	}
	else
	{
		// Audio unavailable: let video proceed without it
		self->playing = 1;
	}

	return init_audio;
}
/** Main consumer thread: pull frames, play audio, and queue video.
 *
 * Runs until self->running is cleared. For each frame it plays the audio
 * (which paces playback), lazily starts the video thread once audio is
 * rolling, and either queues the frame for the video thread (normal speed)
 * or renders it immediately and waits for a refresh request (paused).
 *
 * \param arg the consumer_sdl instance
 * \return NULL
 */
static void *consumer_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Get the consumer
	mlt_consumer consumer = &self->parent;

	// Get the properties
	mlt_properties consumer_props = MLT_CONSUMER_PROPERTIES( consumer );

	// Video thread
	pthread_t thread;

	// internal intialization
	int init_audio = 1;
	int init_video = 1;
	mlt_frame frame = NULL;
	mlt_properties properties = NULL;
	int duration = 0;
	int64_t playtime = 0;
	struct timespec tm = { 0, 100000 }; // 100 microsecond poll while the queue is full
//	int last_position = -1;

	pthread_mutex_lock( &self->refresh_mutex );
	self->refresh_count = 0;
	pthread_mutex_unlock( &self->refresh_mutex );

	// Loop until told not to
	while( self->running )
	{
		// Get a frame from the attached producer
		frame = mlt_consumer_rt_frame( consumer );

		// Ensure that we have a frame
		if ( frame )
		{
			// Get the frame properties
			properties = MLT_FRAME_PROPERTIES( frame );

			// Get the speed of the frame
			double speed = mlt_properties_get_double( properties, "_speed" );

			// Clear refresh (block events so clearing it does not retrigger listeners)
			mlt_events_block( consumer_props, consumer_props );
			mlt_properties_set_int( consumer_props, "refresh", 0 );
			mlt_events_unblock( consumer_props, consumer_props );

			// Play audio
			init_audio = consumer_play_audio( self, frame, init_audio, &duration );

			// Determine the start time now
			if ( self->playing && init_video )
			{
				// Create the video thread
				pthread_create( &thread, NULL, video_thread, self );

				// Video doesn't need to be initialised any more
				init_video = 0;
			}

			// Set playtime for this frame
			mlt_properties_set_int( properties, "playtime", playtime );

			// Throttle: don't let the video queue grow beyond 15 frames
			while ( self->running && speed != 0 && mlt_deque_count( self->queue ) > 15 )
				nanosleep( &tm, NULL );

			// Push this frame to the back of the queue
			if ( self->running && speed )
			{
				pthread_mutex_lock( &self->video_mutex );
				if ( self->is_purge && speed == 1.0 )
				{
					// A purge was requested: drop this frame instead of queueing it
					mlt_frame_close( frame );
					frame = NULL;
					self->is_purge = 0;
				}
				else
				{
					mlt_deque_push_back( self->queue, frame );
					pthread_cond_broadcast( &self->video_cond );
				}
				pthread_mutex_unlock( &self->video_mutex );

				// Calculate the next playtime
				playtime += ( duration * 1000 );
			}
			else if ( self->running )
			{
				// Paused: show the frame now, then sleep until a refresh is requested
				pthread_mutex_lock( &self->refresh_mutex );
				consumer_play_video( self, frame );
				mlt_frame_close( frame );
				frame = NULL;
				self->refresh_count --;
				if ( self->refresh_count <= 0 )
				{
					pthread_cond_wait( &self->refresh_cond, &self->refresh_mutex );
				}
				pthread_mutex_unlock( &self->refresh_mutex );
			}

			// Optimisation to reduce latency
			if ( speed == 1.0 )
			{
				// TODO: disabled due to misbehavior on parallel-consumer
//				if ( last_position != -1 && last_position + 1 != mlt_frame_get_position( frame ) )
//					mlt_consumer_purge( consumer );
//				last_position = mlt_frame_get_position( frame );
			}
			else
			{
				mlt_consumer_purge( consumer );
//				last_position = -1;
			}
		}
	}

	// Kill the video thread
	if ( init_video == 0 )
	{
		pthread_mutex_lock( &self->video_mutex );
		pthread_cond_broadcast( &self->video_cond );
		pthread_mutex_unlock( &self->video_mutex );
		pthread_join( thread, NULL );
	}

	if ( frame )
	{
		// The video thread has cleared out the queue. But the audio was played
		// for this frame. So play the video before stopping so the display has
		// the option to catch up with the audio.
		consumer_play_video( self, frame );
		mlt_frame_close( frame );
		frame = NULL;
	}

	self->audio_avail = 0;

	return NULL;
}
/** Render one MLT frame to the DeckLink output device.
 *
 * Audio: at normal speed, fetches s16 audio and appends it to the sample
 * FIFO; while rebuffering it trims the appended count so the device buffer
 * only fills to half the ideal preroll level. Video: pops a reusable
 * DeckLink frame, byte-swaps the yuv422 image into it (shifting one line
 * when the display mode is upper-field-first), and schedules it. Pre-roll
 * ends — and scheduled playback starts — once m_prerollCounter exceeds
 * m_preroll.
 *
 * \param frame the frame to output
 * \return S_OK normally; S_FALSE if a video frame could not be created
 */
HRESULT render( mlt_frame frame )
{
	HRESULT result = S_OK;

	// Get the audio
	double speed = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame), "_speed" );
	if ( speed == 1.0 )
	{
		mlt_audio_format format = mlt_audio_s16;
		int frequency = bmdAudioSampleRate48kHz;
		int samples = mlt_sample_calculator( m_fps, frequency, m_count );
		int16_t *pcm = 0;

		if ( !mlt_frame_get_audio( frame, (void**) &pcm, &format, &frequency, &m_channels, &samples ) )
		{
			int count = samples;
			if ( !m_isPrerolling )
			{
				uint32_t audioCount = 0;
				uint32_t videoCount = 0;

				// Check for resync
				m_deckLinkOutput->GetBufferedAudioSampleFrameCount( &audioCount );
				m_deckLinkOutput->GetBufferedVideoFrameCount( &videoCount );

				// Underflow typically occurs during non-normal speed playback.
				if ( audioCount < 1 || videoCount < 1 )
				{
					// Upon switching to normal playback, buffer some frames faster than realtime.
					mlt_log_info( &m_consumer, "buffer underrun: audio buf %u video buf %u frames\n", audioCount, videoCount );
					m_prerollCounter = 0;
				}

				// While rebuffering
				if ( isBuffering() )
				{
					// Only append audio to reach the ideal level and not overbuffer.
					int ideal = ( m_preroll - 1 ) * bmdAudioSampleRate48kHz / m_fps;
					int actual = m_fifo->used / m_channels + audioCount;
					int diff = ideal / 2 - actual;
					count = diff < 0 ? 0 : diff < count ? diff : count;
				}
			}
			if ( count > 0 )
				sample_fifo_append( m_fifo, pcm, count * m_channels );
		}
	}

	// Create video frames while pre-rolling
	if ( m_isPrerolling )
	{
		createFrame();
		if ( !m_videoFrame )
		{
			mlt_log_error( &m_consumer, "failed to create video frame\n" );
			return S_FALSE;
		}
	}

	// Get the video
	if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered") )
	{
		mlt_image_format format = mlt_image_yuv422;
		uint8_t* image = 0;
		uint8_t* buffer = 0;

		if ( !mlt_frame_get_image( frame, &image, &format, &m_width, &m_height, 0 ) )
		{
			// Recycle a frame from the queue, fill it, schedule it, and requeue it
			m_videoFrame = (IDeckLinkMutableVideoFrame*) mlt_deque_pop_back( m_videoFrameQ );
			m_videoFrame->GetBytes( (void**) &buffer );
			if ( m_displayMode->GetFieldDominance() == bmdUpperFieldFirst )
				// convert lower field first to top field first
				swab( image, buffer + m_width * 2, m_width * ( m_height - 1 ) * 2 );
			else
				swab( image, buffer, m_width * m_height * 2 );
			m_deckLinkOutput->ScheduleVideoFrame( m_videoFrame, m_count * m_duration, m_duration, m_timescale );
			mlt_deque_push_front( m_videoFrameQ, m_videoFrame );
		}
	}
	else
	{
		mlt_log_verbose( &m_consumer, "dropped video frame\n" );
	}
	++m_count;

	// Check for end of pre-roll
	if ( ++m_prerollCounter > m_preroll && m_isPrerolling )
	{
		// Start audio and video output
		m_deckLinkOutput->EndAudioPreroll();
		m_deckLinkOutput->StartScheduledPlayback( 0, m_timescale, 1.0 );
		m_isPrerolling = false;
	}

	return result;
}
/** Get the image and run the video stabilisation analyse/apply passes.
 *
 * Pass 1 (no "vectors" property): accumulate a per-frame motion estimate
 * into pos_i; on the last frame, high-pass filter the path and serialize
 * the result into the "vectors" property. Pass 2 ("vectors" present):
 * deserialize the motion vectors once, then resample each frame, with a
 * per-scanline interpolated offset to compensate rolling shutter
 * ("shutterangle").
 *
 * \param frame the frame to process (image forced to rgb24, deinterlaced)
 * \return the error from mlt_frame_get_image (0 on success)
 */
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	mlt_filter filter = mlt_frame_pop_service( frame );
	*format = mlt_image_rgb24;
	mlt_properties_set_int( MLT_FRAME_PROPERTIES(frame), "consumer_deinterlace", 1 );
	int error = mlt_frame_get_image( frame, image, format, width, height, 1 );
	if ( !error && *image )
	{
		videostab self = filter->child;
		mlt_position length = mlt_filter_get_length2( filter, frame );
		int h = *height;
		int w = *width;

		// Service locks are for concurrency control
		mlt_service_lock( MLT_FILTER_SERVICE( filter ) );
		if ( !self->initialized )
		{
			// Initialize our context (estimator, motion arrays, resampler)
			// NOTE(review): malloc results are not checked here — a failed
			// allocation would crash below; consider handling.
			self->initialized = 1;
			self->es = es_init( w, h );
			self->pos_i = (vc*) malloc( length * sizeof(vc) );
			self->pos_h = (vc*) malloc( length * sizeof(vc) );
			self->pos_y = (vc*) malloc( h * sizeof(vc) );
			self->rs = rs_init( w, h );
		}

		char *vectors = mlt_properties_get( MLT_FILTER_PROPERTIES(filter), "vectors" );
		if ( !vectors )
		{
			// Analyse: integrate this frame's motion estimate onto the path
			mlt_position pos = mlt_filter_get_position( filter, frame );
			self->pos_i[pos] = vc_add( pos == 0 ? vc_zero() : self->pos_i[pos - 1], es_estimate( self->es, *image ) );

			// On last frame
			if ( pos == length - 1 )
			{
				mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE(filter) );
				double fps = mlt_profile_fps( profile );

				// Filter and store the results
				hipass( self->pos_i, self->pos_h, length, fps );
				serialize_vectors( self, length );
			}
		}
		else
		{
			// Apply
			if ( self->initialized != 2 )
			{
				// Load analysis results from property (only once)
				self->initialized = 2;
				deserialize_vectors( self, vectors, length );
			}
			if ( self->initialized == 2 )
			{
				// Stabilize: per-scanline offset compensates rolling shutter
				float shutter_angle = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame) , "shutterangle" );
				float pos = mlt_filter_get_position( filter, frame );
				int i;

				for (i = 0; i < h; i ++)
					self->pos_y[i] = interp( self->lanc_kernels,self->pos_h, length, pos + (i - h / 2.0) * shutter_angle / (h * 360.0) );
				rs_resample( self->lanc_kernels,self->rs, *image, self->pos_y );
			}
		}
		mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );
	}
	return error;
}
/** Get the audio associated to the frame.
 *
 * Resolution order:
 *  1. If audio is not hidden ("test_audio" unset) and a getter was pushed
 *     onto the frame's audio stack, invoke it and cache the resulting
 *     parameters on the frame.
 *  2. Else, if audio was previously cached on the frame ("audio" data),
 *     return the cached buffer and parameters.
 *  3. Else, synthesize a silent "test card" buffer using defaults
 *     (1920 samples, 2 channels, 48 kHz) for any non-positive request,
 *     and mark the frame with "test_audio".
 * In cases 1 and 2 the audio is converted to the caller's requested format
 * when a converter is attached. Finally, a pending one-shot "meta.volume"
 * is applied in place to s16 audio and then cleared.
 *
 * \param self a frame
 * \param buffer [out] the audio buffer
 * \param format [in/out] requested, then actual, audio format
 * \param frequency [in/out] sample rate
 * \param channels [in/out] channel count
 * \param samples [in/out] samples per channel
 * \return 0 always
 */
int mlt_frame_get_audio( mlt_frame self, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	mlt_get_audio get_audio = mlt_frame_pop_audio( self );
	mlt_properties properties = MLT_FRAME_PROPERTIES( self );
	int hide = mlt_properties_get_int( properties, "test_audio" );
	mlt_audio_format requested_format = *format;

	if ( hide == 0 && get_audio != NULL )
	{
		// Case 1: delegate to the pushed getter and cache the results
		get_audio( self, buffer, format, frequency, channels, samples );
		mlt_properties_set_int( properties, "audio_frequency", *frequency );
		mlt_properties_set_int( properties, "audio_channels", *channels );
		mlt_properties_set_int( properties, "audio_samples", *samples );
		mlt_properties_set_int( properties, "audio_format", *format );
		if ( self->convert_audio && *buffer && requested_format != mlt_audio_none )
			self->convert_audio( self, buffer, format, requested_format );
	}
	else if ( mlt_properties_get_data( properties, "audio", NULL ) )
	{
		// Case 2: reuse audio already cached on the frame
		*buffer = mlt_properties_get_data( properties, "audio", NULL );
		*format = mlt_properties_get_int( properties, "audio_format" );
		*frequency = mlt_properties_get_int( properties, "audio_frequency" );
		*channels = mlt_properties_get_int( properties, "audio_channels" );
		*samples = mlt_properties_get_int( properties, "audio_samples" );
		if ( self->convert_audio && *buffer && requested_format != mlt_audio_none )
			self->convert_audio( self, buffer, format, requested_format );
	}
	else
	{
		// Case 3: no source — produce a silent test buffer with sane defaults
		int size = 0;
		*samples = *samples <= 0 ? 1920 : *samples;
		*channels = *channels <= 0 ? 2 : *channels;
		*frequency = *frequency <= 0 ? 48000 : *frequency;
		mlt_properties_set_int( properties, "audio_frequency", *frequency );
		mlt_properties_set_int( properties, "audio_channels", *channels );
		mlt_properties_set_int( properties, "audio_samples", *samples );
		mlt_properties_set_int( properties, "audio_format", *format );

		size = mlt_audio_format_size( *format, *samples, *channels );
		if ( size )
			*buffer = mlt_pool_alloc( size );
		else
			*buffer = NULL;
		if ( *buffer )
			memset( *buffer, 0, size );
		mlt_properties_set_data( properties, "audio", *buffer, size, ( mlt_destructor )mlt_pool_release, NULL );
		mlt_properties_set_int( properties, "test_audio", 1 );
	}

	// TODO: This does not belong here
	// One-shot "meta.volume" scaling applied in place, then cleared
	if ( *format == mlt_audio_s16 && mlt_properties_get( properties, "meta.volume" ) && *buffer )
	{
		double value = mlt_properties_get_double( properties, "meta.volume" );

		if ( value == 0.0 )
		{
			memset( *buffer, 0, *samples * *channels * 2 );
		}
		else if ( value != 1.0 )
		{
			int total = *samples * *channels;
			int16_t *p = *buffer;
			while ( total -- )
			{
				*p = *p * value;
				p ++;
			}
		}
		mlt_properties_set( properties, "meta.volume", NULL );
	}

	return 0;
}
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable ) { mlt_filter filter = (mlt_filter) mlt_frame_pop_service( frame ); mlt_properties properties = MLT_FILTER_PROPERTIES( filter ); mlt_position position = mlt_filter_get_position( filter, frame ); mlt_position length = mlt_filter_get_length2( filter, frame ); double level = 1.0; // Use animated "level" property only if it has been set since init char* level_property = mlt_properties_get( properties, "level" ); if ( level_property != NULL ) { level = mlt_properties_anim_get_double( properties, "level", position, length ); } else { // Get level using old "start,"end" mechanics // Get the starting brightness level level = fabs( mlt_properties_get_double( properties, "start" ) ); // If there is an end adjust gain to the range if ( mlt_properties_get( properties, "end" ) != NULL ) { // Determine the time position of this frame in the transition duration double end = fabs( mlt_properties_get_double( properties, "end" ) ); level += ( end - level ) * mlt_filter_get_progress( filter, frame ); } } // Do not cause an image conversion unless there is real work to do. if ( level != 1.0 ) *format = mlt_image_yuv422; // Get the image int error = mlt_frame_get_image( frame, image, format, width, height, 1 ); // Only process if we have no error. if ( error == 0 ) { // Only process if level is something other than 1 if ( level != 1.0 && *format == mlt_image_yuv422 ) { int i = *width * *height + 1; uint8_t *p = *image; int32_t m = level * ( 1 << 16 ); int32_t n = 128 * ( ( 1 << 16 ) - m ); while ( --i ) { p[0] = CLAMP( (p[0] * m) >> 16, 16, 235 ); p[1] = CLAMP( (p[1] * m + n) >> 16, 16, 240 ); p += 2; } } // Process the alpha channel if requested. if ( mlt_properties_get( properties, "alpha" ) ) { double alpha = mlt_properties_anim_get_double( properties, "alpha", position, length ); alpha = alpha >= 0.0 ? 
alpha : level; if ( alpha != 1.0 ) { int32_t m = alpha * ( 1 << 16 ); int i = *width * *height + 1; if ( *format == mlt_image_rgb24a ) { uint8_t *p = *image + 3; for ( ; --i; p += 4 ) p[0] = ( p[0] * m ) >> 16; } else {
/** Get the sample aspect ratio of a frame.
 *
 * \param self a frame
 * \return the value of the frame's "aspect_ratio" property
 */
double mlt_frame_get_aspect_ratio( mlt_frame self )
{
	mlt_properties props = MLT_FRAME_PROPERTIES( self );
	return mlt_properties_get_double( props, "aspect_ratio" );
}
/** Produce a solid-colour image for the frame.
 *
 * The producer caches one rendered image; it is regenerated only when the
 * colour string, dimensions, or format change. The chosen format is filled
 * per-pixel (or per-plane for yuv420p), a separate full-frame alpha plane
 * is built from the colour's alpha, and a copy of the cached image is
 * attached to the frame so the frame owns (and frees) its own buffer.
 *
 * \param frame the frame to fill
 * \param buffer [out] the frame's copy of the image
 * \param format [in/out] coerced to a supported format (default rgb24a)
 * \param width [in/out] defaults to the profile width when <= 0
 * \param height [in/out] defaults to the profile height when <= 0
 * \return 0 always
 */
static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
{
	// Obtain properties of frame
	mlt_properties properties = MLT_FRAME_PROPERTIES( frame );

	// Obtain the producer for this frame
	mlt_producer producer = mlt_properties_get_data( properties, "producer_colour", NULL );

	// Lock while reading/updating the producer's cached image
	mlt_service_lock( MLT_PRODUCER_SERVICE( producer ) );

	// Obtain properties of producer
	mlt_properties producer_props = MLT_PRODUCER_PROPERTIES( producer );

	// Get the current and previous colour strings
	char *now = mlt_properties_get( producer_props, "resource" );
	char *then = mlt_properties_get( producer_props, "_resource" );

	// Get the current image and dimensions cached in the producer
	int size = 0;
	uint8_t *image = mlt_properties_get_data( producer_props, "image", &size );
	int current_width = mlt_properties_get_int( producer_props, "_width" );
	int current_height = mlt_properties_get_int( producer_props, "_height" );
	mlt_image_format current_format = mlt_properties_get_int( producer_props, "_format" );

	// Parse the colour: strip any leading path, keeping only the last segment
	if ( now && strchr( now, '/' ) )
	{
		now = strdup( strrchr( now, '/' ) + 1 );
		mlt_properties_set( producer_props, "resource", now );
		free( now );
		now = mlt_properties_get( producer_props, "resource" );
	}
	mlt_color color = mlt_properties_get_color( producer_props, "resource" );

	// Choose suitable out values if nothing specific requested
	if ( *format == mlt_image_none || *format == mlt_image_glsl )
		*format = mlt_image_rgb24a;
	if ( *width <= 0 )
		*width = mlt_service_profile( MLT_PRODUCER_SERVICE(producer) )->width;
	if ( *height <= 0 )
		*height = mlt_service_profile( MLT_PRODUCER_SERVICE(producer) )->height;

	// Choose default image format if specific request is unsuported
	if (*format!=mlt_image_yuv420p && *format!=mlt_image_yuv422 && *format!=mlt_image_rgb24 && *format!= mlt_image_glsl && *format!= mlt_image_glsl_texture)
		*format = mlt_image_rgb24a;

	// See if we need to regenerate (colour, size, or format changed)
	if ( !now || ( then && strcmp( now, then ) ) || *width != current_width || *height != current_height || *format != current_format )
	{
		// Color the image
		int i = *width * *height + 1;
		int bpp;

		// Allocate the image
		// NOTE(review): mlt_pool_alloc result is not checked — a failed
		// allocation would be dereferenced below; consider handling.
		size = mlt_image_format_size( *format, *width, *height, &bpp );
		uint8_t *p = image = mlt_pool_alloc( size );

		// Update the producer's cache before releasing the lock
		mlt_properties_set_data( producer_props, "image", image, size, mlt_pool_release, NULL );
		mlt_properties_set_int( producer_props, "_width", *width );
		mlt_properties_set_int( producer_props, "_height", *height );
		mlt_properties_set_int( producer_props, "_format", *format );
		mlt_properties_set( producer_props, "_resource", now );

		mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );

		switch ( *format )
		{
			case mlt_image_yuv420p:
			{
				// Planar: fill Y plane, then quarter-size U and V planes
				int plane_size = *width * *height;
				uint8_t y, u, v;

				RGB2YUV_601_SCALED( color.r, color.g, color.b, y, u, v );
				memset(p + 0, y, plane_size);
				memset(p + plane_size, u, plane_size/4);
				memset(p + plane_size + plane_size/4, v, plane_size/4);
				break;
			}
			case mlt_image_yuv422:
			{
				// Packed YUYV; handle an odd width with a trailing YU pair
				int uneven = *width % 2;
				int count = ( *width - uneven ) / 2 + 1;
				uint8_t y, u, v;

				RGB2YUV_601_SCALED( color.r, color.g, color.b, y, u, v );
				i = *height + 1;
				while ( --i )
				{
					int j = count;
					while ( --j )
					{
						*p ++ = y;
						*p ++ = u;
						*p ++ = y;
						*p ++ = v;
					}
					if ( uneven )
					{
						*p ++ = y;
						*p ++ = u;
					}
				}
				break;
			}
			case mlt_image_rgb24:
				while ( --i )
				{
					*p ++ = color.r;
					*p ++ = color.g;
					*p ++ = color.b;
				}
				break;
			case mlt_image_glsl:
			case mlt_image_glsl_texture:
				memset(p, 0, size);
				break;
			case mlt_image_rgb24a:
				while ( --i )
				{
					*p ++ = color.r;
					*p ++ = color.g;
					*p ++ = color.b;
					*p ++ = color.a;
				}
				break;
			default:
				mlt_log_error( MLT_PRODUCER_SERVICE( producer ), "invalid image format %s\n", mlt_image_format_name( *format ) );
		}
	}
	else
	{
		// Cache hit: nothing to regenerate
		mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
	}

	// Create the alpha channel
	int alpha_size = *width * *height;
	uint8_t *alpha = mlt_pool_alloc( alpha_size );

	// Initialise the alpha
	if ( alpha )
		memset( alpha, color.a, alpha_size );

	// Clone our image (the frame must own its own copy of the cached image)
	*buffer = mlt_pool_alloc( size );
	memcpy( *buffer, image, size );

	// Now update properties so we free the copy after
	mlt_frame_set_image( frame, *buffer, size, mlt_pool_release );
	mlt_frame_set_alpha( frame, alpha, alpha_size, mlt_pool_release );
	mlt_properties_set_double( properties, "aspect_ratio", mlt_properties_get_double( producer_props, "aspect_ratio" ) );
	mlt_properties_set_int( properties, "meta.media.width", *width );
	mlt_properties_set_int( properties, "meta.media.height", *height );

	return 0;
}
/** Get the image for the a-frame with the Movit luma/mix transition applied.
 *
 * Renders the transition on the GPU via a Movit EffectChain. Depending on the
 * requested format, the result is either handed back as a GL texture id or
 * read back into CPU memory through a PBO.
 *
 * \param a_frame the primary frame (carries the transition and b-frame on its stacks)
 * \param image [out] receives either a pointer to a GLuint texture id or a CPU image buffer
 * \param format [in/out] requested image format on input; actual format on output
 * \param width [in/out] image width
 * \param height [in/out] image height
 * \param writable unused here
 * \return 0 on success, 1 on FBO/PBO/texture acquisition failure, 2 if the chain is not initialised
 */
static int get_image( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	int error = 0;

	// Get the b frame from the stack
	mlt_frame b_frame = (mlt_frame) mlt_frame_pop_frame( a_frame );

	// Get the transition object
	mlt_transition transition = (mlt_transition) mlt_frame_pop_service( a_frame );

	// Get the properties of the transition
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

	// Get the properties of the a frame
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );

	// Get the movit objects; the service lock guards the shared GLSL state below
	mlt_service service = MLT_TRANSITION_SERVICE( transition );
	mlt_service_lock( service );
	EffectChain* chain = GlslManager::get_chain( service );
	Effect* effect = (Effect*) mlt_properties_get_data( properties, "movit effect", NULL );
	MltInput* a_input = GlslManager::get_input( service );
	MltInput* b_input = (MltInput*) mlt_properties_get_data( properties, "movit input B", NULL );

	// Remember the caller's requested format; *format is clobbered below to fetch textures
	mlt_image_format output_format = *format;

	if ( !chain || !a_input ) {
		mlt_service_unlock( service );
		return 2;
	}

	// Get the transition parameters
	int reverse = mlt_properties_get_int( properties, "reverse" );
	// An explicit "mix" property overrides the position-derived progress
	double mix = mlt_properties_get( properties, "mix" ) ? mlt_properties_get_double( properties, "mix" ) : mlt_transition_get_progress( transition, a_frame );
	double inverse = 1.0 - mix;

	// Set the movit parameters; "reverse" swaps which stream fades in
	bool ok = effect->set_float( "strength_first", reverse ? mix : inverse );
	ok |= effect->set_float( "strength_second", reverse ? inverse : mix );
	assert( ok );

	// Get the frames' textures and attach them as FBO inputs to the chain
	GLuint* texture_id[2] = {0, 0};
	*format = mlt_image_glsl_texture;
	mlt_frame_get_image( a_frame, (uint8_t**) &texture_id[0], format, width, height, 0 );
	a_input->useFBOInput( chain, *texture_id[0] );
	*format = mlt_image_glsl_texture;
	mlt_frame_get_image( b_frame, (uint8_t**) &texture_id[1], format, width, height, 0 );
	b_input->useFBOInput( chain, *texture_id[1] );

	// Set resolution to that of the a_frame
	*width = mlt_properties_get_int( a_props, "width" );
	*height = mlt_properties_get_int( a_props, "height" );

	// Setup rendering to an FBO
	GlslManager* glsl = GlslManager::get_instance();
	glsl_fbo fbo = glsl->get_fbo( *width, *height );
	if ( output_format == mlt_image_glsl_texture ) {
		// Caller wants a GL texture: render into a texture attached to the FBO
		glsl_texture texture = glsl->get_texture( *width, *height, GL_RGBA );

		glBindFramebuffer( GL_FRAMEBUFFER, fbo->fbo );
		check_error();
		glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture->texture, 0 );
		check_error();
		glBindFramebuffer( GL_FRAMEBUFFER, 0 );
		check_error();

		GlslManager::render( service, chain, fbo->fbo, *width, *height );

		glFinish();
		check_error();
		glBindFramebuffer( GL_FRAMEBUFFER, 0 );
		check_error();

		// The "image" is the address of the texture id; the texture is released
		// via the frame property destructor, not here.
		*image = (uint8_t*) &texture->texture;
		mlt_frame_set_image( a_frame, *image, 0, NULL );
		mlt_properties_set_data( properties, "movit.convert", texture, 0, (mlt_destructor) GlslManager::release_texture, NULL );
		*format = output_format;
	}
	else {
		// Use a PBO to hold the data we read back with glReadPixels()
		// (Intel/DRI goes into a slow path if we don't read to PBO)
		GLenum gl_format = ( output_format == mlt_image_rgb24a || output_format == mlt_image_opengl )? GL_RGBA : GL_RGB;
		int img_size = *width * *height * ( gl_format == GL_RGB? 3 : 4 );
		glsl_pbo pbo = glsl->get_pbo( img_size );
		glsl_texture texture = glsl->get_texture( *width, *height, gl_format );

		if ( fbo && pbo && texture ) {
			// Set the FBO
			glBindFramebuffer( GL_FRAMEBUFFER, fbo->fbo );
			check_error();
			glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture->texture, 0 );
			check_error();
			glBindFramebuffer( GL_FRAMEBUFFER, 0 );
			check_error();

			GlslManager::render( service, chain, fbo->fbo, *width, *height );

			// Read FBO into PBO
			glBindBuffer( GL_PIXEL_PACK_BUFFER_ARB, pbo->pbo );
			check_error();
			glBufferData( GL_PIXEL_PACK_BUFFER_ARB, img_size, NULL, GL_STREAM_READ );
			check_error();
			glReadPixels( 0, 0, *width, *height, gl_format, GL_UNSIGNED_BYTE, BUFFER_OFFSET(0) );
			check_error();

			// Copy from PBO into a pooled buffer owned by the frame
			uint8_t* buf = (uint8_t*) glMapBuffer( GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY );
			check_error();
			*format = gl_format == GL_RGBA ? mlt_image_rgb24a : mlt_image_rgb24;
			*image = (uint8_t*) mlt_pool_alloc( img_size );
			mlt_frame_set_image( a_frame, *image, img_size, mlt_pool_release );
			memcpy( *image, buf, img_size );

			// Release PBO and FBO
			glUnmapBuffer( GL_PIXEL_PACK_BUFFER_ARB );
			check_error();
			glBindBuffer( GL_PIXEL_PACK_BUFFER_ARB, 0 );
			check_error();
			glBindFramebuffer( GL_FRAMEBUFFER, 0 );
			check_error();
			glBindTexture( GL_TEXTURE_2D, 0 );
			check_error();
			GlslManager::release_texture( texture );
		}
		else {
			error = 1;
		}
	}
	if ( fbo ) GlslManager::release_fbo( fbo );

	mlt_service_unlock( service );
	return error;
}
/** Process the audio mix transition.
 *
 * Computes the starting and ending mix levels for this frame and stores them
 * on the b-frame ("audio.previous_mix" / "audio.mix"), then pushes the
 * transition, b-frame, and get_audio override onto the a-frame's audio stack.
 *
 * \param transition the mix transition
 * \param a_frame the primary (destination) frame
 * \param b_frame the secondary (source) frame
 * \return the a_frame with the audio override installed
 */
static mlt_frame transition_process( mlt_transition transition, mlt_frame a_frame, mlt_frame b_frame )
{
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );
	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

	// Only if mix is specified, otherwise a producer may set the mix
	if ( mlt_properties_get( properties, "start" ) )
	{
		// Determine the time position of this frame in the transition duration
		mlt_properties props = mlt_properties_get_data( MLT_FRAME_PROPERTIES( b_frame ), "_producer", NULL );
		mlt_position in = mlt_properties_get_int( props, "in" );
		mlt_position out = mlt_properties_get_int( props, "out" );
		int length = mlt_properties_get_int( properties, "length" );
		mlt_position time = mlt_properties_get_int( props, "_frame" );
		double mix = mlt_transition_get_progress( transition, b_frame );
		// When always active, progress is over the producer's whole in..out range
		if ( mlt_properties_get_int( properties, "always_active" ) )
			mix = ( double ) ( time - in ) / ( double ) ( out - in + 1 );
		// TODO: Check the logic here - shouldn't we be computing current and next mixing levels in all cases?
		if ( length == 0 )
		{
			// If there is an end mix level adjust mix to the range
			if ( mlt_properties_get( properties, "end" ) )
			{
				double start = mlt_properties_get_double( properties, "start" );
				double end = mlt_properties_get_double( properties, "end" );
				mix = start + ( end - start ) * mix;
			}
			// A negative means total crossfade (uses position)
			else if ( mlt_properties_get_double( properties, "start" ) >= 0 )
			{
				// Otherwise, start/constructor is a constant mix level
				mix = mlt_properties_get_double( properties, "start" );
			}

			// Finally, set the mix property on the frame
			mlt_properties_set_double( b_props, "audio.mix", mix );

			// Initialise transition previous mix value to prevent an inadvertant jump from 0.
			// A non-consecutive position (seek) also resets the ramp start to the current mix.
			mlt_position last_position = mlt_properties_get_position( properties, "_last_position" );
			mlt_position current_position = mlt_frame_get_position( b_frame );
			mlt_properties_set_position( properties, "_last_position", current_position );
			if ( !mlt_properties_get( properties, "_previous_mix" ) || current_position != last_position + 1 )
				mlt_properties_set_double( properties, "_previous_mix", mix );

			// Tell b frame what the previous mix level was
			mlt_properties_set_double( b_props, "audio.previous_mix", mlt_properties_get_double( properties, "_previous_mix" ) );

			// Save the current mix level for the next iteration
			mlt_properties_set_double( properties, "_previous_mix", mlt_properties_get_double( b_props, "audio.mix" ) );

			mlt_properties_set_double( b_props, "audio.reverse", mlt_properties_get_double( properties, "reverse" ) );
		}
		else
		{
			// length > 0: ramp in over the first `length` frames and out over the last
			double level = mlt_properties_get_double( properties, "start" );
			double mix_start = level;
			double mix_end = mix_start;
			double mix_increment = 1.0 / length;
			if ( time - in < length )
			{
				mix_start = mix_start * ( ( double )( time - in ) / length );
				mix_end = mix_start + mix_increment;
			}
			else if ( time > out - length )
			{
				// NOTE(review): `out - time - in` looks suspicious — a symmetric
				// fade-out of the ramp above would use `out - time`; confirm against
				// the semantics of the producer's `_frame` property.
				mix_end = mix_start * ( ( double )( out - time - in ) / length );
				mix_start = mix_end - mix_increment;
			}
			// Clamp both ends of the ramp into [0, level]
			mix_start = mix_start < 0 ? 0 : mix_start > level ? level : mix_start;
			mix_end = mix_end < 0 ? 0 : mix_end > level ? level : mix_end;
			mlt_properties_set_double( b_props, "audio.previous_mix", mix_start );
			mlt_properties_set_double( b_props, "audio.mix", mix_end );
		}
	}

	// Override the get_audio method
	mlt_frame_push_audio( a_frame, transition );
	mlt_frame_push_audio( a_frame, b_frame );
	mlt_frame_push_audio( a_frame, transition_get_audio );

	return a_frame;
}
/** Start the SDL consumer.
 *
 * Applies environment overrides requested through properties, picks up the
 * configured resolution and window size, initialises SDL audio when enabled,
 * and launches the consumer thread.
 *
 * \param parent the consumer
 * \return 0 on success (or if already running), 1 if video setup fails
 */
int consumer_start( mlt_consumer parent )
{
	consumer_sdl self = parent->child;

	// Already running: nothing to do.
	if ( self->running )
		return 0;

	mlt_properties props = MLT_CONSUMER_PROPERTIES( parent );
	int audio_off = mlt_properties_get_int( props, "audio_off" );

	// Map consumer properties onto the environment variables SDL/X11 consult.
	struct { const char *env; char *value; } overrides[] = {
		{ "DISPLAY",         mlt_properties_get( props, "output_display" ) },
		{ "SDL_WINDOWID",    mlt_properties_get( props, "window_id" ) },
		{ "SDL_VIDEODRIVER", mlt_properties_get( props, "video_driver" ) },
		{ "SDL_AUDIODRIVER", mlt_properties_get( props, "audio_driver" ) },
		{ "AUDIODEV",        mlt_properties_get( props, "audio_device" ) },
	};
	size_t n;

	// Ensure any previous run is fully torn down before restarting.
	consumer_stop( parent );

	self->running = 1;
	self->joined = 0;

	for ( n = 0; n < sizeof( overrides ) / sizeof( overrides[0] ); n++ )
		if ( overrides[n].value != NULL )
			setenv( overrides[n].env, overrides[n].value, 1 );

	// Unless a fixed resolution was requested, honour explicit width/height.
	if ( !mlt_properties_get_int( self->properties, "resolution" ) )
	{
		int w = mlt_properties_get_int( self->properties, "width" );
		int h = mlt_properties_get_int( self->properties, "height" );
		if ( w > 0 )
			self->width = w;
		if ( h > 0 )
			self->height = h;
	}

	if ( audio_off == 0 )
		SDL_InitSubSystem( SDL_INIT_AUDIO );

	// Default window size: either the frame size itself, or derived from the
	// display aspect ratio when no fixed resolution was requested.
	if ( mlt_properties_get_int( self->properties, "resolution" ) )
	{
		self->window_width = self->width;
		self->window_height = self->height;
	}
	else
	{
		double dar = mlt_properties_get_double( self->properties, "display_ratio" );
		self->window_width = ( double )self->height * dar + 0.5;
		self->window_height = self->height;
	}

#if defined(__APPLE__) || defined(_WIN32)
	// Initialize SDL video if needed (must happen on this thread on these platforms).
	if ( setup_sdl_video(self) )
		return 1;
#endif
	pthread_create( &self->thread, NULL, consumer_thread, self );

	return 0;
}
/** Get and mix the audio of the two frames.
 *
 * Pulls s16 audio from both frames, buffers each stream in the transition's
 * ring buffers (discarding the oldest samples on overflow), mixes or combines
 * them, and attaches the result to frame_a. Residual samples are retained
 * between calls, bounded by a ~1ms latency target (flushed when paused).
 *
 * \param frame_a the primary (destination) frame, with frame_b and the
 *        transition on its audio stack
 * \param buffer [out] receives the mixed interleaved s16 audio
 * \param format [in/out] forced to mlt_audio_s16
 * \param frequency [in/out] sample rate
 * \param channels [in/out] channel count (clamped to MAX_CHANNELS)
 * \param samples [in/out] number of samples produced
 * \return 0
 */
static int transition_get_audio( mlt_frame frame_a, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the b frame from the stack
	mlt_frame frame_b = mlt_frame_pop_audio( frame_a );

	// Get the effect
	mlt_transition transition = mlt_frame_pop_audio( frame_a );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( frame_b );

	transition_mix self = transition->child;
	int16_t *buffer_b, *buffer_a;
	int frequency_b = *frequency, frequency_a = *frequency;
	int channels_b = *channels, channels_a = *channels;
	int samples_b = *samples, samples_a = *samples;

	// We can only mix s16
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame_b, (void**) &buffer_b, format, &frequency_b, &channels_b, &samples_b );
	mlt_frame_get_audio( frame_a, (void**) &buffer_a, format, &frequency_a, &channels_a, &samples_a );

	// Both frames returned the same buffer: nothing to mix, pass it through.
	if ( buffer_b == buffer_a )
	{
		*samples = samples_b;
		*channels = channels_b;
		*buffer = buffer_b;
		*frequency = frequency_b;
		return error;
	}

	// Honour (and clear) the per-frame silence flags by zeroing the PCM.
	int silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio" );
	mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio", 0 );
	if ( silent )
		memset( buffer_a, 0, samples_a * channels_a * sizeof( int16_t ) );
	silent = mlt_properties_get_int( b_props, "silent_audio" );
	mlt_properties_set_int( b_props, "silent_audio", 0 );
	if ( silent )
		memset( buffer_b, 0, samples_b * channels_b * sizeof( int16_t ) );

	// determine number of samples to process: no more than either stream can supply
	*samples = MIN( self->src_buffer_count + samples_b, self->dest_buffer_count + samples_a );
	*channels = MIN( MIN( channels_b, channels_a ), MAX_CHANNELS );
	*frequency = frequency_a;

	// Prevent src buffer overflow by discarding oldest samples.
	samples_b = MIN( samples_b, MAX_SAMPLES * MAX_CHANNELS / channels_b );
	size_t bytes = PCM16_BYTES( samples_b, channels_b );
	if ( PCM16_BYTES( self->src_buffer_count + samples_b, channels_b ) > MAX_BYTES )
	{
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: src_buffer_count %d\n", self->src_buffer_count );
		// Keep only the newest tail that leaves room for the incoming samples.
		self->src_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_b - samples_b;
		memmove( self->src_buffer, &self->src_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_b * channels_b],
			PCM16_BYTES( samples_b, channels_b ) );
	}

	// Buffer new src samples.
	memcpy( &self->src_buffer[self->src_buffer_count * channels_b], buffer_b, bytes );
	self->src_buffer_count += samples_b;
	buffer_b = self->src_buffer;

	// Prevent dest buffer overflow by discarding oldest samples.
	samples_a = MIN( samples_a, MAX_SAMPLES * MAX_CHANNELS / channels_a );
	bytes = PCM16_BYTES( samples_a, channels_a );
	if ( PCM16_BYTES( self->dest_buffer_count + samples_a, channels_a ) > MAX_BYTES )
	{
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: dest_buffer_count %d\n", self->dest_buffer_count );
		self->dest_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_a - samples_a;
		memmove( self->dest_buffer, &self->dest_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_a * channels_a],
			PCM16_BYTES( samples_a, channels_a ) );
	}

	// Buffer the new dest samples.
	memcpy( &self->dest_buffer[self->dest_buffer_count * channels_a], buffer_a, bytes );
	self->dest_buffer_count += samples_a;
	buffer_a = self->dest_buffer;

	// Do the mixing.
	if ( mlt_properties_get_int( MLT_TRANSITION_PROPERTIES(transition), "combine" ) )
	{
		// "combine" mode: sum both streams, optionally attenuating a prior mixdown.
		double weight = 1.0;
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "meta.mixdown" ) )
			weight = 1.0 - mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame_a ), "meta.volume" );
		combine_audio( weight, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}
	else
	{
		// Crossfade using the ramp endpoints set by transition_process.
		double mix_start = 0.5, mix_end = 0.5;
		if ( mlt_properties_get( b_props, "audio.previous_mix" ) )
			mix_start = mlt_properties_get_double( b_props, "audio.previous_mix" );
		if ( mlt_properties_get( b_props, "audio.mix" ) )
			mix_end = mlt_properties_get_double( b_props, "audio.mix" );
		if ( mlt_properties_get_int( b_props, "audio.reverse" ) )
		{
			mix_start = 1.0 - mix_start;
			mix_end = 1.0 - mix_end;
		}
		mix_audio( mix_start, mix_end, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}

	// Copy the audio into the frame.
	bytes = PCM16_BYTES( *samples, *channels );
	*buffer = mlt_pool_alloc( bytes );
	memcpy( *buffer, buffer_a, bytes );
	mlt_frame_set_audio( frame_a, *buffer, *format, bytes, mlt_pool_release );

	if ( mlt_properties_get_int( b_props, "_speed" ) == 0 )
	{
		// Flush the buffer when paused and scrubbing.
		samples_b = self->src_buffer_count;
		samples_a = self->dest_buffer_count;
	}
	else
	{
		// Determine the maximum amount of latency permitted in the buffer.
		int max_latency = CLAMP( *frequency / 1000, 0, MAX_SAMPLES ); // samples in 1ms
		// samples_b becomes the new target src buffer count.
		samples_b = CLAMP( self->src_buffer_count - *samples, 0, max_latency );
		// samples_b becomes the number of samples to consume: difference between actual and the target.
		samples_b = self->src_buffer_count - samples_b;
		// samples_a becomes the new target dest buffer count.
		samples_a = CLAMP( self->dest_buffer_count - *samples, 0, max_latency );
		// samples_a becomes the number of samples to consume: difference between actual and the target.
		samples_a = self->dest_buffer_count - samples_a;
	}

	// Consume the src buffer.
	self->src_buffer_count -= samples_b;
	if ( self->src_buffer_count )
	{
		memmove( self->src_buffer, &self->src_buffer[samples_b * channels_b],
			PCM16_BYTES( self->src_buffer_count, channels_b ));
	}
	// Consume the dest buffer.
	self->dest_buffer_count -= samples_a;
	if ( self->dest_buffer_count )
	{
		memmove( self->dest_buffer, &self->dest_buffer[samples_a * channels_a],
			PCM16_BYTES( self->dest_buffer_count, channels_a ));
	}

	return error;
}
/** Fetch a frame's audio and feed it into the SDL audio buffer.
 *
 * Opens the SDL audio device on first use (init_audio == 1), then copies the
 * frame's s16 PCM into self->audio_buffer under the audio mutex, blocking on
 * the condition variable while the buffer is full. When not playing at normal
 * speed (and scrubbing is off), silence is written instead.
 *
 * \param self the consumer
 * \param frame the frame whose audio to play
 * \param init_audio 1 to (re)initialise the audio device
 * \param duration [out] the audio duration in milliseconds
 * \return new init_audio state: 0 = device open, 1 = audio off, 2 = open failed
 */
static int consumer_play_audio( consumer_sdl self, mlt_frame frame, int init_audio, int *duration )
{
	// Get the properties of self consumer
	mlt_properties properties = self->properties;
	mlt_audio_format afmt = mlt_audio_s16;

	// Set the preferred params of the test card signal
	int channels = mlt_properties_get_int( properties, "channels" );
	int frequency = mlt_properties_get_int( properties, "frequency" );
	int scrub = mlt_properties_get_int( properties, "scrub_audio" );
	static int counter = 0;  // frame counter used to vary sample counts for non-integer fps

	int samples = mlt_sample_calculator( mlt_properties_get_double( self->properties, "fps" ), frequency, counter++ );
	int16_t *pcm;

	mlt_frame_get_audio( frame, (void**) &pcm, &afmt, &frequency, &channels, &samples );
	*duration = ( ( samples * 1000 ) / frequency );
	// Skip initial samples as configured (offset is in int16 units)
	pcm += mlt_properties_get_int( properties, "audio_offset" );

	if ( mlt_properties_get_int( properties, "audio_off" ) )
	{
		self->playing = 1;
		init_audio = 1;
		return init_audio;
	}

	if ( init_audio == 1 )
	{
		SDL_AudioSpec request;
		SDL_AudioSpec got;
		SDL_AudioDeviceID dev;

		int audio_buffer = mlt_properties_get_int( properties, "audio_buffer" );

		// specify audio format
		memset( &request, 0, sizeof( SDL_AudioSpec ) );
		self->playing = 0;
		request.freq = frequency;
		request.format = AUDIO_S16SYS;
		request.channels = mlt_properties_get_int( properties, "channels" );
		request.samples = audio_buffer;
		request.callback = sdl_fill_audio;
		request.userdata = (void *)self;

		dev = sdl2_open_audio( &request, &got );
		if( dev == 0 )
		{
			mlt_log_error( MLT_CONSUMER_SERVICE( self ), "SDL failed to open audio\n" );
			init_audio = 2;
		}
		else
		{
			if( got.channels != request.channels )
			{
				mlt_log_info( MLT_CONSUMER_SERVICE( self ), "Unable to output %d channels. Change to %d\n", request.channels, got.channels );
			}
			mlt_log_info( MLT_CONSUMER_SERVICE( self ), "Audio Opened: driver=%s channels=%d frequency=%d\n", SDL_GetCurrentAudioDriver(), got.channels, got.freq );
			SDL_PauseAudioDevice( dev, 0 );
			init_audio = 0;
			// The device may deliver a different channel count than requested.
			self->out_channels = got.channels;
		}
	}

	if ( init_audio == 0 )
	{
		// Shadows the consumer properties deliberately: "_speed" is a frame property.
		mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
		int samples_copied = 0;
		int dst_stride = self->out_channels * sizeof( *pcm );

		pthread_mutex_lock( &self->audio_mutex );

		while ( self->running && samples_copied < samples )
		{
			int sample_space = ( sizeof( self->audio_buffer ) - self->audio_avail ) / dst_stride;

			// Wait for the SDL callback to drain the buffer before copying more.
			while ( self->running && sample_space == 0 )
			{
				pthread_cond_wait( &self->audio_cond, &self->audio_mutex );
				sample_space = ( sizeof( self->audio_buffer ) - self->audio_avail ) / dst_stride;
			}
			if ( self->running )
			{
				int samples_to_copy = samples - samples_copied;
				if ( samples_to_copy > sample_space )
				{
					samples_to_copy = sample_space;
				}
				int dst_bytes = samples_to_copy * dst_stride;

				if ( scrub || mlt_properties_get_double( properties, "_speed" ) == 1 )
				{
					if ( channels == self->out_channels )
					{
						// Fast path: channel layout matches, copy interleaved PCM directly.
						memcpy( &self->audio_buffer[ self->audio_avail ], pcm, dst_bytes );
						pcm += samples_to_copy * channels;
					}
					else
					{
						// Channel counts differ: copy per-sample at the device stride.
						int16_t *dest = (int16_t*) &self->audio_buffer[ self->audio_avail ];
						int i = samples_to_copy + 1;
						while ( --i )
						{
							memcpy( dest, pcm, dst_stride );
							pcm += channels;
							dest += self->out_channels;
						}
					}
				}
				else
				{
					// Not playing at normal speed and not scrubbing: emit silence.
					memset( &self->audio_buffer[ self->audio_avail ], 0, dst_bytes );
					pcm += samples_to_copy * channels;
				}
				self->audio_avail += dst_bytes;
				samples_copied += samples_to_copy;
			}
			pthread_cond_broadcast( &self->audio_cond );
		}
		pthread_mutex_unlock( &self->audio_mutex );
	}
	else
	{
		self->playing = 1;
	}

	return init_audio;
}
/** Convert linear FFT bin magnitudes into a log-frequency spectrum.
 *
 * Maps the linear FFT bins (attached to the frame by the fft filter) onto
 * spect_bands points spaced logarithmically between frequency_low and
 * frequency_high. Each point takes the maximum bin magnitude in its range,
 * or a linear interpolation of the two nearest bins when the band is
 * narrower than one bin. Magnitudes are converted to dB and scaled to
 * 0.0-1.0 against the (negative) dB threshold property.
 *
 * \param filter this filter (provides frequency/threshold/reverse properties)
 * \param frame the frame carrying the FFT bins
 * \param spect_bands number of output spectrum points
 * \param spectrum [out] array of spect_bands floats in the range 0.0-1.0
 */
static void convert_fft_to_spectrum( mlt_filter filter, mlt_frame frame, int spect_bands, float* spectrum )
{
	private_data* pdata = (private_data*)filter->child;
	mlt_properties filter_properties = MLT_FILTER_PROPERTIES( filter );
	mlt_properties fft_properties = MLT_FILTER_PROPERTIES( pdata->fft );
	mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
	double low_freq = mlt_properties_get_int( filter_properties, "frequency_low" );
	double hi_freq = mlt_properties_get_int( filter_properties, "frequency_high" );
	int bin_count = mlt_properties_get_int( fft_properties, "bin_count" );
	double bin_width = mlt_properties_get_double( fft_properties, "bin_width" );
	float* bins = (float*)mlt_properties_get_data( frame_properties, pdata->fft_prop_name, NULL );
	double threshold = mlt_properties_get_int( filter_properties, "threshold" );
	int reverse = mlt_properties_get_int( filter_properties, "reverse" );

	// Map the linear fft bin frequencies to a log scale spectrum.
	double band_freq_factor = pow( hi_freq / low_freq, 1.0 / (double)spect_bands );
	double band_freq_low = low_freq;
	double band_freq_hi = band_freq_low * band_freq_factor;

	int bin_index = 0;
	int spect_index = 0;
	double bin_freq = 0;

	// Skip bins that occur before the low frequency of the spectrum
	while( bin_freq < band_freq_low )
	{
		bin_freq += bin_width;
		bin_index ++;
	}

	for( spect_index = 0; spect_index < spect_bands && bin_index < bin_count; spect_index++ )
	{
		float mag = 0.0;

		if( bin_freq > band_freq_hi )
		{
			// There is no bin for this point. Interpolate between the two closest.
			if( bin_index == 0 )
			{
				mag = bins[bin_index];
			}
			else
			{
				double y0 = bins[bin_index - 1];
				double y1 = bins[bin_index];
				double spect_center = band_freq_low + (band_freq_hi - band_freq_low) / 2;
				double prev_freq = bin_freq - bin_width;
				// BUGFIX: the interpolation fraction is the distance of the band
				// center from the previous bin divided by the bin spacing
				// (0.0 at prev_freq, 1.0 at bin_freq). The previous code computed
				// the reciprocal, which could exceed 1 or blow up when the band
				// center was close to prev_freq.
				double t = ( spect_center - prev_freq ) / bin_width;
				mag = y0 + ( y1 - y0 ) * t;
			}
		}
		else
		{
			// Find the bin frequency with the greatest magnitude in the range for
			// this spectrum point.
			while( bin_freq < band_freq_hi && bin_index < bin_count )
			{
				if( mag < bins[bin_index] )
				{
					mag = bins[bin_index];
				}
				bin_freq += bin_width;
				bin_index ++;
			}
		}

		// Scale the magnitude to the range 0.0-1.0 based on dB.
		// threshold is negative (dBFS floor), so dB/threshold maps
		// [threshold, 0] dB onto [1, 0] and spect_val onto [0, 1].
		double dB = mag > 0.0 ? 20 * log10( mag ) : -1000.0;
		double spect_val = 0;
		if( dB >= threshold )
		{
			spect_val = 1.0 - (dB / threshold);
		}

		if( reverse )
		{
			spectrum[spect_bands - spect_index - 1] = spect_val;
		}
		else
		{
			spectrum[spect_index] = spect_val;
		}

		// Calculate the next spectrum point frequency range.
		band_freq_low = band_freq_hi;
		band_freq_hi = band_freq_hi * band_freq_factor;
	}
}
/** Display a frame's video via the SDL2 renderer.
 *
 * Fetches the frame image, computes the destination rectangle from the window
 * size, display aspect ratio, and rescale policy, uploads the image to the
 * SDL texture, and presents it. When video display is off, the image may
 * still be fetched (for preview consumers) and the "consumer-frame-show"
 * event is still fired.
 *
 * \param self the consumer
 * \param frame the frame to show
 * \return 0
 */
static int consumer_play_video( consumer_sdl self, mlt_frame frame )
{
	// Get the properties of this consumer
	mlt_properties properties = self->properties;

#ifdef MLT_IMAGE_FORMAT
	mlt_image_format vfmt = mlt_properties_get_int( properties, "mlt_image_format" );
#else
	mlt_image_format vfmt = mlt_image_yuv422;
#endif
	int width = self->width, height = self->height;
	uint8_t *image;

	int video_off = mlt_properties_get_int( properties, "video_off" );
	int preview_off = mlt_properties_get_int( properties, "preview_off" );
	int display_off = video_off | preview_off;

	if ( self->running && !display_off )
	{
		// Get the image, width and height
		mlt_frame_get_image( frame, &image, &vfmt, &width, &height, 0 );

		if ( self->running )
		{
			// Determine window's new display aspect ratio
			int x = mlt_properties_get_int( properties, "window_width" );
			if ( x && x != self->window_width )
				self->window_width = x;
			x = mlt_properties_get_int( properties, "window_height" );
			if ( x && x != self->window_height )
				self->window_height = x;
			double this_aspect = ( double )self->window_width / self->window_height;

			// Get the display aspect ratio
			double display_ratio = mlt_properties_get_double( properties, "display_ratio" );

			// Determine frame's display aspect ratio
			double frame_aspect = mlt_frame_get_aspect_ratio( frame ) * width / height;

			// Store the width and height received
			self->width = width;
			self->height = height;

			// If using hardware scaler
			if ( mlt_properties_get( properties, "rescale" ) != NULL &&
				!strcmp( mlt_properties_get( properties, "rescale" ), "none" ) )
			{
				// Use hardware scaler to normalise display aspect ratio
				self->sdl_rect.w = frame_aspect / this_aspect * self->window_width;
				self->sdl_rect.h = self->window_height;
				if ( self->sdl_rect.w > self->window_width )
				{
					self->sdl_rect.w = self->window_width;
					self->sdl_rect.h = this_aspect / frame_aspect * self->window_height;
				}
			}
			// Special case optimisation to negate odd effect of sample aspect ratio
			// not corresponding exactly with image resolution.
			else if ( (int)( this_aspect * 1000 ) == (int)( display_ratio * 1000 ) )
			{
				self->sdl_rect.w = self->window_width;
				self->sdl_rect.h = self->window_height;
			}
			// Use hardware scaler to normalise sample aspect ratio
			else if ( self->window_height * display_ratio > self->window_width )
			{
				self->sdl_rect.w = self->window_width;
				self->sdl_rect.h = self->window_width / display_ratio;
			}
			else
			{
				self->sdl_rect.w = self->window_height * display_ratio;
				self->sdl_rect.h = self->window_height;
			}

			// Center the rect and keep x even (avoids chroma misalignment for YUV)
			self->sdl_rect.x = ( self->window_width - self->sdl_rect.w ) / 2;
			self->sdl_rect.y = ( self->window_height - self->sdl_rect.h ) / 2;
			self->sdl_rect.x -= self->sdl_rect.x % 2;

			// Publish the computed rect for interested parties (e.g. overlay UIs)
			mlt_properties_set_int( self->properties, "rect_x", self->sdl_rect.x );
			mlt_properties_set_int( self->properties, "rect_y", self->sdl_rect.y );
			mlt_properties_set_int( self->properties, "rect_w", self->sdl_rect.w );
			mlt_properties_set_int( self->properties, "rect_h", self->sdl_rect.h );
		}

		if ( self->running && image )
		{
			unsigned char* planes[4];
			int strides[4];

			// We use height-1 because mlt_image_format_size() uses height + 1.
			// XXX Remove -1 when mlt_image_format_size() is changed.
			mlt_image_format_planes( vfmt, width, height - 1, image, planes, strides );
			// A second plane stride indicates planar YUV; otherwise packed.
			if ( strides[1] )
			{
				SDL_UpdateYUVTexture( self->sdl_texture, NULL,
					planes[0], strides[0], planes[1], strides[1], planes[2], strides[2] );
			}
			else
			{
				SDL_UpdateTexture( self->sdl_texture, NULL, planes[0], strides[0] );
			}
			SDL_RenderClear( self->sdl_renderer );
			SDL_RenderCopy( self->sdl_renderer, self->sdl_texture, NULL, &self->sdl_rect );
			SDL_RenderPresent( self->sdl_renderer );
		}
		mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
	}
	else if ( self->running )
	{
		// Display is off but the consumer runs: still fetch for preview consumers.
		if ( !video_off )
		{
			mlt_image_format preview_format = mlt_properties_get_int( properties, "preview_format" );
			vfmt = preview_format == mlt_image_none ? mlt_image_rgb24a : preview_format;
			mlt_frame_get_image( frame, &image, &vfmt, &width, &height, 0 );
		}
		mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
	}

	return 0;
}
/** Video playout thread.
 *
 * Pops frames from the consumer's queue (blocking on the condition variable
 * when empty), paces display against each frame's scheduled "playtime", and
 * shows frames via consumer_play_video(). On shutdown, any frames still
 * queued are displayed so video can catch up with already-played audio.
 *
 * \param arg the consumer (consumer_sdl)
 * \return NULL
 */
static void *video_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Obtain time of thread start
	struct timeval now;
	int64_t start = 0;
	int64_t elapsed = 0;
	struct timespec tm;
	mlt_frame next = NULL;
	mlt_properties properties = NULL;
	double speed = 0;

	// Get real time flag
	int real_time = mlt_properties_get_int( self->properties, "real_time" );

	// Get the current time
	gettimeofday( &now, NULL );

	// Determine start time (microseconds)
	start = ( int64_t )now.tv_sec * 1000000 + now.tv_usec;

	while ( self->running )
	{
		// Pop the next frame, waiting on the condition while the queue is empty
		pthread_mutex_lock( &self->video_mutex );
		next = mlt_deque_pop_front( self->queue );
		while ( next == NULL && self->running )
		{
			pthread_cond_wait( &self->video_cond, &self->video_mutex );
			next = mlt_deque_pop_front( self->queue );
		}
		pthread_mutex_unlock( &self->video_mutex );

		if ( !self->running || next == NULL ) break;

		// Get the properties
		properties =  MLT_FRAME_PROPERTIES( next );

		// Get the speed of the frame
		speed = mlt_properties_get_double( properties, "_speed" );

		// Get the current time
		gettimeofday( &now, NULL );

		// Get the elapsed time
		elapsed = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - start;

		// See if we have to delay the display of the current frame
		if ( mlt_properties_get_int( properties, "rendered" ) == 1 )
		{
			// Obtain the scheduled playout time (microseconds)
			int64_t scheduled = mlt_properties_get_int( properties, "playtime" );

			// Determine the difference between the elapsed time and the scheduled playout time
			int64_t difference = scheduled - elapsed;

			// Smooth playback a bit if the frame is more than 20ms early.
			// NOTE(review): tv_nsec is scaled by 500 rather than 1000, i.e. the
			// thread sleeps roughly half the difference — presumably deliberate
			// smoothing; confirm before "fixing".
			if ( real_time && ( difference > 20000 && speed == 1.0 ) )
			{
				tm.tv_sec = difference / 1000000;
				tm.tv_nsec = ( difference % 1000000 ) * 500;
				nanosleep( &tm, NULL );
			}

			// Show current frame if not too old (>10ms late drops it unless the
			// queue is nearly empty or we're not at normal speed)
			if ( !real_time || ( difference > -10000 || speed != 1.0 || mlt_deque_count( self->queue ) < 2 ) )
				consumer_play_video( self, next );

			// If the queue is empty, recalculate start to allow build up again
			if ( real_time && ( mlt_deque_count( self->queue ) == 0 && speed == 1.0 ) )
			{
				gettimeofday( &now, NULL );
				start = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - scheduled + 20000;
			}
		}

		// This frame can now be closed
		mlt_frame_close( next );
		next = NULL;
	}

	// This consumer is stopping. But audio has already been played for all
	// the frames in the queue. Spit out all the frames so that the display has
	// the option to catch up with the audio.
	if ( next != NULL )
	{
		consumer_play_video( self, next );
		mlt_frame_close( next );
		next = NULL;
	}
	while ( mlt_deque_count( self->queue ) > 0 )
	{
		next = mlt_deque_pop_front( self->queue );
		consumer_play_video( self, next );
		mlt_frame_close( next );
		next = NULL;
	}

	mlt_consumer_stopped( &self->parent );

	return NULL;
}
/** Video playout thread (SDL2 variant).
 *
 * Initialises SDL video on this thread on non-Apple/non-Windows platforms,
 * then pops frames from the consumer's queue, paces display against each
 * frame's scheduled "playtime", shows rendered frames, and logs dropped ones.
 *
 * \param arg the consumer (consumer_sdl)
 * \return NULL
 */
static void *video_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Obtain time of thread start
	struct timeval now;
	int64_t start = 0;
	int64_t elapsed = 0;
	struct timespec tm;
	mlt_frame next = NULL;
	mlt_properties properties = NULL;
	double speed = 0;

	// Get real time flag
	int real_time = mlt_properties_get_int( self->properties, "real_time" );

#if !defined(__APPLE__) && !defined(_WIN32)
	// On these platforms SDL video must be set up on the rendering thread.
	if ( setup_sdl_video(self) )
		self->running = 0;
#endif

	// Determine start time (microseconds)
	gettimeofday( &now, NULL );
	start = ( int64_t )now.tv_sec * 1000000 + now.tv_usec;

	while ( self->running )
	{
		// Pop the next frame, waiting on the condition while the queue is empty
		pthread_mutex_lock( &self->video_mutex );
		next = mlt_deque_pop_front( self->queue );
		while ( next == NULL && self->running )
		{
			pthread_cond_wait( &self->video_cond, &self->video_mutex );
			next = mlt_deque_pop_front( self->queue );
		}
		pthread_mutex_unlock( &self->video_mutex );

		if ( !self->running || next == NULL ) break;

		// Get the properties
		properties =  MLT_FRAME_PROPERTIES( next );

		// Get the speed of the frame
		speed = mlt_properties_get_double( properties, "_speed" );

		// Get the current time
		gettimeofday( &now, NULL );

		// Get the elapsed time
		elapsed = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - start;

		// See if we have to delay the display of the current frame
		if ( mlt_properties_get_int( properties, "rendered" ) == 1 && self->running )
		{
			// Obtain the scheduled playout time (microseconds)
			int64_t scheduled = mlt_properties_get_int( properties, "playtime" );

			// Determine the difference between the elapsed time and the scheduled playout time
			int64_t difference = scheduled - elapsed;

			// Smooth playback a bit if the frame is more than 20ms early.
			// NOTE(review): tv_nsec is scaled by 500 rather than 1000, i.e. the
			// thread sleeps roughly half the difference — presumably deliberate
			// smoothing; confirm before "fixing".
			if ( real_time && ( difference > 20000 && speed == 1.0 ) )
			{
				tm.tv_sec = difference / 1000000;
				tm.tv_nsec = ( difference % 1000000 ) * 500;
				nanosleep( &tm, NULL );
			}

			// Show current frame if not too old (>10ms late drops it unless the
			// queue is nearly empty or we're not at normal speed)
			if ( !real_time || ( difference > -10000 || speed != 1.0 || mlt_deque_count( self->queue ) < 2 ) )
				consumer_play_video( self, next );

			// If the queue is empty, recalculate start to allow build up again
			if ( real_time && ( mlt_deque_count( self->queue ) == 0 && speed == 1.0 ) )
			{
				gettimeofday( &now, NULL );
				start = ( ( int64_t )now.tv_sec * 1000000 + now.tv_usec ) - scheduled + 20000;
			}
		}
		else
		{
			// Unrendered frame (or stopping): count and log the drop
			static int dropped = 0;
			mlt_log_info( MLT_CONSUMER_SERVICE(&self->parent), "dropped video frame %d\n", ++dropped );
		}

		// This frame can now be closed
		mlt_frame_close( next );
		next = NULL;
	}

	if ( next != NULL )
		mlt_frame_close( next );

	mlt_consumer_stopped( &self->parent );

	return NULL;
}
/** Volume filter audio callback.
 *
 * Applies a (possibly animated) gain to the frame's audio, optionally
 * normalising against measured signal power with a smoothing window, and
 * ramps from the previous frame's gain to avoid zipper noise. When gain
 * exceeds unity in normalise mode, a soft limiter is used instead of clipping.
 *
 * \param frame the frame carrying the audio
 * \param buffer [out] audio buffer (s16 when normalising, f32le otherwise)
 * \param format [in/out] requested/actual sample format
 * \param frequency [in/out] sample rate
 * \param channels [in/out] channel count
 * \param samples [in/out] samples per channel
 * \return 0 on success
 */
static int filter_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the filter from the frame
	mlt_filter filter = mlt_frame_pop_audio( frame );

	// Get the properties from the filter
	mlt_properties filter_props = MLT_FILTER_PROPERTIES( filter );

	// Get the frame's filter instance properties
	mlt_properties instance_props = mlt_frame_unique_properties( frame, MLT_FILTER_SERVICE( filter ) );

	// Get the parameters
	double gain = mlt_properties_get_double( instance_props, "gain" );
	double max_gain = mlt_properties_get_double( instance_props, "max_gain" );
	double limiter_level = 0.5; /* -6 dBFS */
	int normalise = mlt_properties_get_int( instance_props, "normalise" );
	double amplitude = mlt_properties_get_double( instance_props, "amplitude" );
	int i, j;
	double sample;
	int16_t peak;

	// Use animated value for gain if "level" property is set
	char* level_property = mlt_properties_get( filter_props, "level" );
	if ( level_property != NULL )
	{
		mlt_position position = mlt_filter_get_position( filter, frame );
		mlt_position length = mlt_filter_get_length2( filter, frame );
		gain = mlt_properties_anim_get_double( filter_props, "level", position, length );
		gain = DBFSTOAMP( gain );
	}

	if ( mlt_properties_get( instance_props, "limiter" ) != NULL )
		limiter_level = mlt_properties_get_double( instance_props, "limiter" );

	// Get the producer's audio: normalisation operates on 16-bit integer
	// samples; a plain gain is applied in 32-bit float.
	*format = normalise? mlt_audio_s16 : mlt_audio_f32le;
	mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );

	// Serialise access to the shared smoothing/ramp state on filter_props.
	mlt_service_lock( MLT_FILTER_SERVICE( filter ) );

	if ( normalise )
	{
		int window = mlt_properties_get_int( filter_props, "window" );
		double *smooth_buffer = mlt_properties_get_data( filter_props, "smooth_buffer", NULL );

		if ( window > 0 && smooth_buffer != NULL )
		{
			int smooth_index = mlt_properties_get_int( filter_props, "_smooth_index" );

			// Compute the signal power and put into smoothing buffer
			smooth_buffer[ smooth_index ] = signal_max_power( *buffer, *channels, *samples, &peak );

			if ( smooth_buffer[ smooth_index ] > EPSILON )
			{
				mlt_properties_set_int( filter_props, "_smooth_index", ( smooth_index + 1 ) % window );

				// Smooth the data and compute the gain
				gain *= amplitude / get_smoothed_data( smooth_buffer, window );
			}
		}
		else
		{
			// FIX: guard against division by zero on a silent frame — the
			// windowed path above checks EPSILON, this path previously didn't,
			// so silence produced an infinite gain.
			double power = signal_max_power( *buffer, *channels, *samples, &peak );
			if ( power > EPSILON )
				gain *= amplitude / power;
		}
	}

	if ( max_gain > 0 && gain > max_gain )
		gain = max_gain;

	// Initialise filter's previous gain value to prevent an inadvertant jump from 0
	mlt_position last_position = mlt_properties_get_position( filter_props, "_last_position" );
	mlt_position current_position = mlt_frame_get_position( frame );
	if ( mlt_properties_get( filter_props, "_previous_gain" ) == NULL || current_position != last_position + 1 )
		mlt_properties_set_double( filter_props, "_previous_gain", gain );

	// Start the gain out at the previous
	double previous_gain = mlt_properties_get_double( filter_props, "_previous_gain" );

	// Determine ramp increment.
	// FIX: avoid a divide-by-zero (inf/NaN step) when the frame has no samples.
	double gain_step = ( *samples > 0 )? ( gain - previous_gain ) / *samples : 0.0;

	// Save the current gain for the next iteration
	mlt_properties_set_double( filter_props, "_previous_gain", gain );
	mlt_properties_set_position( filter_props, "_last_position", current_position );

	mlt_service_unlock( MLT_FILTER_SERVICE( filter ) );

	// Ramp from the previous gain to the current
	gain = previous_gain;

	// Apply the gain
	if ( normalise )
	{
		int16_t *p = *buffer;

		// Determine numeric limits for the integer sample width
		int bytes_per_samp = (samp_width - 1) / 8 + 1;
		int samplemax = (1 << (bytes_per_samp * 8 - 1)) - 1;

		for ( i = 0; i < *samples; i++, gain += gain_step )
		{
			for ( j = 0; j < *channels; j++ )
			{
				sample = *p * gain;
				if ( gain > 1.0 )
					// Use limiter function instead of clipping.
					// (Single store; the original wrote ROUND(sample) first
					// and immediately overwrote it. The redundant
					// "&& normalise" inside this branch is also removed.)
					*p = ROUND( samplemax * limiter( sample / (double) samplemax, limiter_level ) );
				else
					*p = ROUND( sample );
				p++;
			}
		}
	}
	else
	{
		// Float path: simple per-sample multiply with the same gain ramp.
		float *p = *buffer;
		for ( i = 0; i < *samples; i++, gain += gain_step )
		{
			for ( j = 0; j < *channels; j++, p++ )
			{
				p[0] *= gain;
			}
		}
	}
	return 0;
}
/** Consumer main thread.
 *
 * Pulls frames from the attached producer, plays audio inline, stamps each
 * frame with its scheduled "playtime", and pushes it onto the video queue
 * consumed by video_thread (which it spawns once playback starts). On exit
 * it unblocks any pending put, joins the video thread and drains the queue.
 *
 * \param arg the consumer_sdl instance (void* for pthread_create)
 * \return always NULL
 */
static void *consumer_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Get the consumer
	mlt_consumer consumer = &self->parent;

	// Convenience functionality
	int terminate_on_pause = mlt_properties_get_int( MLT_CONSUMER_PROPERTIES( consumer ), "terminate_on_pause" );
	int terminated = 0;

	// Video thread
	pthread_t thread;

	// internal intialization
	int init_audio = 1;
	int init_video = 1;
	mlt_frame frame = NULL;
	int duration = 0;
	int64_t playtime = 0;
	struct timespec tm = { 0, 100000 }; // 100us polling interval

	// Loop until told not to
	while( self->running )
	{
		// Get a frame from the attached producer (none once terminated)
		frame = !terminated? mlt_consumer_rt_frame( consumer ) : NULL;

		// Check for termination: a paused producer (speed 0) ends playback
		if ( terminate_on_pause && frame )
			terminated = mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame ), "_speed" ) == 0.0;

		// Ensure that we have a frame
		if ( frame )
		{
			// Play audio
			init_audio = consumer_play_audio( self, frame, init_audio, &duration );

			// Determine the start time now
			if ( self->playing && init_video )
			{
				// Create the video thread
				pthread_create( &thread, NULL, video_thread, self );

				// Video doesn't need to be initialised any more
				init_video = 0;
			}

			// Set playtime for this frame
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "playtime", playtime );

			// Back-pressure: don't let the video queue grow beyond 15 frames
			while ( self->running && mlt_deque_count( self->queue ) > 15 )
				nanosleep( &tm, NULL );

			// Push this frame to the back of the queue
			pthread_mutex_lock( &self->video_mutex );
			if ( self->is_purge )
			{
				// A purge was requested: discard this frame instead of queuing
				mlt_frame_close( frame );
				frame = NULL;
				self->is_purge = 0;
			}
			else
			{
				mlt_deque_push_back( self->queue, frame );
				pthread_cond_broadcast( &self->video_cond );
			}
			pthread_mutex_unlock( &self->video_mutex );

			// Calculate the next playtime (duration is ms, playtime is us)
			playtime += ( duration * 1000 );
		}
		else if ( terminated )
		{
			// Drain: exit once the video queue is empty (or video never started)
			if ( init_video || mlt_deque_count( self->queue ) == 0 )
				break;
			else
				nanosleep( &tm, NULL );
		}
	}

	self->running = 0;

	// Unblock sdl_preview: consume a pending put so its caller isn't stuck
	if ( mlt_properties_get_int( MLT_CONSUMER_PROPERTIES( consumer ), "put_mode" ) &&
	     mlt_properties_get_int( MLT_CONSUMER_PROPERTIES( consumer ), "put_pending" ) )
	{
		frame = mlt_consumer_get_frame( consumer );
		if ( frame )
			mlt_frame_close( frame );
		frame = NULL;
	}

	// Kill the video thread: wake it from its condvar wait, then join
	if ( init_video == 0 )
	{
		pthread_mutex_lock( &self->video_mutex );
		pthread_cond_broadcast( &self->video_cond );
		pthread_mutex_unlock( &self->video_mutex );
		pthread_join( thread, NULL );
	}

	// Release any frames still queued
	while( mlt_deque_count( self->queue ) )
		mlt_frame_close( mlt_deque_pop_back( self->queue ) );

	self->audio_avail = 0;

	return NULL;
}
/** Affine transition image callback.
 *
 * Composites the b frame onto the a frame through an affine transform
 * (rotation/scale/translation from the transition's keyframed parameters),
 * with aspect-ratio-aware scaling and a selectable interpolation kernel.
 * Both images are processed in rgb24a.
 *
 * \param a_frame the background frame (carries b_frame and the transition on its stacks)
 * \param image [in/out] resulting image buffer
 * \param format [in/out] image format (forced to rgb24a)
 * \param width [in/out] image width
 * \param height [in/out] image height
 * \param writable unused here; a frame image is fetched writable
 * \return 0 on success
 */
static int transition_get_image( mlt_frame a_frame, uint8_t **image, mlt_image_format *format, int *width, int *height, int writable )
{
	// Get the b frame from the stack
	mlt_frame b_frame = mlt_frame_pop_frame( a_frame );

	// Get the transition object
	mlt_transition transition = mlt_frame_pop_service( a_frame );

	// Get the properties of the transition
	mlt_properties properties = MLT_TRANSITION_PROPERTIES( transition );

	// Get the properties of the a frame
	mlt_properties a_props = MLT_FRAME_PROPERTIES( a_frame );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( b_frame );

	// Image, format, width, height and image for the b frame
	uint8_t *b_image = NULL;
	mlt_image_format b_format = mlt_image_rgb24a;
	int b_width;
	int b_height;

	// Assign the current position
	mlt_position position = mlt_transition_get_position( transition, a_frame );

	int mirror = mlt_properties_get_position( properties, "mirror" );
	int length = mlt_transition_get_length( transition );
	if ( mlt_properties_get_int( properties, "always_active" ) )
	{
		// When always active, derive the length from the b producer's in/out
		mlt_properties props = mlt_properties_get_data( b_props, "_producer", NULL );
		mlt_position in = mlt_properties_get_int( props, "in" );
		mlt_position out = mlt_properties_get_int( props, "out" );
		length = out - in + 1;
	}

	// Obtain the normalised width and height from the a_frame
	mlt_profile profile = mlt_service_profile( MLT_TRANSITION_SERVICE( transition ) );
	int normalised_width = profile->width;
	int normalised_height = profile->height;

	// Consumer sample aspect ratio
	double consumer_ar = mlt_profile_sar( mlt_service_profile( MLT_TRANSITION_SERVICE(transition) ) );

	// Structures for geometry
	struct mlt_geometry_item_s result;

	// Mirror: run the second half of the transition backwards
	if ( mirror && position > length / 2 )
		position = abs( position - length );

	// Fetch the a frame image
	*format = mlt_image_rgb24a;
	mlt_frame_get_image( a_frame, image, format, width, height, 1 );

	// Calculate the region now (lock: geometry state is shared per-transition)
	mlt_service_lock( MLT_TRANSITION_SERVICE( transition ) );
	composite_calculate( transition, &result, normalised_width, normalised_height, ( float )position );
	mlt_service_unlock( MLT_TRANSITION_SERVICE( transition ) );

	// Scale the geometry from normalised to actual output resolution
	result.w = ( result.w * *width / normalised_width );
	result.h = ( result.h * *height / normalised_height );
	result.x = ( result.x * *width / normalised_width );
	result.y = ( result.y * *height / normalised_height );

	// Request full resolution of b frame image.
	b_width = mlt_properties_get_int( b_props, "meta.media.width" );
	b_height = mlt_properties_get_int( b_props, "meta.media.height" );
	mlt_properties_set_int( b_props, "rescale_width", b_width );
	mlt_properties_set_int( b_props, "rescale_height", b_height );

	// Suppress padding and aspect normalization.
	// (interps is duplicated because fetching the b image may invalidate
	// the property-owned string.)
	char *interps = mlt_properties_get( a_props, "rescale.interp" );
	if ( interps )
		interps = strdup( interps );
	mlt_properties_set( b_props, "rescale.interp", "none" );

	// This is not a field-aware transform.
	mlt_properties_set_int( b_props, "consumer_deinterlace", 1 );

	mlt_frame_get_image( b_frame, &b_image, &b_format, &b_width, &b_height, 0 );

	// Check that both images are of the correct format and process
	if ( *format == mlt_image_rgb24a && b_format == mlt_image_rgb24a )
	{
		float x, y;
		float dx, dy;
		float dz;
		float sw, sh;
		uint8_t *p = *image;

		// Get values from the transition
		float scale_x = mlt_properties_get_double( properties, "scale_x" );
		float scale_y = mlt_properties_get_double( properties, "scale_y" );
		int scale = mlt_properties_get_int( properties, "scale" );
		int b_alpha = mlt_properties_get_int( properties, "b_alpha" );
		float geom_scale_x = (float) b_width / result.w;
		float geom_scale_y = (float) b_height / result.h;
		// Centre of the destination region; the transform maps coordinates
		// relative to this centre onto the b image centred at its midpoint.
		float cx = result.x + result.w / 2.0;
		float cy = result.y + result.h / 2.0;
		float lower_x = - cx;
		float lower_y = - cy;
		float x_offset = (float) b_width / 2.0;
		float y_offset = (float) b_height / 2.0;
		affine_t affine;
		interpp interp = interpBL_b32;
		int i, j; // loop counters

		affine_init( affine.matrix );

		// Compute the affine transform
		get_affine( &affine, transition, ( float )position );
		dz = MapZ( affine.matrix, 0, 0 );
		// Bail out on a near-degenerate transform (|dz| < 0.025).
		// NOTE(review): abs() here receives a double converted to int —
		// presumably fabs() was intended; the truncated result is equivalent
		// for this threshold test, but confirm.
		if ( ( int )abs( dz * 1000 ) < 25 )
		{
			if ( interps )
				free( interps );
			return 0;
		}

		// Factor scaling into the transformation based on output resolution.
		if ( mlt_properties_get_int( properties, "distort" ) )
		{
			scale_x = geom_scale_x * ( scale_x == 0 ? 1 : scale_x );
			scale_y = geom_scale_y * ( scale_y == 0 ? 1 : scale_y );
		}
		else
		{
			// Determine scale with respect to aspect ratio.
			double consumer_dar = consumer_ar * normalised_width / normalised_height;
			double b_ar = mlt_properties_get_double( b_props, "aspect_ratio" );
			double b_dar = b_ar * b_width / b_height;

			// Fit the b image inside the region preserving its display aspect
			if ( b_dar > consumer_dar )
			{
				scale_x = geom_scale_x * ( scale_x == 0 ? 1 : scale_x );
				scale_y = geom_scale_x * ( scale_y == 0 ? 1 : scale_y );
			}
			else
			{
				scale_x = geom_scale_y * ( scale_x == 0 ? 1 : scale_x );
				scale_y = geom_scale_y * ( scale_y == 0 ? 1 : scale_y );
			}
			scale_x *= consumer_ar / b_ar;
		}
		if ( scale )
		{
			affine_max_output( affine.matrix, &sw, &sh, dz, *width, *height );
			affine_scale( affine.matrix, sw * MIN( geom_scale_x, geom_scale_y ), sh * MIN( geom_scale_x, geom_scale_y ) );
		}
		else if ( scale_x != 0 && scale_y != 0 )
		{
			affine_scale( affine.matrix, scale_x, scale_y );
		}

		// Set the interpolation function
		if ( interps == NULL || strcmp( interps, "nearest" ) == 0 || strcmp( interps, "neighbor" ) == 0 )
			interp = interpNN_b32;
		else if ( strcmp( interps, "tiles" ) == 0 || strcmp( interps, "fast_bilinear" ) == 0 )
			interp = interpNN_b32;
		else if ( strcmp( interps, "bilinear" ) == 0 )
			interp = interpBL_b32;
		else if ( strcmp( interps, "bicubic" ) == 0 )
			interp = interpBC_b32;
		// TODO: lanczos 8x8
		else if ( strcmp( interps, "hyper" ) == 0 || strcmp( interps, "sinc" ) == 0 || strcmp( interps, "lanczos" ) == 0 )
			interp = interpBC_b32;
		else if ( strcmp( interps, "spline" ) == 0 ) // TODO: spline 4x4 or 6x6
			interp = interpBC_b32;

		// Do the transform with interpolation: inverse-map each output pixel
		// into b-image coordinates and sample when inside its bounds.
		for ( i = 0, y = lower_y; i < *height; i++, y++ )
		{
			for ( j = 0, x = lower_x; j < *width; j++, x++ )
			{
				dx = MapX( affine.matrix, x, y ) / dz + x_offset;
				dy = MapY( affine.matrix, x, y ) / dz + y_offset;
				if ( dx >= 0 && dx < (b_width - 1) && dy >=0 && dy < (b_height - 1) )
					interp( b_image, b_width, b_height, dx, dy, result.mix/100.0, p, b_alpha );
				p += 4;
			}
		}
	}

	if ( interps )
		free( interps );

	return 0;
}
/** Filter processing entry point.
 *
 * Computes the mix level for this frame — either interpolated between
 * "start"/"end" (or from the animated "split" property), or as a fade
 * ramp of "length" frames at each end — stores it plus the previous mix
 * into per-frame instance properties, and overrides the frame's audio
 * chain with filter_get_audio.
 *
 * \param filter this filter
 * \param frame the frame to process
 * \return the same frame, with audio overridden
 */
static mlt_frame filter_process( mlt_filter filter, mlt_frame frame )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_properties frame_props = MLT_FRAME_PROPERTIES( frame );
	mlt_properties instance_props = mlt_properties_new();

	// Only if mix is specified, otherwise a producer may set the mix
	if ( mlt_properties_get( properties, "start" ) != NULL )
	{
		// Determine the time position of this frame in the filter duration.
		// When always_active, in/out/time come from the producer rather
		// than the filter itself.
		mlt_properties props = mlt_properties_get_data( frame_props, "_producer", NULL );
		int always_active = mlt_properties_get_int( properties, "always_active" );
		mlt_position in = !always_active ? mlt_filter_get_in( filter ) : mlt_properties_get_int( props, "in" );
		mlt_position out = !always_active ? mlt_filter_get_out( filter ) : mlt_properties_get_int( props, "out" );
		int length = mlt_properties_get_int( properties, "length" );
		mlt_position time = !always_active ? mlt_frame_get_position( frame ) : mlt_properties_get_int( props, "_frame" );
		// Fractional progress through the filter's duration, in [0, 1)
		double mix = ( double )( time - in ) / ( double )( out - in + 1 );

		if ( length == 0 )
		{
			// If there is an end mix level adjust mix to the range
			if ( mlt_properties_get( properties, "end" ) != NULL )
			{
				double start = mlt_properties_get_double( properties, "start" );
				double end = mlt_properties_get_double( properties, "end" );
				mix = start + ( end - start ) * mix;
			}
			// Use constant mix level if only start
			else if ( mlt_properties_get( properties, "start" ) != NULL )
			{
				mix = mlt_properties_get_double( properties, "start" );
			}

			// Use animated property "split" to get mix level if property is set
			char* split_property = mlt_properties_get( properties, "split" );
			if ( split_property )
			{
				mlt_position pos = mlt_filter_get_position( filter, frame );
				mlt_position len = mlt_filter_get_length2( filter, frame );
				mix = mlt_properties_anim_get_double( properties, "split", pos, len );
			}

			// Convert it from [0, 1] to [-1, 1]
			mix = mix * 2.0 - 1.0;

			// Finally, set the mix property on the frame
			mlt_properties_set_double( instance_props, "mix", mix );

			// Initialise filter previous mix value to prevent an inadvertant jump from 0
			mlt_position last_position = mlt_properties_get_position( properties, "_last_position" );
			mlt_position current_position = mlt_frame_get_position( frame );
			mlt_properties_set_position( properties, "_last_position", current_position );
			// Reset the previous mix on the first frame or on a seek
			// (non-consecutive positions)
			if ( mlt_properties_get( properties, "_previous_mix" ) == NULL || current_position != last_position + 1 )
				mlt_properties_set_double( properties, "_previous_mix", mix );

			// Tell the frame what the previous mix level was
			mlt_properties_set_double( instance_props, "previous_mix", mlt_properties_get_double( properties, "_previous_mix" ) );

			// Save the current mix level for the next iteration
			mlt_properties_set_double( properties, "_previous_mix", mix );
		}
		else
		{
			// Fade ramp: ramp up over the first "length" frames and down over
			// the last "length" frames, clamped to [0, level].
			double level = mlt_properties_get_double( properties, "start" );
			double mix_start = level;
			double mix_end = mix_start;
			double mix_increment = 1.0 / length;
			if ( time - in < length )
			{
				mix_start *= ( double )( time - in ) / length;
				mix_end = mix_start + mix_increment;
			}
			else if ( time > out - length )
			{
				// NOTE(review): the fade-out fraction uses (out - time - in);
				// the symmetric fade-in above uses (time - in), which suggests
				// (out - time) may have been intended — confirm against the
				// upstream filter before changing.
				mix_end = mix_start * ( ( double )( out - time - in ) / length );
				mix_start = mix_end - mix_increment;
			}
			mix_start = mix_start < 0 ? 0 : mix_start > level ? level : mix_start;
			mix_end = mix_end < 0 ? 0 : mix_end > level ? level : mix_end;
			mlt_properties_set_double( instance_props, "previous_mix", mix_start );
			mlt_properties_set_double( instance_props, "mix", mix_end );
		}
		mlt_properties_set_int( instance_props, "channel", mlt_properties_get_int( properties, "channel" ) );
		mlt_properties_set_int( instance_props, "gang", mlt_properties_get_int( properties, "gang" ) );
	}

	// Attach the instance properties to the frame; the frame owns them now
	// (closed via the destructor when the frame is closed).
	mlt_properties_set_data( frame_props, mlt_properties_get( properties, "_unique_id" ), instance_props, 0, (mlt_destructor) mlt_properties_close, NULL );

	// Override the get_audio method
	mlt_frame_push_audio( frame, filter );
	mlt_frame_push_audio( frame, instance_props );
	mlt_frame_push_audio( frame, filter_get_audio );

	return frame;
}