static int get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
    context cx = mlt_frame_pop_audio( frame );
    mlt_frame nested_frame = mlt_frame_pop_audio( frame );
    int result = 0;

    // If not repeating the last frame
    if ( mlt_frame_get_position( nested_frame ) != cx->audio_position )
    {
        // Use the lower of the two frame rates when computing the sample count
        double fps = mlt_profile_fps( cx->profile );
        if ( mlt_producer_get_fps( cx->self ) < fps )
            fps = mlt_producer_get_fps( cx->self );
        *samples = mlt_sample_calculator( fps, *frequency, cx->audio_counter++ );
        result = mlt_frame_get_audio( nested_frame, buffer, format, frequency, channels, samples );

        // Copy the nested frame's audio into a buffer owned by this frame
        int size = mlt_audio_format_size( *format, *samples, *channels );
        int16_t *new_buffer = mlt_pool_alloc( size );
        mlt_frame_set_audio( frame, new_buffer, *format, size, mlt_pool_release );
        memcpy( new_buffer, *buffer, size );
        *buffer = new_buffer;
        cx->audio_position = mlt_frame_get_position( nested_frame );
    }
    else
    {
        // Otherwise return no samples
        *samples = 0;
        *buffer = NULL;
    }

    return result;
}
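/*
 * A minimal sketch (not part of the original source) of why get_audio() above
 * feeds a running counter into mlt_sample_calculator() rather than using a
 * fixed samples-per-frame value: at fractional frame rates such as NTSC
 * 30000/1001, the per-frame sample count must alternate (roughly 1601/1602 at
 * 48 kHz) so the audio clock does not drift against the video clock.
 */
#include <stdio.h>
#include <framework/mlt.h>

static void sample_count_demo( void )
{
    double fps = 30000.0 / 1001.0;
    int frame;
    for ( frame = 0; frame < 5; frame++ )
        printf( "frame %d: %d samples\n", frame,
            mlt_sample_calculator( fps, 48000, frame ) );
}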
static void get_time_info( mlt_producer producer, mlt_frame frame, time_info* info )
{
    mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );
    mlt_position position = mlt_frame_original_position( frame );

    info->fps = ceil( mlt_producer_get_fps( producer ) );

    char* direction = mlt_properties_get( producer_properties, "direction" );
    if ( !strcmp( direction, "down" ) )
    {
        mlt_position length = mlt_properties_get_int( producer_properties, "length" );
        info->position = length - 1 - position;
    }
    else
    {
        info->position = position;
    }

    // Format the position as drop-frame or non-drop-frame SMPTE timecode,
    // then parse the fields back out of the string.
    char* tc_str = NULL;
    if ( mlt_properties_get_int( producer_properties, "drop" ) )
    {
        tc_str = mlt_properties_frames_to_time( producer_properties, info->position, mlt_time_smpte_df );
    }
    else
    {
        tc_str = mlt_properties_frames_to_time( producer_properties, info->position, mlt_time_smpte_ndf );
    }
    sscanf( tc_str, "%02d:%02d:%02d%c%d", &info->hours, &info->minutes, &info->seconds, &info->sep, &info->frames );
}
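/*
 * A minimal sketch (not part of the original source) showing the two timecode
 * shapes the sscanf in get_time_info() above is written to accept:
 * mlt_properties_frames_to_time() yields "HH:MM:SS:FF" for non-drop-frame and
 * "HH:MM:SS;FF" for drop-frame SMPTE, so the lone "%c" captures the separator
 * and lets callers distinguish the two.
 */
#include <stdio.h>

static void timecode_parse_demo( void )
{
    int hours, minutes, seconds, frames;
    char sep;

    // Drop-frame timecode uses ';' between seconds and frames
    sscanf( "00:01:00;02", "%02d:%02d:%02d%c%d", &hours, &minutes, &seconds, &sep, &frames );
    printf( "drop-frame: sep='%c' frames=%d\n", sep, frames );

    // Non-drop-frame timecode uses ':'
    sscanf( "00:01:00:02", "%02d:%02d:%02d%c%d", &hours, &minutes, &seconds, &sep, &frames );
    printf( "non-drop:   sep='%c' frames=%d\n", sep, frames );
}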
static int filter_get_image( mlt_frame frame, uint8_t **image, mlt_image_format *image_format, int *width, int *height, int writable )
{
    int error = 0;
    mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
    mlt_filter filter = (mlt_filter)mlt_frame_pop_service( frame );
    int samples = 0;
    int channels = 0;
    int frequency = 0;
    mlt_audio_format audio_format = mlt_audio_s16;
    int16_t* audio = (int16_t*)mlt_properties_get_data( frame_properties, "audio", NULL );

    if ( !audio && !preprocess_warned )
    {
        // This filter depends on the consumer processing the audio before the
        // video. If the audio is not preprocessed, this filter will process it.
        // If this filter processes the audio, it could cause confusion for the
        // consumer if it needs different audio properties.
        mlt_log_warning( MLT_FILTER_SERVICE(filter), "Audio not preprocessed. Potential audio distortion.\n" );
        preprocess_warned = true;
    }

    *image_format = mlt_image_rgb24a;

    // Get the current image
    error = mlt_frame_get_image( frame, image, image_format, width, height, writable );

    // Get the audio
    if ( !error )
    {
        frequency = mlt_properties_get_int( frame_properties, "audio_frequency" );
        if ( !frequency )
        {
            frequency = 48000;
        }
        channels = mlt_properties_get_int( frame_properties, "audio_channels" );
        if ( !channels )
        {
            channels = 2;
        }
        samples = mlt_properties_get_int( frame_properties, "audio_samples" );
        if ( !samples )
        {
            mlt_producer producer = mlt_frame_get_original_producer( frame );
            double fps = mlt_producer_get_fps( mlt_producer_cut_parent( producer ) );
            samples = mlt_sample_calculator( fps, frequency, mlt_frame_get_position( frame ) );
        }
        error = mlt_frame_get_audio( frame, (void**)&audio, &audio_format, &frequency, &channels, &samples );
    }

    // Draw the waveforms
    if ( !error )
    {
        QImage qimg( *width, *height, QImage::Format_ARGB32 );
        convert_mlt_to_qimage_rgba( *image, &qimg, *width, *height );
        draw_waveforms( filter, frame, &qimg, audio, channels, samples );
        convert_qimage_to_mlt_rgba( &qimg, *image, *width, *height );
    }

    return error;
}
static void transport_action( mlt_producer producer, char *value )
{
    mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
    mlt_multitrack multitrack = mlt_properties_get_data( properties, "multitrack", NULL );
    mlt_consumer consumer = mlt_properties_get_data( properties, "transport_consumer", NULL );
    mlt_properties jack = mlt_properties_get_data( MLT_CONSUMER_PROPERTIES( consumer ), "jack_filter", NULL );
    mlt_position position = producer ? mlt_producer_position( producer ) : 0;

    mlt_properties_set_int( properties, "stats_off", 1 );

    if ( strlen( value ) == 1 )
    {
        switch( value[ 0 ] )
        {
            case 'q':
            case 'Q':
                mlt_properties_set_int( properties, "done", 1 );
                mlt_events_fire( jack, "jack-stop", NULL );
                break;
            case '0':
                position = 0;
                mlt_producer_set_speed( producer, 1 );
                mlt_producer_seek( producer, position );
                mlt_consumer_purge( consumer );
                mlt_events_fire( jack, "jack-seek", &position, NULL );
                break;
            case '1':
                mlt_producer_set_speed( producer, -10 );
                break;
            case '2':
                mlt_producer_set_speed( producer, -5 );
                break;
            case '3':
                mlt_producer_set_speed( producer, -2 );
                break;
            case '4':
                mlt_producer_set_speed( producer, -1 );
                break;
            case '5':
                mlt_producer_set_speed( producer, 0 );
                mlt_consumer_purge( consumer );
                mlt_producer_seek( producer, mlt_consumer_position( consumer ) + 1 );
                mlt_events_fire( jack, "jack-stop", NULL );
                break;
            case '6':
            case ' ':
                if ( !jack || mlt_producer_get_speed( producer ) != 0 )
                    mlt_producer_set_speed( producer, 1 );
                mlt_consumer_purge( consumer );
                mlt_events_fire( jack, "jack-start", NULL );
                break;
            case '7':
                mlt_producer_set_speed( producer, 2 );
                break;
            case '8':
                mlt_producer_set_speed( producer, 5 );
                break;
            case '9':
                mlt_producer_set_speed( producer, 10 );
                break;
            case 'd':
                if ( multitrack != NULL )
                {
                    int i = 0;
                    mlt_position last = -1;
                    fprintf( stderr, "\n" );
                    for ( i = 0; 1; i ++ )
                    {
                        position = mlt_multitrack_clip( multitrack, mlt_whence_relative_start, i );
                        if ( position == last )
                            break;
                        last = position;
                        fprintf( stderr, "%d: %d\n", i, (int)position );
                    }
                }
                break;
            case 'g':
                if ( multitrack != NULL )
                {
                    position = mlt_multitrack_clip( multitrack, mlt_whence_relative_current, 0 );
                    mlt_producer_seek( producer, position );
                    mlt_consumer_purge( consumer );
                    mlt_events_fire( jack, "jack-seek", &position, NULL );
                }
                break;
            case 'H':
                if ( producer != NULL )
                {
                    position -= mlt_producer_get_fps( producer ) * 60;
                    mlt_consumer_purge( consumer );
                    mlt_producer_seek( producer, position );
                    mlt_events_fire( jack, "jack-seek", &position, NULL );
                }
                break;
            case 'h':
                if ( producer != NULL )
                {
                    position--;
                    mlt_producer_set_speed( producer, 0 );
                    mlt_consumer_purge( consumer );
                    mlt_producer_seek( producer, position );
                    mlt_events_fire( jack, "jack-stop", NULL );
                    mlt_events_fire( jack, "jack-seek", &position, NULL );
                }
                break;
            case 'j':
                if ( multitrack != NULL )
                {
                    position = mlt_multitrack_clip( multitrack, mlt_whence_relative_current, 1 );
                    mlt_consumer_purge( consumer );
                    mlt_producer_seek( producer, position );
                    mlt_events_fire( jack, "jack-seek", &position, NULL );
                }
                break;
            case 'k':
                if ( multitrack != NULL )
                {
                    position = mlt_multitrack_clip( multitrack, mlt_whence_relative_current, -1 );
                    mlt_consumer_purge( consumer );
                    mlt_producer_seek( producer, position );
                    mlt_events_fire( jack, "jack-seek", &position, NULL );
                }
                break;
            case 'l':
                if ( producer != NULL )
                {
                    position++;
                    mlt_consumer_purge( consumer );
                    if ( mlt_producer_get_speed( producer ) != 0 )
                    {
                        mlt_producer_set_speed( producer, 0 );
                        mlt_events_fire( jack, "jack-stop", NULL );
                    }
                    else
                    {
                        mlt_producer_seek( producer, position );
                        mlt_events_fire( jack, "jack-seek", &position, NULL );
                    }
                }
                break;
            case 'L':
                if ( producer != NULL )
                {
                    position += mlt_producer_get_fps( producer ) * 60;
                    mlt_consumer_purge( consumer );
                    mlt_producer_seek( producer, position );
                    mlt_events_fire( jack, "jack-seek", &position, NULL );
                }
                break;
        }

        mlt_properties_set_int( MLT_CONSUMER_PROPERTIES( consumer ), "refresh", 1 );
    }

    mlt_properties_set_int( properties, "stats_off", 0 );
}
static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
{
    double aspect_ratio = 1.0;

    if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
    {
        if ( pkt )
        {
            if ( dv_is_pal( pkt ) )
            {
                aspect_ratio = dv_is_wide( pkt )
                    ? 64.0/45.0  // 16:9 PAL
                    : 16.0/15.0; // 4:3 PAL
            }
            else
            {
                aspect_ratio = dv_is_wide( pkt )
                    ? 32.0/27.0  // 16:9 NTSC
                    : 8.0/9.0;   // 4:3 NTSC
            }
        }
        else
        {
            AVRational ar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
                stream->sample_aspect_ratio;
#else
                codec_context->sample_aspect_ratio;
#endif
            // Override FFmpeg's notion of DV aspect ratios, which are
            // based upon a width of 704. Since we do not have a normaliser
            // that crops (nor is cropping 720 wide ITU-R 601 video always desirable),
            // we just coerce the values to facilitate a passive behaviour through
            // the rescale normaliser when using equivalent producers and consumers.
            // sample_aspect = display_aspect * height / width
            if ( ar.num == 10 && ar.den == 11 )
                aspect_ratio = 8.0/9.0;   // 4:3 NTSC
            else if ( ar.num == 59 && ar.den == 54 )
                aspect_ratio = 16.0/15.0; // 4:3 PAL
            else if ( ar.num == 40 && ar.den == 33 )
                aspect_ratio = 32.0/27.0; // 16:9 NTSC
            else if ( ar.num == 118 && ar.den == 81 )
                aspect_ratio = 64.0/45.0; // 16:9 PAL
        }
    }
    else
    {
        AVRational codec_sar = codec_context->sample_aspect_ratio;
        AVRational stream_sar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
            stream->sample_aspect_ratio;
#else
            { 0, 1 };
#endif
        if ( codec_sar.num > 0 )
            aspect_ratio = av_q2d( codec_sar );
        else if ( stream_sar.num > 0 )
            aspect_ratio = av_q2d( stream_sar );
    }

    return aspect_ratio;
}

/** Open the file.
*/

static int producer_open( mlt_producer this, mlt_profile profile, char *file )
{
    // Return an error code (0 == no error)
    int error = 0;

    // Context for avformat
    AVFormatContext *context = NULL;

    // Get the properties
    mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

    // We will treat everything with the producer fps
    double fps = mlt_profile_fps( profile );

    // Lock the mutex now
    avformat_lock( );

    // If "MRL", then create AVInputFormat
    AVInputFormat *format = NULL;
    AVFormatParameters *params = NULL;
    char *standard = NULL;
    char *mrl = strchr( file, ':' );

    // AV option (0 = both, 1 = video, 2 = audio)
    int av = 0;

    // Only if there is not a protocol specification that avformat can handle
    if ( mrl && !url_exist( file ) )
    {
        // 'file' becomes format abbreviation
        mrl[0] = 0;

        // Lookup the format
        format = av_find_input_format( file );

        // Eat the format designator
        file = ++mrl;

        if ( format )
        {
            // Allocate params
            params = calloc( sizeof( AVFormatParameters ), 1 );

            // These are required by video4linux (defaults)
            params->width = 640;
            params->height = 480;
            params->time_base = (AVRational){ 1, 25 };
            // params->device = file;
            params->channels = 2;
            params->sample_rate = 48000;
        }

        // XXX: this does not work anymore since avdevice
        // TODO: make producer_avdevice?
        // Parse out params
        mrl = strchr( file, '?' );
        while ( mrl )
        {
            mrl[0] = 0;
            char *name = strdup( ++mrl );
            char *value = strchr( name, ':' );
            if ( value )
            {
                value[0] = 0;
                value++;
                char *t = strchr( value, '&' );
                if ( t )
                    t[0] = 0;
                if ( !strcmp( name, "frame_rate" ) )
                    params->time_base.den = atoi( value );
                else if ( !strcmp( name, "frame_rate_base" ) )
                    params->time_base.num = atoi( value );
                else if ( !strcmp( name, "sample_rate" ) )
                    params->sample_rate = atoi( value );
                else if ( !strcmp( name, "channels" ) )
                    params->channels = atoi( value );
                else if ( !strcmp( name, "width" ) )
                    params->width = atoi( value );
                else if ( !strcmp( name, "height" ) )
                    params->height = atoi( value );
                else if ( !strcmp( name, "standard" ) )
                {
                    standard = strdup( value );
                    params->standard = standard;
                }
                else if ( !strcmp( name, "av" ) )
                    av = atoi( value );
            }
            free( name );
            mrl = strchr( mrl, '&' );
        }
    }

    // Now attempt to open the file
    error = av_open_input_file( &context, file, format, 0, params ) < 0;

    // Cleanup AVFormatParameters
    free( standard );
    free( params );

    // If successful, then try to get additional info
    if ( error == 0 )
    {
        // Get the stream info
        error = av_find_stream_info( context ) < 0;

        // Continue if no error
        if ( error == 0 )
        {
            // We will default to the first audio and video streams found
            int audio_index = -1;
            int video_index = -1;
            int av_bypass = 0;

            // Now set properties where we can (use default unknowns if required)
            if ( context->duration != AV_NOPTS_VALUE )
            {
                // This isn't going to be accurate for all formats
                mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
                mlt_properties_set_position( properties, "out", frames - 1 );
                mlt_properties_set_position( properties, "length", frames );
            }

            // Find default audio and video streams
            find_default_streams( properties, context, &audio_index, &video_index );

            if ( context->start_time != AV_NOPTS_VALUE )
                mlt_properties_set_double( properties, "_start_time", context->start_time );

            // Check if we're seekable (something funny about mpeg here :-/)
            if ( strcmp( file, "pipe:" ) &&
                 strncmp( file, "http://", 7 ) &&
                 strncmp( file, "udp:", 4 ) &&
                 strncmp( file, "tcp:", 4 ) &&
                 strncmp( file, "rtsp:", 5 ) &&
                 strncmp( file, "rtp:", 4 ) )
            {
                mlt_properties_set_int( properties, "seekable",
                    av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
                mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
                av_open_input_file( &context, file, NULL, 0, NULL );
                av_find_stream_info( context );
            }
            else
            {
                av_bypass = 1;
            }

            // Store selected audio and video indexes on properties
            mlt_properties_set_int( properties, "_audio_index", audio_index );
            mlt_properties_set_int( properties, "_video_index", video_index );
            mlt_properties_set_int( properties, "_last_position", -1 );

            // Fetch the width, height and aspect ratio
            if ( video_index != -1 )
            {
                AVCodecContext *codec_context = context->streams[ video_index ]->codec;
                mlt_properties_set_int( properties, "width", codec_context->width );
                mlt_properties_set_int( properties, "height", codec_context->height );

                if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
                {
                    // Fetch the first frame of DV so we can read it directly
                    AVPacket pkt;
                    int ret = 0;
                    while ( ret >= 0 )
                    {
                        ret = av_read_frame( context, &pkt );
                        if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
                        {
                            mlt_properties_set_double( properties, "aspect_ratio",
                                get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
                            break;
                        }
                    }
                }
                else
                {
                    mlt_properties_set_double( properties, "aspect_ratio",
                        get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
                }
            }

            // Read Metadata
            if ( context->title != NULL )
                mlt_properties_set( properties, "meta.attr.title.markup", context->title );
            if ( context->author != NULL )
                mlt_properties_set( properties, "meta.attr.author.markup", context->author );
            if ( context->copyright != NULL )
                mlt_properties_set( properties, "meta.attr.copyright.markup", context->copyright );
            if ( context->comment != NULL )
                mlt_properties_set( properties, "meta.attr.comment.markup", context->comment );
            if ( context->album != NULL )
                mlt_properties_set( properties, "meta.attr.album.markup", context->album );
            if ( context->year != 0 )
                mlt_properties_set_int( properties, "meta.attr.year.markup", context->year );
            if ( context->track != 0 )
                mlt_properties_set_int( properties, "meta.attr.track.markup", context->track );

            // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
            if ( av == 0 && audio_index != -1 && video_index != -1 )
            {
                // We'll use the open one as our video_context
                mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );

                // And open again for our audio context
                av_open_input_file( &context, file, NULL, 0, NULL );
                av_find_stream_info( context );

                // Audio context
                mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
            }
            else if ( av != 2 && video_index != -1 )
            {
                // We only have a video context
                mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
            }
            else if ( audio_index != -1 )
            {
                // We only have an audio context
                mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
            }
            else
            {
                // Something has gone wrong
                error = -1;
            }

            mlt_properties_set_int( properties, "av_bypass", av_bypass );
        }
    }

    // Unlock the mutex now
    avformat_unlock( );

    return error;
}

/** Convert a frame position to a time code.
*/

static double producer_time_of_frame( mlt_producer this, mlt_position position )
{
    return ( double )position / mlt_producer_get_fps( this );
}

static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height )
{
#ifdef SWSCALE
    if ( format == mlt_image_yuv420p )
    {
        struct SwsContext *context = sws_getContext( width, height, pix_fmt,
            width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL );
        AVPicture output;
        output.data[0] = buffer;
        output.data[1] = buffer + width * height;
        output.data[2] = buffer + ( 3 * width * height ) / 2;
        output.linesize[0] = width;
        output.linesize[1] = width >> 1;
        output.linesize[2] = width >> 1;
        sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize );
        sws_freeContext( context );
    }
    else if ( format == mlt_image_rgb24 )
    {
        struct SwsContext *context = sws_getContext( width, height, pix_fmt,
            width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL );
        AVPicture output;
        avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
        sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize );
        sws_freeContext( context );
    }
    else
    {
        struct SwsContext *context = sws_getContext( width, height, pix_fmt,
            width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL );
        AVPicture output;
        avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
        sws_scale( context, frame->data, frame->linesize, 0, height, output.data, output.linesize );
        sws_freeContext( context );
    }
#else
    if ( format == mlt_image_yuv420p )
    {
        AVPicture pict;
        pict.data[0] = buffer;
        pict.data[1] = buffer + width * height;
        pict.data[2] = buffer + ( 3 * width * height ) / 2;
        pict.linesize[0] = width;
        pict.linesize[1] = width >> 1;
        pict.linesize[2] = width >> 1;
        img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
    }
    else if ( format == mlt_image_rgb24 )
    {
        AVPicture output;
        avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
        img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
    }
    else
    {
        AVPicture output;
        avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
        img_convert( &output, PIX_FMT_YUYV422, (AVPicture *)frame, pix_fmt, width, height );
    }
#endif
}
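/*
 * A minimal sketch (an assumption, not from the original source) of how a
 * caller would size the destination buffer for the planar YUV420P layout that
 * convert_image() fills above: a full-resolution Y plane followed by two
 * quarter-resolution chroma planes, i.e. width * height * 3 / 2 bytes, which
 * matches the data[1] and data[2] offsets set on the output picture.
 */
#include <stdint.h>
#include <framework/mlt.h>

static uint8_t *alloc_yuv420p_buffer( int width, int height )
{
    // Y: width*height bytes; U and V: (width/2)*(height/2) bytes each
    return mlt_pool_alloc( width * height * 3 / 2 );
}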
static void transport_action( mlt_producer producer, char *value )
{
    mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
    mlt_multitrack multitrack = mlt_properties_get_data( properties, "multitrack", NULL );
    mlt_consumer consumer = mlt_properties_get_data( properties, "transport_consumer", NULL );

    mlt_properties_set_int( properties, "stats_off", 1 );

    if ( strlen( value ) == 1 )
    {
        switch( value[ 0 ] )
        {
            case 'q':
                mlt_properties_set_int( properties, "done", 1 );
                break;
            case '0':
                mlt_producer_set_speed( producer, 1 );
                mlt_producer_seek( producer, 0 );
                break;
            case '1':
                mlt_producer_set_speed( producer, -10 );
                break;
            case '2':
                mlt_producer_set_speed( producer, -5 );
                break;
            case '3':
                mlt_producer_set_speed( producer, -2 );
                break;
            case '4':
                mlt_producer_set_speed( producer, -1 );
                break;
            case '5':
                mlt_producer_set_speed( producer, 0 );
                break;
            case '6':
            case ' ':
                mlt_producer_set_speed( producer, 1 );
                break;
            case '7':
                mlt_producer_set_speed( producer, 2 );
                break;
            case '8':
                mlt_producer_set_speed( producer, 5 );
                break;
            case '9':
                mlt_producer_set_speed( producer, 10 );
                break;
            case 'd':
                if ( multitrack != NULL )
                {
                    int i = 0;
                    mlt_position last = -1;
                    fprintf( stderr, "\n" );
                    for ( i = 0; 1; i ++ )
                    {
                        mlt_position time = mlt_multitrack_clip( multitrack, mlt_whence_relative_start, i );
                        if ( time == last )
                            break;
                        last = time;
                        fprintf( stderr, "%d: %d\n", i, (int)time );
                    }
                }
                break;
            case 'g':
                if ( multitrack != NULL )
                {
                    mlt_position time = mlt_multitrack_clip( multitrack, mlt_whence_relative_current, 0 );
                    mlt_producer_seek( producer, time );
                }
                break;
            case 'H':
                if ( producer != NULL )
                {
                    mlt_position position = mlt_producer_position( producer );
                    mlt_producer_seek( producer, position - ( mlt_producer_get_fps( producer ) * 60 ) );
                }
                break;
            case 'h':
                if ( producer != NULL )
                {
                    mlt_position position = mlt_producer_position( producer );
                    mlt_producer_set_speed( producer, 0 );
                    mlt_producer_seek( producer, position - 1 );
                }
                break;
            case 'j':
                if ( multitrack != NULL )
                {
                    mlt_position time = mlt_multitrack_clip( multitrack, mlt_whence_relative_current, 1 );
                    mlt_producer_seek( producer, time );
                }
                break;
            case 'k':
                if ( multitrack != NULL )
                {
                    mlt_position time = mlt_multitrack_clip( multitrack, mlt_whence_relative_current, -1 );
                    mlt_producer_seek( producer, time );
                }
                break;
            case 'l':
                if ( producer != NULL )
                {
                    mlt_position position = mlt_producer_position( producer );
                    if ( mlt_producer_get_speed( producer ) != 0 )
                        mlt_producer_set_speed( producer, 0 );
                    else
                        mlt_producer_seek( producer, position + 1 );
                }
                break;
            case 'L':
                if ( producer != NULL )
                {
                    mlt_position position = mlt_producer_position( producer );
                    mlt_producer_seek( producer, position + ( mlt_producer_get_fps( producer ) * 60 ) );
                }
                break;
        }

        mlt_properties_set_int( MLT_CONSUMER_PROPERTIES( consumer ), "refresh", 1 );
    }

    mlt_properties_set_int( properties, "stats_off", 0 );
}
unsigned char *mlt_frame_get_waveform( mlt_frame self, int w, int h )
{
    int16_t *pcm = NULL;
    mlt_properties properties = MLT_FRAME_PROPERTIES( self );
    mlt_audio_format format = mlt_audio_s16;
    int frequency = 16000;
    int channels = 2;
    mlt_producer producer = mlt_frame_get_original_producer( self );
    double fps = mlt_producer_get_fps( mlt_producer_cut_parent( producer ) );
    int samples = mlt_sample_calculator( fps, frequency, mlt_frame_get_position( self ) );

    // Increase audio resolution proportional to requested image size
    while ( samples < w )
    {
        frequency += 16000;
        samples = mlt_sample_calculator( fps, frequency, mlt_frame_get_position( self ) );
    }

    // Get the pcm data
    mlt_frame_get_audio( self, (void**)&pcm, &format, &frequency, &channels, &samples );

    // Make an 8-bit buffer large enough to hold rendering
    int size = w * h;
    if ( size <= 0 )
        return NULL;
    unsigned char *bitmap = ( unsigned char* )mlt_pool_alloc( size );
    if ( bitmap != NULL )
        memset( bitmap, 0, size );
    else
        return NULL;
    mlt_properties_set_data( properties, "waveform", bitmap, size, ( mlt_destructor )mlt_pool_release, NULL );

    // Render vertical lines
    int16_t *ubound = pcm + samples * channels;
    int skip = samples / w;
    skip = !skip ? 1 : skip;
    unsigned char gray = 0xFF / skip;
    int i, j, k;

    // Iterate sample stream and along x coordinate
    for ( i = 0; pcm < ubound; i++ )
    {
        // pcm data has channels interleaved
        for ( j = 0; j < channels; j++, pcm++ )
        {
            // Determine sample's magnitude from 2s complement
            int pcm_magnitude = *pcm < 0 ? ~(*pcm) + 1 : *pcm;
            // The height of a line is the ratio of the magnitude multiplied by
            // the vertical resolution of a single channel
            int height = h * pcm_magnitude / channels / 2 / 32768;
            // Determine the starting y coordinate - left top, right bottom
            int displacement = h * (j * 2 + 1) / channels / 2 - ( *pcm < 0 ? 0 : height );
            // Position buffer pointer using y coordinate, stride, and x coordinate
            unsigned char *p = bitmap + i / skip + displacement * w;

            // Draw vertical line
            for ( k = 0; k < height + 1; k++ )
                if ( *pcm < 0 )
                    p[ w * k ] = ( k == 0 ) ? 0xFF : p[ w * k ] + gray;
                else
                    p[ w * k ] = ( k == height ) ? 0xFF : p[ w * k ] + gray;
        }
    }

    return bitmap;
}
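/*
 * A minimal usage sketch (not from the original source): render the 8-bit
 * bitmap returned by mlt_frame_get_waveform() as ASCII art. Note the bitmap
 * is owned by the frame - it is stored on the "waveform" property with a pool
 * destructor above - so the caller must not release it.
 */
#include <stdio.h>
#include <framework/mlt.h>

static void print_waveform( mlt_frame frame )
{
    int w = 72, h = 16, x, y;
    unsigned char *bitmap = mlt_frame_get_waveform( frame, w, h );

    if ( !bitmap )
        return;
    for ( y = 0; y < h; y++ )
    {
        for ( x = 0; x < w; x++ )
        {
            unsigned char v = bitmap[ y * w + x ];
            putchar( v > 0x7F ? '#' : v ? '.' : ' ' );
        }
        putchar( '\n' );
    }
}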
mlt_frame getFrame()
{
    struct timeval now;
    struct timespec tm;
    double fps = mlt_producer_get_fps( getProducer() );
    mlt_position position = mlt_producer_position( getProducer() );
    mlt_frame frame = mlt_cache_get_frame( m_cache, position );

    // Allow the buffer to fill to the requested initial buffer level.
    if ( m_isBuffering )
    {
        int prefill = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "prefill" );
        int buffer = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "buffer" );

        m_isBuffering = false;
        prefill = prefill > buffer ? buffer : prefill;

        pthread_mutex_lock( &m_mutex );
        while ( mlt_deque_count( m_queue ) < prefill )
        {
            // Wait up to buffer/fps seconds
            gettimeofday( &now, NULL );
            long usec = now.tv_sec * 1000000 + now.tv_usec;
            usec += 1000000 * buffer / fps;
            tm.tv_sec = usec / 1000000;
            tm.tv_nsec = (usec % 1000000) * 1000;
            if ( pthread_cond_timedwait( &m_condition, &m_mutex, &tm ) )
                break;
        }
        pthread_mutex_unlock( &m_mutex );
    }

    if ( !frame )
    {
        // Wait if queue is empty
        pthread_mutex_lock( &m_mutex );
        while ( mlt_deque_count( m_queue ) < 1 )
        {
            // Wait up to twice frame duration
            gettimeofday( &now, NULL );
            long usec = now.tv_sec * 1000000 + now.tv_usec;
            usec += 2000000 / fps;
            tm.tv_sec = usec / 1000000;
            tm.tv_nsec = (usec % 1000000) * 1000;
            if ( pthread_cond_timedwait( &m_condition, &m_mutex, &tm ) )
                // Stop waiting if error (timed out)
                break;
        }
        frame = ( mlt_frame ) mlt_deque_pop_front( m_queue );
        pthread_mutex_unlock( &m_mutex );

        // Add to cache
        if ( frame )
        {
            mlt_frame_set_position( frame, position );
            mlt_cache_put_frame( m_cache, frame );
        }
    }

    // Set frame timestamp and properties
    if ( frame )
    {
        mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( getProducer() ) );
        mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
        mlt_properties_set_int( properties, "progressive", profile->progressive );
        mlt_properties_set_int( properties, "meta.media.progressive", profile->progressive );
        mlt_properties_set_int( properties, "top_field_first", m_topFieldFirst );
        mlt_properties_set_double( properties, "aspect_ratio", mlt_profile_sar( profile ) );
        mlt_properties_set_int( properties, "meta.media.sample_aspect_num", profile->sample_aspect_num );
        mlt_properties_set_int( properties, "meta.media.sample_aspect_den", profile->sample_aspect_den );
        mlt_properties_set_int( properties, "meta.media.frame_rate_num", profile->frame_rate_num );
        mlt_properties_set_int( properties, "meta.media.frame_rate_den", profile->frame_rate_den );
        mlt_properties_set_int( properties, "width", profile->width );
        mlt_properties_set_int( properties, "meta.media.width", profile->width );
        mlt_properties_set_int( properties, "height", profile->height );
        mlt_properties_set_int( properties, "meta.media.height", profile->height );
        mlt_properties_set_int( properties, "format", mlt_image_yuv422 );
        mlt_properties_set_int( properties, "colorspace", m_colorspace );
        mlt_properties_set_int( properties, "meta.media.colorspace", m_colorspace );
        mlt_properties_set_int( properties, "audio_frequency", 48000 );
        mlt_properties_set_int( properties, "audio_channels",
            mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" ) );
    }
    else
        mlt_log_warning( getProducer(), "buffer underrun\n" );

    return frame;
}
static int producer_get_audio( mlt_frame frame, int16_t** buffer, mlt_audio_format* format, int* frequency, int* channels, int* samples )
{
    mlt_producer producer = (mlt_producer)mlt_frame_pop_audio( frame );
    mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( producer );
    char* sound = mlt_properties_get( producer_properties, "sound" );
    double fps = mlt_producer_get_fps( producer );
    mlt_position position = mlt_frame_original_position( frame );
    int size = 0;
    int do_beep = 0;
    time_info info;

    if ( fps == 0 ) fps = 25;

    // Correct the returns if necessary
    *format = mlt_audio_float;
    *frequency = *frequency <= 0 ? 48000 : *frequency;
    *channels = *channels <= 0 ? 2 : *channels;
    *samples = *samples <= 0 ? mlt_sample_calculator( fps, *frequency, position ) : *samples;

    // Allocate the buffer
    size = *samples * *channels * sizeof( float );
    *buffer = mlt_pool_alloc( size );

    mlt_service_lock( MLT_PRODUCER_SERVICE( producer ) );

    get_time_info( producer, frame, &info );

    // Determine if this should be a tone or silence.
    if ( strcmp( sound, "none") )
    {
        if ( !strcmp( sound, "2pop" ) )
        {
            mlt_position out = mlt_properties_get_int( producer_properties, "out" );
            mlt_position frames = out - position;

            if ( frames == ( info.fps * 2 ) )
            {
                do_beep = 1;
            }
        }
        else if ( !strcmp( sound, "frame0" ) )
        {
            if ( info.frames == 0 )
            {
                do_beep = 1;
            }
        }
    }

    if ( do_beep )
    {
        fill_beep( producer_properties, (float*)*buffer, *frequency, *channels, *samples );
    }
    else
    {
        // Fill silence.
        memset( *buffer, 0, size );
    }

    mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );

    // Set the buffer for destruction
    mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );

    return 0;
}
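/*
 * fill_beep() is used above but defined elsewhere in the file. A minimal
 * sketch of what such a helper could look like (an assumption, not the
 * original implementation): write a fixed-frequency sine tone into the
 * interleaved float buffer. The 1 kHz pitch and 0.5 amplitude are
 * illustrative defaults, not values taken from the source.
 */
#include <math.h>

static void fill_beep_sketch( float *buffer, int frequency, int channels, int samples )
{
    const double tone_hz = 1000.0; // hypothetical pitch
    int i, j;

    for ( i = 0; i < samples; i++ )
    {
        float value = 0.5f * (float) sin( 2.0 * M_PI * tone_hz * i / frequency );
        for ( j = 0; j < channels; j++ )
            buffer[ i * channels + j ] = value;
    }
}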
static int get_frame( mlt_producer self, mlt_frame_ptr frame, int index )
{
    mlt_properties properties = MLT_PRODUCER_PROPERTIES( self );
    context cx = mlt_properties_get_data( properties, "context", NULL );

    if ( !cx )
    {
        // Allocate and initialize our context
        cx = mlt_pool_alloc( sizeof( struct context_s ) );
        memset( cx, 0, sizeof( *cx ) );
        mlt_properties_set_data( properties, "context", cx, 0, mlt_pool_release, NULL );
        cx->self = self;
        char *profile_name = mlt_properties_get( properties, "profile" );
        if ( !profile_name )
            profile_name = mlt_properties_get( properties, "mlt_profile" );
        mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self ) );

        if ( profile_name )
        {
            cx->profile = mlt_profile_init( profile_name );
            cx->profile->is_explicit = 1;
        }
        else
        {
            cx->profile = mlt_profile_clone( profile );
            cx->profile->is_explicit = 0;
        }

        // Encapsulate a real producer for the resource
        cx->producer = mlt_factory_producer( cx->profile, NULL,
            mlt_properties_get( properties, "resource" ) );
        if ( ( profile_name && !strcmp( profile_name, "auto" ) ) ||
             mlt_properties_get_int( properties, "autoprofile" ) )
        {
            mlt_profile_from_producer( cx->profile, cx->producer );
            mlt_producer_close( cx->producer );
            cx->producer = mlt_factory_producer( cx->profile, NULL,
                mlt_properties_get( properties, "resource" ) );
        }

        // Since we control the seeking, prevent it from seeking on its own
        mlt_producer_set_speed( cx->producer, 0 );
        cx->audio_position = -1;

        // We will encapsulate a consumer
        cx->consumer = mlt_consumer_new( cx->profile );
        // Do not use _pass_list on real_time so that it defaults to 0 in the absence of
        // an explicit real_time property.
        mlt_properties_set_int( MLT_CONSUMER_PROPERTIES( cx->consumer ), "real_time",
            mlt_properties_get_int( properties, "real_time" ) );
        mlt_properties_pass_list( MLT_CONSUMER_PROPERTIES( cx->consumer ), properties,
            "buffer, prefill, deinterlace_method, rescale" );

        // Connect it all together
        mlt_consumer_connect( cx->consumer, MLT_PRODUCER_SERVICE( cx->producer ) );
        mlt_consumer_start( cx->consumer );
    }

    // Generate a frame
    *frame = mlt_frame_init( MLT_PRODUCER_SERVICE( self ) );
    if ( *frame )
    {
        // Seek the producer to the correct place
        // Calculate our positions
        double actual_position = (double) mlt_producer_frame( self );
        if ( mlt_producer_get_speed( self ) != 0 )
            actual_position *= mlt_producer_get_speed( self );
        mlt_position need_first = floor( actual_position );
        mlt_producer_seek( cx->producer,
            lrint( need_first * mlt_profile_fps( cx->profile ) / mlt_producer_get_fps( self ) ) );

        // Get the nested frame
        mlt_frame nested_frame = mlt_consumer_rt_frame( cx->consumer );

        // Stack the producer and our methods on the nested frame
        mlt_frame_push_service( *frame, nested_frame );
        mlt_frame_push_service( *frame, cx );
        mlt_frame_push_get_image( *frame, get_image );
        mlt_frame_push_audio( *frame, nested_frame );
        mlt_frame_push_audio( *frame, cx );
        mlt_frame_push_audio( *frame, get_audio );

        // Give the returned frame temporal identity
        mlt_frame_set_position( *frame, mlt_producer_position( self ) );

        // Store the nested frame on the produced frame for destruction
        mlt_properties frame_props = MLT_FRAME_PROPERTIES( *frame );
        mlt_properties_set_data( frame_props, "_producer_consumer.frame", nested_frame, 0,
            (mlt_destructor) mlt_frame_close, NULL );

        // Inform the normalizers about our video properties
        mlt_properties_set_double( frame_props, "aspect_ratio", mlt_profile_sar( cx->profile ) );
        mlt_properties_set_int( frame_props, "width", cx->profile->width );
        mlt_properties_set_int( frame_props, "height", cx->profile->height );
        mlt_properties_set_int( frame_props, "meta.media.width", cx->profile->width );
        mlt_properties_set_int( frame_props, "meta.media.height", cx->profile->height );
        mlt_properties_set_int( frame_props, "progressive", cx->profile->progressive );
    }

    // Calculate the next timecode
    mlt_producer_prepare_next( self );

    return 0;
}