static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int index )
{
	// Generate a frame
	*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( producer ) );
	mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) );

	if ( *frame != NULL )
	{
		// Obtain properties of frame
		mlt_properties frame_properties = MLT_FRAME_PROPERTIES( *frame );

		// Update time code on the frame
		mlt_frame_set_position( *frame, mlt_producer_frame( producer ) );

		mlt_properties_set_int( frame_properties, "progressive", 1 );
		mlt_properties_set_double( frame_properties, "aspect_ratio", mlt_profile_sar( profile ) );
		mlt_properties_set_int( frame_properties, "meta.media.width", profile->width );
		mlt_properties_set_int( frame_properties, "meta.media.height", profile->height );

		// Configure callbacks
		mlt_frame_push_service( *frame, producer );
		mlt_frame_push_get_image( *frame, producer_get_image );
		mlt_frame_push_audio( *frame, producer );
		mlt_frame_push_audio( *frame, producer_get_audio );
	}

	// Calculate the next time code
	mlt_producer_prepare_next( producer );

	return 0;
}
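/*
 * A minimal usage sketch showing how an application reaches a producer_get_frame()
 * implementation like the one above. Assumptions for illustration only: the "dv_pal"
 * profile name and the "colour" core producer; any registered producer would do.
 */
#include <framework/mlt.h>

static void pull_one_frame_sketch( void )
{
	mlt_factory_init( NULL );

	mlt_profile profile = mlt_profile_init( "dv_pal" );
	mlt_producer producer = mlt_factory_producer( profile, "colour", "red" );

	if ( producer )
	{
		mlt_frame frame = NULL;

		// Dispatches into the producer's get_frame via the service layer
		mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &frame, 0 );

		if ( frame )
		{
			uint8_t *image = NULL;
			mlt_image_format format = mlt_image_yuv422;
			int width = 0, height = 0;

			// Invokes the get_image callback pushed inside producer_get_frame()
			mlt_frame_get_image( frame, &image, &format, &width, &height, 0 );
			mlt_frame_close( frame );
		}
		mlt_producer_close( producer );
	}
	mlt_profile_close( profile );
	mlt_factory_close( );
}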
static int producer_get_frame( mlt_producer parent, mlt_frame_ptr frame, int index )
{
	// Get the multitrack object
	mlt_multitrack self = parent->child;

	// Check if we have a track for this index
	if ( index >= 0 && index < self->count && self->list[ index ] != NULL )
	{
		// Get the producer for this track
		mlt_producer producer = self->list[ index ]->producer;

		// Get the track hide property
		int hide = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( mlt_producer_cut_parent( producer ) ), "hide" );

		// Obtain the current position
		mlt_position position = mlt_producer_frame( parent );

		// Get the parent properties
		mlt_properties producer_properties = MLT_PRODUCER_PROPERTIES( parent );

		// Get the speed
		double speed = mlt_properties_get_double( producer_properties, "_speed" );

		// Make sure we're at the same point
		mlt_producer_seek( producer, position );

		// Get the frame from the producer
		mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), frame, 0 );

		// Indicate the speed of this producer
		mlt_properties properties = MLT_FRAME_PROPERTIES( *frame );
		mlt_properties_set_double( properties, "_speed", speed );
		mlt_frame_set_position( *frame, position );
		mlt_properties_set_int( properties, "hide", hide );
	}
	else
	{
		// Generate a test frame
		*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( parent ) );

		// Update position on the frame we're creating
		mlt_frame_set_position( *frame, mlt_producer_position( parent ) );

		// Move on to the next frame
		if ( index >= self->count )
		{
			// Let the tractor know if we've reached the end
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( *frame ), "last_track", 1 );

			// Move to the next frame
			mlt_producer_prepare_next( parent );
		}
	}

	return 0;
}
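/*
 * A minimal usage sketch, assuming two already-created producers (clip_a, clip_b are
 * placeholders). Tracks are attached with mlt_multitrack_connect(), and the per-track
 * producer_get_frame() above is reached by requesting a specific track index from the
 * multitrack service; an out-of-range index yields a test frame flagged "last_track".
 */
#include <framework/mlt.h>

static void multitrack_track_frame_sketch( mlt_producer clip_a, mlt_producer clip_b )
{
	mlt_multitrack multitrack = mlt_multitrack_init( );
	mlt_frame frame = NULL;

	// Attach the producers as tracks 0 and 1
	mlt_multitrack_connect( multitrack, clip_a, 0 );
	mlt_multitrack_connect( multitrack, clip_b, 1 );

	// Request the frame for track 1
	mlt_service_get_frame( MLT_MULTITRACK_SERVICE( multitrack ), &frame, 1 );

	if ( frame )
		mlt_frame_close( frame );

	mlt_multitrack_close( multitrack );
}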
static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int index )
{
	producer_kino this = producer->child;
	uint8_t *data = mlt_pool_alloc( FRAME_SIZE_625_50 );

	// Obtain the current frame number
	uint64_t position = mlt_producer_frame( producer );

	// Create an empty frame
	*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( producer ) );

	// Seek and fetch
	if ( kino_wrapper_get_frame( this->wrapper, data, position ) )
	{
		// Get the frame's properties
		mlt_properties properties = MLT_FRAME_PROPERTIES( *frame );

		// Determine if we're PAL or NTSC
		int is_pal = kino_wrapper_is_pal( this->wrapper );

		// Pass the dv data
		mlt_properties_set_data( properties, "dv_data", data, FRAME_SIZE_625_50, ( mlt_destructor )mlt_pool_release, NULL );

		// Update other info on the frame
		mlt_properties_set_int( properties, "width", 720 );
		mlt_properties_set_int( properties, "height", is_pal ? 576 : 480 );
		mlt_properties_set_int( properties, "top_field_first", is_pal ? 0 : ( data[ 5 ] & 0x07 ) == 0 ? 0 : 1 );
	}
	else
	{
		mlt_pool_release( data );
	}

	// Update timecode on the frame we're creating
	mlt_frame_set_position( *frame, mlt_producer_position( producer ) );

	// Calculate the next timecode
	mlt_producer_prepare_next( producer );

	return 0;
}
static int producer_get_frame( mlt_producer parent, mlt_frame_ptr frame, int track )
{
	mlt_tractor self = parent->child;

	// We only respond to requests for the first track
	if ( track == 0 && self->producer != NULL )
	{
		int i = 0;
		int done = 0;
		mlt_frame temp = NULL;
		int count = 0;
		int image_count = 0;

		// Get the properties of the parent producer
		mlt_properties properties = MLT_PRODUCER_PROPERTIES( parent );

		// Try to obtain the multitrack associated to the tractor
		mlt_multitrack multitrack = mlt_properties_get_data( properties, "multitrack", NULL );

		// Or a specific producer
		mlt_producer producer = mlt_properties_get_data( properties, "producer", NULL );

		// Determine whether this tractor feeds to the consumer or stops here
		int global_feed = mlt_properties_get_int( properties, "global_feed" );

		// If we don't have one, we're in trouble...
		if ( multitrack != NULL )
		{
			// The output frame will hold the 'global' data feeds (ie: those which are targeted for the final frame)
			mlt_deque data_queue = mlt_deque_init( );

			// Used to garbage collect all frames
			char label[ 30 ];

			// Get the id of the tractor
			char *id = mlt_properties_get( properties, "_unique_id" );

			// Will be used to store the frame properties object
			mlt_properties frame_properties = NULL;

			// We'll store audio and video frames to use here
			mlt_frame audio = NULL;
			mlt_frame video = NULL;
			mlt_frame first_video = NULL;

			// Temporary properties
			mlt_properties temp_properties = NULL;

			// Get the multitrack's producer
			mlt_producer target = MLT_MULTITRACK_PRODUCER( multitrack );
			mlt_producer_seek( target, mlt_producer_frame( parent ) );
			mlt_producer_set_speed( target, mlt_producer_get_speed( parent ) );

			// We will create one frame and attach everything to it
			*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( parent ) );

			// Get the properties of the frame
			frame_properties = MLT_FRAME_PROPERTIES( *frame );

			// Loop through each of the tracks we're harvesting
			for ( i = 0; !done; i ++ )
			{
				// Get a frame from the producer
				mlt_service_get_frame( self->producer, &temp, i );

				// Get the temporary properties
				temp_properties = MLT_FRAME_PROPERTIES( temp );

				// Pass all unique meta properties from the producer's frame to the new frame
				mlt_properties_lock( temp_properties );
				int props_count = mlt_properties_count( temp_properties );
				int j;
				for ( j = 0; j < props_count; j ++ )
				{
					char *name = mlt_properties_get_name( temp_properties, j );
					if ( !strncmp( name, "meta.", 5 ) && !mlt_properties_get( frame_properties, name ) )
						mlt_properties_set( frame_properties, name, mlt_properties_get_value( temp_properties, j ) );
				}
				mlt_properties_unlock( temp_properties );

				// Copy the format conversion virtual functions
				if ( ! (*frame)->convert_image && temp->convert_image )
					(*frame)->convert_image = temp->convert_image;
				if ( ! (*frame)->convert_audio && temp->convert_audio )
					(*frame)->convert_audio = temp->convert_audio;

				// Check for last track
				done = mlt_properties_get_int( temp_properties, "last_track" );

				// Handle fx-only tracks
				if ( mlt_properties_get_int( temp_properties, "fx_cut" ) )
				{
					int hide = ( video == NULL ? 1 : 0 ) | ( audio == NULL ? 2 : 0 );
					mlt_properties_set_int( temp_properties, "hide", hide );
				}

				// We store all frames with a destructor on the output frame
				sprintf( label, "_%s_%d", id, count ++ );
				mlt_properties_set_data( frame_properties, label, temp, 0, ( mlt_destructor )mlt_frame_close, NULL );

				// We want to append all 'final' feeds to the global queue
				if ( !done && mlt_properties_get_data( temp_properties, "data_queue", NULL ) != NULL )
				{
					// Move the contents of this queue on to the output frame's data queue
					mlt_deque sub_queue = mlt_properties_get_data( MLT_FRAME_PROPERTIES( temp ), "data_queue", NULL );
					mlt_deque temp = mlt_deque_init( );
					while ( global_feed && mlt_deque_count( sub_queue ) )
					{
						mlt_properties p = mlt_deque_pop_back( sub_queue );
						if ( mlt_properties_get_int( p, "final" ) )
							mlt_deque_push_back( data_queue, p );
						else
							mlt_deque_push_back( temp, p );
					}
					while ( mlt_deque_count( temp ) )
						mlt_deque_push_front( sub_queue, mlt_deque_pop_back( temp ) );
					mlt_deque_close( temp );
				}

				// Now do the same with the global queue but without the conditional behaviour
				if ( mlt_properties_get_data( temp_properties, "global_queue", NULL ) != NULL )
				{
					mlt_deque sub_queue = mlt_properties_get_data( MLT_FRAME_PROPERTIES( temp ), "global_queue", NULL );
					while ( mlt_deque_count( sub_queue ) )
					{
						mlt_properties p = mlt_deque_pop_back( sub_queue );
						mlt_deque_push_back( data_queue, p );
					}
				}

				// Pick up first video and audio frames
				if ( !done && !mlt_frame_is_test_audio( temp ) && !( mlt_properties_get_int( temp_properties, "hide" ) & 2 ) )
				{
					// Order of frame creation is starting to get problematic
					if ( audio != NULL )
					{
						mlt_deque_push_front( MLT_FRAME_AUDIO_STACK( temp ), producer_get_audio );
						mlt_deque_push_front( MLT_FRAME_AUDIO_STACK( temp ), audio );
					}
					audio = temp;
				}
				if ( !done && !mlt_frame_is_test_card( temp ) && !( mlt_properties_get_int( temp_properties, "hide" ) & 1 ) )
				{
					if ( video != NULL )
					{
						mlt_deque_push_front( MLT_FRAME_IMAGE_STACK( temp ), producer_get_image );
						mlt_deque_push_front( MLT_FRAME_IMAGE_STACK( temp ), video );
					}
					video = temp;
					if ( first_video == NULL )
						first_video = temp;

					mlt_properties_set_int( MLT_FRAME_PROPERTIES( temp ), "image_count", ++ image_count );
					image_count = 1;
				}
			}

			// Now stack callbacks
			if ( audio != NULL )
			{
				mlt_frame_push_audio( *frame, audio );
				mlt_frame_push_audio( *frame, producer_get_audio );
			}

			if ( video != NULL )
			{
				mlt_properties video_properties = MLT_FRAME_PROPERTIES( first_video );
				mlt_frame_push_service( *frame, video );
				mlt_frame_push_service( *frame, producer_get_image );
				if ( global_feed )
					mlt_properties_set_data( frame_properties, "data_queue", data_queue, 0, NULL, NULL );
				mlt_properties_set_data( video_properties, "global_queue", data_queue, 0, destroy_data_queue, NULL );
				mlt_properties_set_int( frame_properties, "width", mlt_properties_get_int( video_properties, "width" ) );
				mlt_properties_set_int( frame_properties, "height", mlt_properties_get_int( video_properties, "height" ) );
				mlt_properties_pass_list( frame_properties, video_properties, "meta.media.width, meta.media.height" );
				mlt_properties_set_int( frame_properties, "progressive", mlt_properties_get_int( video_properties, "progressive" ) );
				mlt_properties_set_double( frame_properties, "aspect_ratio", mlt_properties_get_double( video_properties, "aspect_ratio" ) );
				mlt_properties_set_int( frame_properties, "image_count", image_count );
				mlt_properties_set_data( frame_properties, "_producer", mlt_frame_get_original_producer( first_video ), 0, NULL, NULL );
			}
			else
			{
				destroy_data_queue( data_queue );
			}

			mlt_frame_set_position( *frame, mlt_producer_frame( parent ) );
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( *frame ), "test_audio", audio == NULL );
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( *frame ), "test_image", video == NULL );
		}
		else if ( producer != NULL )
		{
			mlt_producer_seek( producer, mlt_producer_frame( parent ) );
			mlt_producer_set_speed( producer, mlt_producer_get_speed( parent ) );
			mlt_service_get_frame( self->producer, frame, track );
		}
		else
		{
			mlt_log( MLT_PRODUCER_SERVICE( parent ), MLT_LOG_ERROR, "tractor without a multitrack!!\n" );
			mlt_service_get_frame( self->producer, frame, track );
		}

		// Prepare the next frame
		mlt_producer_prepare_next( parent );

		// Indicate our found status
		return 0;
	}
	else
	{
		// Generate a test card
		*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( parent ) );
		return 0;
	}
}
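/*
 * A minimal usage sketch, assuming two already-created producers (track0, track1 are
 * placeholders). A tractor built with mlt_tractor_new() owns a multitrack; requesting
 * track 0 from the tractor runs the harvesting loop above, while any other track index
 * simply returns a test card.
 */
#include <framework/mlt.h>

static void tractor_frame_sketch( mlt_producer track0, mlt_producer track1 )
{
	mlt_tractor tractor = mlt_tractor_new( );
	mlt_frame frame = NULL;

	// Populate the tractor's multitrack
	mlt_tractor_set_track( tractor, track0, 0 );
	mlt_tractor_set_track( tractor, track1, 1 );

	// Track 0 yields the combined frame with the audio/image callbacks stacked above
	mlt_service_get_frame( MLT_TRACTOR_SERVICE( tractor ), &frame, 0 );

	if ( frame )
		mlt_frame_close( frame );

	mlt_tractor_close( tractor );
}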
static int get_frame( mlt_producer self, mlt_frame_ptr frame, int index )
{
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( self );
	context cx = mlt_properties_get_data( properties, "context", NULL );

	if ( !cx )
	{
		// Allocate and initialize our context
		cx = mlt_pool_alloc( sizeof( struct context_s ) );
		memset( cx, 0, sizeof( *cx ) );
		mlt_properties_set_data( properties, "context", cx, 0, mlt_pool_release, NULL );
		cx->self = self;
		char *profile_name = mlt_properties_get( properties, "profile" );
		if ( !profile_name )
			profile_name = mlt_properties_get( properties, "mlt_profile" );
		mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self ) );

		if ( profile_name )
		{
			cx->profile = mlt_profile_init( profile_name );
			cx->profile->is_explicit = 1;
		}
		else
		{
			cx->profile = mlt_profile_clone( profile );
			cx->profile->is_explicit = 0;
		}

		// Encapsulate a real producer for the resource
		cx->producer = mlt_factory_producer( cx->profile, NULL, mlt_properties_get( properties, "resource" ) );
		if ( ( profile_name && !strcmp( profile_name, "auto" ) ) || mlt_properties_get_int( properties, "autoprofile" ) )
		{
			mlt_profile_from_producer( cx->profile, cx->producer );
			mlt_producer_close( cx->producer );
			cx->producer = mlt_factory_producer( cx->profile, NULL, mlt_properties_get( properties, "resource" ) );
		}

		// Since we control the seeking, prevent it from seeking on its own
		mlt_producer_set_speed( cx->producer, 0 );
		cx->audio_position = -1;

		// We will encapsulate a consumer
		cx->consumer = mlt_consumer_new( cx->profile );
		// Do not use _pass_list on real_time so that it defaults to 0 in the absence of
		// an explicit real_time property.
		mlt_properties_set_int( MLT_CONSUMER_PROPERTIES( cx->consumer ), "real_time",
			mlt_properties_get_int( properties, "real_time" ) );
		mlt_properties_pass_list( MLT_CONSUMER_PROPERTIES( cx->consumer ), properties,
			"buffer, prefill, deinterlace_method, rescale" );

		// Connect it all together
		mlt_consumer_connect( cx->consumer, MLT_PRODUCER_SERVICE( cx->producer ) );
		mlt_consumer_start( cx->consumer );
	}

	// Generate a frame
	*frame = mlt_frame_init( MLT_PRODUCER_SERVICE( self ) );
	if ( *frame )
	{
		// Seek the producer to the correct place
		// Calculate our positions
		double actual_position = (double) mlt_producer_frame( self );
		if ( mlt_producer_get_speed( self ) != 0 )
			actual_position *= mlt_producer_get_speed( self );
		mlt_position need_first = floor( actual_position );
		mlt_producer_seek( cx->producer,
			lrint( need_first * mlt_profile_fps( cx->profile ) / mlt_producer_get_fps( self ) ) );

		// Get the nested frame
		mlt_frame nested_frame = mlt_consumer_rt_frame( cx->consumer );

		// Stack the producer and our methods on the nested frame
		mlt_frame_push_service( *frame, nested_frame );
		mlt_frame_push_service( *frame, cx );
		mlt_frame_push_get_image( *frame, get_image );
		mlt_frame_push_audio( *frame, nested_frame );
		mlt_frame_push_audio( *frame, cx );
		mlt_frame_push_audio( *frame, get_audio );

		// Give the returned frame temporal identity
		mlt_frame_set_position( *frame, mlt_producer_position( self ) );

		// Store the nested frame on the produced frame for destruction
		mlt_properties frame_props = MLT_FRAME_PROPERTIES( *frame );
		mlt_properties_set_data( frame_props, "_producer_consumer.frame", nested_frame, 0, (mlt_destructor) mlt_frame_close, NULL );

		// Inform the normalizers about our video properties
		mlt_properties_set_double( frame_props, "aspect_ratio", mlt_profile_sar( cx->profile ) );
		mlt_properties_set_int( frame_props, "width", cx->profile->width );
		mlt_properties_set_int( frame_props, "height", cx->profile->height );
		mlt_properties_set_int( frame_props, "meta.media.width", cx->profile->width );
		mlt_properties_set_int( frame_props, "meta.media.height", cx->profile->height );
		mlt_properties_set_int( frame_props, "progressive", cx->profile->progressive );
	}

	// Calculate the next timecode
	mlt_producer_prepare_next( self );

	return 0;
}