Example #1
 virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(
     BMDVideoInputFormatChangedEvents events,
     IDeckLinkDisplayMode* mode,
     BMDDetectedVideoInputFormatFlags flags )
 {
     mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( getProducer() ) );
     if ( events & bmdVideoInputDisplayModeChanged )
     {
         BMDTimeValue duration;
         BMDTimeScale timescale;
         mode->GetFrameRate( &duration, &timescale );
         profile->width = mode->GetWidth();
         profile->height = mode->GetHeight() + m_vancLines;
         profile->frame_rate_num = timescale;
         profile->frame_rate_den = duration;
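         // 720-pixel-wide modes are SD: use the PAL or NTSC pixel aspect and a 4:3 display aspect;
         // any other mode is treated as square-pixel 16:9.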
         if ( profile->width == 720 )
         {
             if ( profile->height == 576 )
             {
                 profile->sample_aspect_num = 16;
                 profile->sample_aspect_den = 15;
             }
             else
             {
                 profile->sample_aspect_num = 8;
                 profile->sample_aspect_den = 9;
             }
             profile->display_aspect_num = 4;
             profile->display_aspect_den = 3;
         }
         else
         {
             profile->sample_aspect_num = 1;
             profile->sample_aspect_den = 1;
             profile->display_aspect_num = 16;
             profile->display_aspect_den = 9;
         }
         free( profile->description );
         profile->description = strdup( "decklink" );
         mlt_log_verbose( getProducer(), "format changed %dx%d %.3f fps\n",
                          profile->width, profile->height, (double) profile->frame_rate_num / profile->frame_rate_den );
     }
     if ( events & bmdVideoInputFieldDominanceChanged )
     {
         profile->progressive = mode->GetFieldDominance() == bmdProgressiveFrame;
         m_topFieldFirst = mode->GetFieldDominance() == bmdUpperFieldFirst;
         mlt_log_verbose( getProducer(), "field dominance changed prog %d tff %d\n",
                          profile->progressive, m_topFieldFirst );
     }
     if ( events & bmdVideoInputColorspaceChanged )
     {
         profile->colorspace = m_colorspace =
                                   ( mode->GetFlags() & bmdDisplayModeColorspaceRec709 ) ? 709 : 601;
         mlt_log_verbose( getProducer(), "colorspace changed %d\n", profile->colorspace );
     }
     return S_OK;
 }
Example #2
	IDeckLinkDisplayMode* getDisplayMode()
	{
		mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( getConsumer() ) );
		IDeckLinkDisplayModeIterator* iter = NULL;
		IDeckLinkDisplayMode* mode = NULL;
		IDeckLinkDisplayMode* result = 0;

		if ( m_deckLinkOutput->GetDisplayModeIterator( &iter ) == S_OK )
		{
			while ( !result && iter->Next( &mode ) == S_OK )
			{
				m_width = mode->GetWidth();
				m_height = mode->GetHeight();
				mode->GetFrameRate( &m_duration, &m_timescale );
				m_fps = (double) m_timescale / m_duration;
				int p = mode->GetFieldDominance() == bmdProgressiveFrame;
				mlt_log_verbose( getConsumer(), "BMD mode %dx%d %.3f fps prog %d\n", m_width, m_height, m_fps, p );

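				// Use this mode if it matches the profile: same width, scan type, and integer frame rate,
				// and the same height (treating a 486-line NTSC mode as a match for a 480-line profile).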
				if ( m_width == profile->width && p == profile->progressive
					 && (int) m_fps == (int) mlt_profile_fps( profile )
					 && ( m_height == profile->height || ( m_height == 486 && profile->height == 480 ) ) )
					result = mode;
				else
					SAFE_RELEASE( mode );
			}
			SAFE_RELEASE( iter );
		}

		return result;
	}
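SAFE_RELEASE is not defined in these excerpts; a minimal sketch of the COM-style helper it appears to be (an assumption, not necessarily the project's exact macro):

#define SAFE_RELEASE( obj ) \
	do { if ( obj ) { ( obj )->Release(); ( obj ) = NULL; } } while ( 0 )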
Example #3
    BMDDisplayMode getDisplayMode( mlt_profile profile, int vancLines )
    {
        IDeckLinkDisplayModeIterator* iter = NULL;
        IDeckLinkDisplayMode* mode = NULL;
        BMDDisplayMode result = (BMDDisplayMode) bmdDisplayModeNotSupported;

        if ( m_decklinkInput->GetDisplayModeIterator( &iter ) == S_OK )
        {
            while ( !result && iter->Next( &mode ) == S_OK )
            {
                int width = mode->GetWidth();
                int height = mode->GetHeight();
                BMDTimeValue duration;
                BMDTimeScale timescale;
                mode->GetFrameRate( &duration, &timescale );
                double fps = (double) timescale / duration;
                int p = mode->GetFieldDominance() == bmdProgressiveFrame;
                m_topFieldFirst = mode->GetFieldDominance() == bmdUpperFieldFirst;
                m_colorspace = ( mode->GetFlags() & bmdDisplayModeColorspaceRec709 ) ? 709 : 601;
                mlt_log_verbose( getProducer(), "BMD mode %dx%d %.3f fps prog %d tff %d\n", width, height, fps, p, m_topFieldFirst );

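                // Use this mode if it matches the profile: same width, scan type, and frame rate,
                // with the mode height plus VANC lines equal to the profile height (486-line NTSC counts as 480).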
                if ( width == profile->width && p == profile->progressive
                        && ( height + vancLines == profile->height || ( height == 486 && profile->height == 480 + vancLines ) )
                        && fps == mlt_profile_fps( profile ) )
                    result = mode->GetDisplayMode();
                SAFE_RELEASE( mode );
            }
            SAFE_RELEASE( iter );
        }

        return result;
    }
Example #4
	void createFrame()
	{
		m_videoFrame = 0;
		// Generate a DeckLink video frame
		if ( S_OK != m_deckLinkOutput->CreateVideoFrame( m_width, m_height,
			m_width * 2, bmdFormat8BitYUV, bmdFrameFlagDefault, &m_videoFrame ) )
		{
			mlt_log_verbose( &m_consumer, "Failed to create video frame\n" );
			stop();
			return;
		}
		
		// Make the first line black for field order correction.
		uint8_t *buffer = 0;
		if ( S_OK == m_videoFrame->GetBytes( (void**) &buffer ) && buffer )
		{
			for ( int i = 0; i < m_width; i++ )
			{
				*buffer++ = 128;
				*buffer++ = 16;
			}
		}
		mlt_log_debug( &m_consumer, "created video frame\n" );
		mlt_deque_push_back( m_videoFrameQ, m_videoFrame );
	}
Example #5
	bool open( unsigned card = 0 )
	{
		IDeckLinkIterator* deckLinkIterator = CreateDeckLinkIteratorInstance();
		unsigned i = 0;
		
		if ( !deckLinkIterator )
		{
			mlt_log_verbose( NULL, "The DeckLink drivers are not installed.\n" );
			return false;
		}
		
		// Connect to the Nth DeckLink instance
		do {
			if ( deckLinkIterator->Next( &m_deckLink ) != S_OK )
			{
				mlt_log_verbose( NULL, "DeckLink card not found\n" );
				deckLinkIterator->Release();
				return false;
			}
		} while ( ++i <= card );
		
		// Obtain the audio/video output interface (IDeckLinkOutput)
		if ( m_deckLink->QueryInterface( IID_IDeckLinkOutput, (void**)&m_deckLinkOutput ) != S_OK )
		{
			mlt_log_verbose( NULL, "No DeckLink cards support output\n" );
			m_deckLink->Release();
			m_deckLink = 0;
			deckLinkIterator->Release();
			return false;
		}
		
		// Provide this class as a delegate to the audio and video output interfaces
		m_deckLinkOutput->SetScheduledFrameCompletionCallback( this );
		m_deckLinkOutput->SetAudioCallback( this );
		
		pthread_mutex_init( &m_mutex, NULL );
		pthread_cond_init( &m_condition, NULL );
		m_maxAudioBuffer = bmdAudioSampleRate48kHz;
		m_videoFrameQ = mlt_deque_init();
		
		return true;
	}
Example #6
static void on_jack_seek( mlt_properties owner, mlt_filter filter, mlt_position *position )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
	mlt_log_verbose( MLT_FILTER_SERVICE(filter), "%s: %d\n", __FUNCTION__, *position );
	mlt_properties_set_int( properties, "_sync_guard", 1 );
	mlt_profile profile = mlt_service_profile( MLT_FILTER_SERVICE( filter ) );
	jack_client_t *jack_client = mlt_properties_get_data( properties, "jack_client", NULL );
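	// Convert the frame position to a JACK sample position: sample_rate * position / fps.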
	jack_nframes_t jack_frame = jack_get_sample_rate( jack_client );
	jack_frame *= *position / mlt_profile_fps( profile );
	jack_transport_locate( jack_client, jack_frame );
}
Example #7
static void *consumer_thread( void *arg )
{
	// Identify the arg
	consumer_sdl self = arg;

	// Get the consumer
	mlt_consumer consumer = &self->parent;

	// Get the properties
	mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );

	// Internal initialization
	mlt_frame frame = NULL;
	int last_position = -1;
	int eos = 0;
	int eos_threshold = 20;
	if ( self->play )
		eos_threshold = eos_threshold + mlt_properties_get_int( MLT_CONSUMER_PROPERTIES( self->play ), "buffer" );

	// Determine if the application is dealing with the preview
	int preview_off = mlt_properties_get_int( properties, "preview_off" );

	pthread_mutex_lock( &self->refresh_mutex );
	self->refresh_count = 0;
	pthread_mutex_unlock( &self->refresh_mutex );

	// Loop until told not to
	while( self->running )
	{
		// Get a frame from the attached producer
		frame = mlt_consumer_get_frame( consumer );

		// Ensure that we have a frame
		if ( self->running && frame != NULL )
		{
			// Get the speed of the frame
			double speed = mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame ), "_speed" );

			// Lock during the operation
			mlt_service_lock( MLT_CONSUMER_SERVICE( consumer ) );

			// Get refresh request for the current frame
			int refresh = mlt_properties_get_int( properties, "refresh" );

			// Decrement refresh and clear changed
			mlt_events_block( properties, properties );
			mlt_properties_set_int( properties, "refresh", 0 );
			mlt_events_unblock( properties, properties );

			// Unlock after the operation
			mlt_service_unlock( MLT_CONSUMER_SERVICE( consumer ) );

			// Set the changed property on this frame for the benefit of still
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "refresh", refresh );

			// Make sure the recipient knows that this frame isn't really rendered
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 0 );

			// Optimisation to reduce latency
			if ( speed == 1.0 )
			{
				if ( last_position != -1 && last_position + 1 != mlt_frame_get_position( frame ) )
					mlt_consumer_purge( self->play );
				last_position = mlt_frame_get_position( frame );
			}
			else
			{
				//mlt_consumer_purge( self->play );
				last_position = -1;
			}

			// If we aren't playing normally, then use the still
			if ( speed != 1 )
			{
				mlt_producer producer = MLT_PRODUCER( mlt_service_get_producer( MLT_CONSUMER_SERVICE( consumer ) ) );
				mlt_position duration = producer? mlt_producer_get_playtime( producer ) : -1;
				int pause = 0;

#ifndef SKIP_WAIT_EOS
				if ( self->active == self->play )
				{
					// Do not interrupt the play consumer near the end
					if ( duration - self->last_position > eos_threshold )
					{
						// Get a new frame at the sought position
						mlt_frame_close( frame );
						if ( producer )
							mlt_producer_seek( producer, self->last_position );
						frame = mlt_consumer_get_frame( consumer );
						pause = 1;
					}
					else
					{
						// Send frame with speed 0 to stop it
						if ( frame && !mlt_consumer_is_stopped( self->play ) )
						{
							mlt_consumer_put_frame( self->play, frame );
							frame = NULL;
							eos = 1;
						}

						// Check for end of stream
						if ( mlt_consumer_is_stopped( self->play ) )
						{
							// Stream has ended
							mlt_log_verbose( MLT_CONSUMER_SERVICE( consumer ), "END OF STREAM\n" );
							pause = 1;
							eos = 0; // reset eos indicator
						}
						else
						{
							// Prevent a tight busy loop
							struct timespec tm = { 0, 100000L }; // 100 usec
							nanosleep( &tm, NULL );
						}
					}
				}
#else
				pause = self->active == self->play;
#endif
				if ( pause )
				{
					// Start the still consumer
					if ( !mlt_consumer_is_stopped( self->play ) )
						mlt_consumer_stop( self->play );
					self->last_speed = speed;
					self->active = self->still;
					self->ignore_change = 0;
					mlt_consumer_start( self->still );
				}
				// Send the frame to the active child
				if ( frame && !eos )
				{
					mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "refresh", 1 );
					if ( self->active )
						mlt_consumer_put_frame( self->active, frame );
				}
				if ( pause && speed == 0.0 )
				{
					mlt_events_fire( properties, "consumer-sdl-paused", NULL );
				}
			}
			// Allow a little grace time before switching consumers on speed changes
			else if ( self->ignore_change -- > 0 && self->active != NULL && !mlt_consumer_is_stopped( self->active ) )
			{
				mlt_consumer_put_frame( self->active, frame );
			}
			// Otherwise use the normal player
			else
			{
				if ( !mlt_consumer_is_stopped( self->still ) )
					mlt_consumer_stop( self->still );
				if ( mlt_consumer_is_stopped( self->play ) )
				{
					self->last_speed = speed;
					self->active = self->play;
					self->ignore_change = 0;
					mlt_consumer_start( self->play );
				}
				if ( self->play )
					mlt_consumer_put_frame( self->play, frame );
			}

			// Copy the rectangle info from the active consumer
			if ( self->running && preview_off == 0 && self->active )
			{
				mlt_properties active = MLT_CONSUMER_PROPERTIES( self->active );
				mlt_service_lock( MLT_CONSUMER_SERVICE( consumer ) );
				mlt_properties_set_int( properties, "rect_x", mlt_properties_get_int( active, "rect_x" ) );
				mlt_properties_set_int( properties, "rect_y", mlt_properties_get_int( active, "rect_y" ) );
				mlt_properties_set_int( properties, "rect_w", mlt_properties_get_int( active, "rect_w" ) );
				mlt_properties_set_int( properties, "rect_h", mlt_properties_get_int( active, "rect_h" ) );
				mlt_service_unlock( MLT_CONSUMER_SERVICE( consumer ) );
			}

			if ( self->active == self->still )
			{
				pthread_mutex_lock( &self->refresh_mutex );
				if ( self->running && speed == 0 && self->refresh_count <= 0 )
				{
					mlt_events_fire( properties, "consumer-sdl-paused", NULL );
					pthread_cond_wait( &self->refresh_cond, &self->refresh_mutex );
				}
				self->refresh_count --;
				pthread_mutex_unlock( &self->refresh_mutex );
			}
		}
		else
		{
			if ( frame ) mlt_frame_close( frame );
			mlt_consumer_put_frame( self->active, NULL );
			self->running = 0;
		}
	}

	if ( self->play ) mlt_consumer_stop( self->play );
	if ( self->still ) mlt_consumer_stop( self->still );

	return NULL;
}
Example #8
static int transition_get_audio( mlt_frame frame_a, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	int error = 0;

	// Get the b frame from the stack
	mlt_frame frame_b = mlt_frame_pop_audio( frame_a );

	// Get the effect
	mlt_transition transition = mlt_frame_pop_audio( frame_a );

	// Get the properties of the b frame
	mlt_properties b_props = MLT_FRAME_PROPERTIES( frame_b );

	transition_mix self = transition->child;
	int16_t *buffer_b, *buffer_a;
	int frequency_b = *frequency, frequency_a = *frequency;
	int channels_b = *channels, channels_a = *channels;
	int samples_b = *samples, samples_a = *samples;

	// We can only mix s16
	*format = mlt_audio_s16;
	mlt_frame_get_audio( frame_b, (void**) &buffer_b, format, &frequency_b, &channels_b, &samples_b );
	mlt_frame_get_audio( frame_a, (void**) &buffer_a, format, &frequency_a, &channels_a, &samples_a );

	if ( buffer_b == buffer_a )
	{
		*samples = samples_b;
		*channels = channels_b;
		*buffer = buffer_b;
		*frequency = frequency_b;
		return error;
	}

	int silent = mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio" );
	mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame_a ), "silent_audio", 0 );
	if ( silent )
		memset( buffer_a, 0, samples_a * channels_a * sizeof( int16_t ) );

	silent = mlt_properties_get_int( b_props, "silent_audio" );
	mlt_properties_set_int( b_props, "silent_audio", 0 );
	if ( silent )
		memset( buffer_b, 0, samples_b * channels_b * sizeof( int16_t ) );

	// determine number of samples to process
	*samples = MIN( self->src_buffer_count + samples_b, self->dest_buffer_count + samples_a );
	*channels = MIN( MIN( channels_b, channels_a ), MAX_CHANNELS );
	*frequency = frequency_a;

	// Prevent src buffer overflow by discarding oldest samples.
	samples_b = MIN( samples_b, MAX_SAMPLES * MAX_CHANNELS / channels_b );
	size_t bytes = PCM16_BYTES( samples_b, channels_b );
	if ( PCM16_BYTES( self->src_buffer_count + samples_b, channels_b ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: src_buffer_count %d\n",
					  self->src_buffer_count );
		self->src_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_b - samples_b;
		memmove( self->src_buffer, &self->src_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_b * channels_b],
				 PCM16_BYTES( samples_b, channels_b ) );
	}
	// Buffer new src samples.
	memcpy( &self->src_buffer[self->src_buffer_count * channels_b], buffer_b, bytes );
	self->src_buffer_count += samples_b;
	buffer_b = self->src_buffer;

	// Prevent dest buffer overflow by discarding oldest samples.
	samples_a = MIN( samples_a, MAX_SAMPLES * MAX_CHANNELS / channels_a );
	bytes = PCM16_BYTES( samples_a, channels_a );
	if ( PCM16_BYTES( self->dest_buffer_count + samples_a, channels_a ) > MAX_BYTES ) {
		mlt_log_verbose( MLT_TRANSITION_SERVICE(transition), "buffer overflow: dest_buffer_count %d\n",
					  self->dest_buffer_count );
		self->dest_buffer_count = MAX_SAMPLES * MAX_CHANNELS / channels_a - samples_a;
		memmove( self->dest_buffer, &self->dest_buffer[MAX_SAMPLES * MAX_CHANNELS - samples_a * channels_a],
				 PCM16_BYTES( samples_a, channels_a ) );
	}
	// Buffer the new dest samples.
	memcpy( &self->dest_buffer[self->dest_buffer_count * channels_a], buffer_a, bytes );
	self->dest_buffer_count += samples_a;
	buffer_a = self->dest_buffer;

	// Do the mixing.
	if ( mlt_properties_get_int( MLT_TRANSITION_PROPERTIES(transition), "combine" ) )
	{
		double weight = 1.0;
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame_a ), "meta.mixdown" ) )
			weight = 1.0 - mlt_properties_get_double( MLT_FRAME_PROPERTIES( frame_a ), "meta.volume" );
		combine_audio( weight, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}
	else
	{
		double mix_start = 0.5, mix_end = 0.5;
		if ( mlt_properties_get( b_props, "audio.previous_mix" ) )
			mix_start = mlt_properties_get_double( b_props, "audio.previous_mix" );
		if ( mlt_properties_get( b_props, "audio.mix" ) )
			mix_end = mlt_properties_get_double( b_props, "audio.mix" );
		if ( mlt_properties_get_int( b_props, "audio.reverse" ) )
		{
			mix_start = 1.0 - mix_start;
			mix_end = 1.0 - mix_end;
		}
		mix_audio( mix_start, mix_end, buffer_a, buffer_b, channels_a, channels_b, *channels, *samples );
	}

	// Copy the audio into the frame.
	bytes = PCM16_BYTES( *samples, *channels );
	*buffer = mlt_pool_alloc( bytes );
	memcpy( *buffer, buffer_a, bytes );
	mlt_frame_set_audio( frame_a, *buffer, *format, bytes, mlt_pool_release );

	if ( mlt_properties_get_int( b_props, "_speed" ) == 0 )
	{
		// Flush the buffer when paused and scrubbing.
		samples_b = self->src_buffer_count;
		samples_a = self->dest_buffer_count;
	}
	else
	{
		// Determine the maximum amount of latency permitted in the buffer.
		int max_latency = CLAMP( *frequency / 1000, 0, MAX_SAMPLES ); // samples in 1ms
		// samples_b becomes the new target src buffer count.
		samples_b = CLAMP( self->src_buffer_count - *samples, 0, max_latency );
		// samples_b becomes the number of samples to consume: difference between actual and the target.
		samples_b = self->src_buffer_count - samples_b;
		// samples_a becomes the new target dest buffer count.
		samples_a = CLAMP( self->dest_buffer_count - *samples, 0, max_latency );
		// samples_a becomes the number of samples to consume: difference between actual and the target.
		samples_a = self->dest_buffer_count - samples_a;
	}

	// Consume the src buffer.
	self->src_buffer_count -= samples_b;
	if ( self->src_buffer_count ) {
		memmove( self->src_buffer, &self->src_buffer[samples_b * channels_b],
			PCM16_BYTES( self->src_buffer_count, channels_b ));
	}
	// Consume the dest buffer.
	self->dest_buffer_count -= samples_a;
	if ( self->dest_buffer_count ) {
		memmove( self->dest_buffer, &self->dest_buffer[samples_a * channels_a],
			PCM16_BYTES( self->dest_buffer_count, channels_a ));
	}

	return error;
}
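PCM16_BYTES, MAX_SAMPLES, MAX_CHANNELS, and MAX_BYTES come from the surrounding transition source and are not shown here; a sketch of definitions consistent with how they are used above (assumptions, not the project's exact values):

#define MAX_CHANNELS 6
#define MAX_SAMPLES  4000
#define PCM16_BYTES( samples, channels ) ( ( samples ) * ( channels ) * sizeof( int16_t ) )
#define MAX_BYTES    PCM16_BYTES( MAX_SAMPLES, MAX_CHANNELS )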
Example #9
static void on_jack_stop( mlt_properties owner, mlt_properties properties )
{
	mlt_log_verbose( NULL, "%s\n", __FUNCTION__ );
	jack_client_t *jack_client = mlt_properties_get_data( properties, "jack_client", NULL );
	jack_transport_stop( jack_client );
}
Example #10
static void initialise_jack_ports( mlt_properties properties )
{
	int i;
	char mlt_name[67], rack_name[30];
	jack_port_t **port = NULL;
	jack_client_t *jack_client = mlt_properties_get_data( properties, "jack_client", NULL );
	jack_nframes_t jack_buffer_size = jack_get_buffer_size( jack_client );
	
	// Propagate these for the Jack processing callback
	int channels = mlt_properties_get_int( properties, "channels" );

	// Start JackRack
	if ( mlt_properties_get( properties, "src" ) )
	{
		snprintf( rack_name, sizeof( rack_name ), "jackrack%d", getpid() );
		jack_rack_t *jackrack = jack_rack_new( rack_name, mlt_properties_get_int( properties, "channels" ) );
		jack_rack_open_file( jackrack, mlt_properties_get( properties, "src" ) );		
		
		mlt_properties_set_data( properties, "jackrack", jackrack, 0,
			(mlt_destructor) jack_rack_destroy, NULL );
		mlt_properties_set( properties, "_rack_client_name", rack_name );
	}
	else
	{
		// We have to set this to something to prevent re-initialization.
		mlt_properties_set_data( properties, "jackrack", jack_client, 0, NULL, NULL );
	}
		
	// Allocate buffers and ports
	jack_ringbuffer_t **output_buffers = mlt_pool_alloc( sizeof( jack_ringbuffer_t *) * channels );
	jack_ringbuffer_t **input_buffers = mlt_pool_alloc( sizeof( jack_ringbuffer_t *) * channels );
	jack_port_t **jack_output_ports = mlt_pool_alloc( sizeof(jack_port_t *) * channels );
	jack_port_t **jack_input_ports = mlt_pool_alloc( sizeof(jack_port_t *) * channels );
	float **jack_output_buffers = mlt_pool_alloc( sizeof(float *) * jack_buffer_size );
	float **jack_input_buffers = mlt_pool_alloc( sizeof(float *) * jack_buffer_size );

	// Set properties - released inside filter_close
	mlt_properties_set_data( properties, "output_buffers", output_buffers,
		sizeof( jack_ringbuffer_t *) * channels, mlt_pool_release, NULL );
	mlt_properties_set_data( properties, "input_buffers", input_buffers,
		sizeof( jack_ringbuffer_t *) * channels, mlt_pool_release, NULL );
	mlt_properties_set_data( properties, "jack_output_ports", jack_output_ports,
		sizeof( jack_port_t *) * channels, mlt_pool_release, NULL );
	mlt_properties_set_data( properties, "jack_input_ports", jack_input_ports,
		sizeof( jack_port_t *) * channels, mlt_pool_release, NULL );
	mlt_properties_set_data( properties, "jack_output_buffers", jack_output_buffers,
		sizeof( float *) * channels, mlt_pool_release, NULL );
	mlt_properties_set_data( properties, "jack_input_buffers", jack_input_buffers,
		sizeof( float *) * channels, mlt_pool_release, NULL );
	
	// Register Jack ports
	for ( i = 0; i < channels; i++ )
	{
		int in;
		
		output_buffers[i] = jack_ringbuffer_create( BUFFER_LEN * sizeof(float) );
		input_buffers[i] = jack_ringbuffer_create( BUFFER_LEN * sizeof(float) );
		snprintf( mlt_name, sizeof( mlt_name ), "obuf%d", i );
		mlt_properties_set_data( properties, mlt_name, output_buffers[i],
			BUFFER_LEN * sizeof(float), (mlt_destructor) jack_ringbuffer_free, NULL );
		snprintf( mlt_name, sizeof( mlt_name ), "ibuf%d", i );
		mlt_properties_set_data( properties, mlt_name, input_buffers[i],
			BUFFER_LEN * sizeof(float), (mlt_destructor) jack_ringbuffer_free, NULL );
		
		for ( in = 0; in < 2; in++ )
		{
			snprintf( mlt_name, sizeof( mlt_name ), "%s_%d", in ? "in" : "out", i + 1);
			port = ( in ? &jack_input_ports[i] : &jack_output_ports[i] );
			
			*port =  jack_port_register( jack_client, mlt_name, JACK_DEFAULT_AUDIO_TYPE,
				( in ? JackPortIsInput : JackPortIsOutput ) | JackPortIsTerminal, 0 );
		}
	}
	
	// Start Jack processing
	pthread_mutex_lock( &g_activate_mutex );
	jack_activate( jack_client );
	pthread_mutex_unlock( &g_activate_mutex  );

	// Establish connections
	for ( i = 0; i < channels; i++ )
	{
		int in;
		for ( in = 0; in < 2; in++ )
		{
			port = ( in ? &jack_input_ports[i] : &jack_output_ports[i] );
			snprintf( mlt_name, sizeof( mlt_name ), "%s", jack_port_name( *port ) );

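			// Determine the peer port to connect to: the JackRack client's corresponding port if one
			// was created, an explicitly configured port from the properties, or a port on "client_name".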
			snprintf( rack_name, sizeof( rack_name ), "%s_%d", in ? "in" : "out", i + 1 );
			if ( mlt_properties_get( properties, "_rack_client_name" ) )
				snprintf( rack_name, sizeof( rack_name ), "%s:%s_%d", mlt_properties_get( properties, "_rack_client_name" ), in ? "out" : "in", i + 1);
			else if ( mlt_properties_get( properties, rack_name ) )
				snprintf( rack_name, sizeof( rack_name ), "%s", mlt_properties_get( properties, rack_name ) );
			else
				snprintf( rack_name, sizeof( rack_name ), "%s:%s_%d", mlt_properties_get( properties, "client_name" ), in ? "out" : "in", i + 1);
			
			if ( in )
			{
				mlt_log_verbose( NULL, "JACK connect %s to %s\n", rack_name, mlt_name );
				jack_connect( jack_client, rack_name, mlt_name );
			}
			else
			{
				mlt_log_verbose( NULL, "JACK connect %s to %s\n", mlt_name, rack_name );
				jack_connect( jack_client, mlt_name, rack_name );
			}
		}
	}
}
Example #11
static void
saved_rack_parse_plugin (jack_rack_t * jack_rack, saved_rack_t * saved_rack, saved_plugin_t * saved_plugin,
                         const char * filename, xmlNodePtr plugin)
{
  plugin_desc_t * desc;
  settings_t * settings = NULL;
  xmlNodePtr node;
  xmlNodePtr sub_node;
  xmlChar *content;
  unsigned long num;
  unsigned long control = 0;
#ifdef WIN32
  xmlFreeFunc xmlFree = NULL;
  xmlMemGet( &xmlFree, NULL, NULL, NULL);
#endif

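  /* Walk the <plugin> element's children and build a settings_t for this plugin instance. */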
  for (node = plugin->children; node; node = node->next)
    {
      if (xmlStrcmp (node->name, _x("id")) == 0)
        {
          content = xmlNodeGetContent (node);
          num = strtoul (_s(content), NULL, 10);
          xmlFree (content);

          desc = plugin_mgr_get_any_desc (jack_rack->plugin_mgr, num);
          if (!desc)
            {
              mlt_log_verbose( NULL, _("The file '%s' contains an unknown plugin with ID '%ld'; skipping\n"), filename, num);
              return;
            }
          
          settings = settings_new (desc, saved_rack->channels, saved_rack->sample_rate);
        }
      else if (xmlStrcmp (node->name, _x("enabled")) == 0)
        {
          content = xmlNodeGetContent (node);
          settings_set_enabled (settings, xmlStrcmp (content, _x("true")) == 0 ? TRUE : FALSE);
          xmlFree (content);
        }
      else if (xmlStrcmp (node->name, _x("wet_dry_enabled")) == 0)
        {
          content = xmlNodeGetContent (node);
          settings_set_wet_dry_enabled (settings, xmlStrcmp (content, _x("true")) == 0 ? TRUE : FALSE);
          xmlFree (content);
        }
      else if (xmlStrcmp (node->name, _x("wet_dry_locked")) == 0)
        {
          content = xmlNodeGetContent (node);
          settings_set_wet_dry_locked (settings, xmlStrcmp (content, _x("true")) == 0 ? TRUE : FALSE);
          xmlFree (content);
        }
      else if (xmlStrcmp (node->name, _x("wet_dry_values")) == 0)
        {
          unsigned long channel = 0;
          
          for (sub_node = node->children; sub_node; sub_node = sub_node->next)
            {
              if (xmlStrcmp (sub_node->name, _x("value")) == 0)
                {
                  content = xmlNodeGetContent (sub_node);
                  settings_set_wet_dry_value (settings, channel, strtod (_s(content), NULL));
                  xmlFree (content);
                  
                  channel++;
                }
            }
        }
      else if (xmlStrcmp (node->name, _x("lockall")) == 0)
        {
          content = xmlNodeGetContent (node);
          settings_set_lock_all (settings, xmlStrcmp (content, _x("true")) == 0 ? TRUE : FALSE);
          xmlFree (content);
        }
      else if (xmlStrcmp (node->name, _x("controlrow")) == 0)
        {
          gint copy = 0;

          for (sub_node = node->children; sub_node; sub_node = sub_node->next)
            {
              if (xmlStrcmp (sub_node->name, _x("lock")) == 0)
                {
                  content = xmlNodeGetContent (sub_node);
                  settings_set_lock (settings, control, xmlStrcmp (content, _x("true")) == 0 ? TRUE : FALSE);
                  xmlFree (content);
                }
              else if (xmlStrcmp (sub_node->name, _x("value")) == 0)
                {
                  content = xmlNodeGetContent (sub_node);
                  settings_set_control_value (settings, copy, control, strtod (_s(content), NULL));
                  xmlFree (content);
                  copy++;
                }
            }
          
          control++;
        }
    }
  
  if (settings)
    saved_plugin->settings = settings;
}
Example #12
static void *consumer_read_ahead_thread( void *arg )
{
	// The argument is the consumer
	mlt_consumer self = arg;

	// Get the properties of the consumer
	mlt_properties properties = MLT_CONSUMER_PROPERTIES( self );

	// Get the width and height
	int width = mlt_properties_get_int( properties, "width" );
	int height = mlt_properties_get_int( properties, "height" );

	// See if video is turned off
	int video_off = mlt_properties_get_int( properties, "video_off" );
	int preview_off = mlt_properties_get_int( properties, "preview_off" );
	int preview_format = mlt_properties_get_int( properties, "preview_format" );

	// Get the audio settings
	mlt_audio_format afmt = mlt_audio_s16;
	const char *format = mlt_properties_get( properties, "mlt_audio_format" );
	if ( format )
	{
		if ( !strcmp( format, "none" ) )
			afmt = mlt_audio_none;
		else if ( !strcmp( format, "s32" ) )
			afmt = mlt_audio_s32;
		else if ( !strcmp( format, "s32le" ) )
			afmt = mlt_audio_s32le;
		else if ( !strcmp( format, "float" ) )
			afmt = mlt_audio_float;
		else if ( !strcmp( format, "f32le" ) )
			afmt = mlt_audio_f32le;
		else if ( !strcmp( format, "u8" ) )
			afmt = mlt_audio_u8;
	}
	int counter = 0;
	double fps = mlt_properties_get_double( properties, "fps" );
	int channels = mlt_properties_get_int( properties, "channels" );
	int frequency = mlt_properties_get_int( properties, "frequency" );
	int samples = 0;
	void *audio = NULL;

	// See if audio is turned off
	int audio_off = mlt_properties_get_int( properties, "audio_off" );

	// Get the maximum size of the buffer
	int buffer = mlt_properties_get_int( properties, "buffer" ) + 1;

	// General frame variable
	mlt_frame frame = NULL;
	uint8_t *image = NULL;

	// Time structures
	struct timeval ante;

	// Average time for get_frame and get_image
	int count = 0;
	int skipped = 0;
	int64_t time_process = 0;
	int skip_next = 0;
	mlt_position pos = 0;
	mlt_position start_pos = 0;
	mlt_position last_pos = 0;
	int frame_duration = mlt_properties_get_int( properties, "frame_duration" );
	int drop_max = mlt_properties_get_int( properties, "drop_max" );

	if ( preview_off && preview_format != 0 )
		self->format = preview_format;

	// Get the first frame
	frame = mlt_consumer_get_frame( self );

	if ( frame )
	{
		// Get the image of the first frame
		if ( !video_off )
		{
			mlt_events_fire( MLT_CONSUMER_PROPERTIES( self ), "consumer-frame-render", frame, NULL );
			mlt_frame_get_image( frame, &image, &self->format, &width, &height, 0 );
		}

		if ( !audio_off )
		{
			samples = mlt_sample_calculator( fps, frequency, counter++ );
			mlt_frame_get_audio( frame, &audio, &afmt, &frequency, &channels, &samples );
		}

		// Mark as rendered
		mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );
		last_pos = start_pos = pos = mlt_frame_get_position( frame );
	}

	// Get the starting time (can ignore the times above)
	gettimeofday( &ante, NULL );

	// Continue to read ahead
	while ( self->ahead )
	{
		// Put the current frame into the queue
		pthread_mutex_lock( &self->queue_mutex );
		while( self->ahead && mlt_deque_count( self->queue ) >= buffer )
			pthread_cond_wait( &self->queue_cond, &self->queue_mutex );
		mlt_deque_push_back( self->queue, frame );
		pthread_cond_broadcast( &self->queue_cond );
		pthread_mutex_unlock( &self->queue_mutex );

		// Get the next frame
		frame = mlt_consumer_get_frame( self );

		// If there's no frame, we're probably stopped...
		if ( frame == NULL )
			continue;
		pos = mlt_frame_get_position( frame );

		// Increment the counter used for averaging processing cost
		count ++;

		// All non-normal playback frames should be shown
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "_speed" ) != 1 )
		{
#ifdef DEINTERLACE_ON_NOT_NORMAL_SPEED
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "consumer_deinterlace", 1 );
#endif
			// Indicate seeking or trick-play
			start_pos = pos;
		}

		// If skip flag not set or frame-dropping disabled
		if ( !skip_next || self->real_time == -1 )
		{
			if ( !video_off )
			{
				// Reset width/height - could have been changed by previous mlt_frame_get_image
				width = mlt_properties_get_int( properties, "width" );
				height = mlt_properties_get_int( properties, "height" );

				// Get the image
				mlt_events_fire( MLT_CONSUMER_PROPERTIES( self ), "consumer-frame-render", frame, NULL );
				mlt_frame_get_image( frame, &image, &self->format, &width, &height, 0 );
			}

			// Indicate the rendered image is available.
			mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );

			// Reset consecutively-skipped counter
			skipped = 0;
		}
		else // Skip image processing
		{
			// Increment the number of consecutively-skipped frames
			skipped++;

			// If too many (1 sec) consecutively-skipped frames
			if ( skipped > drop_max )
			{
				// Reset cost tracker
				time_process = 0;
				count = 1;
				mlt_log_verbose( self, "too many frames dropped - forcing next frame\n" );
			}
		}

		// Always process audio
		if ( !audio_off )
		{
			samples = mlt_sample_calculator( fps, frequency, counter++ );
			mlt_frame_get_audio( frame, &audio, &afmt, &frequency, &channels, &samples );
		}

		// Get the time to process this frame
		int64_t time_current = time_difference( &ante );

		// If the current time is not suddenly some large amount
		if ( time_current < time_process / count * 20 || !time_process || count < 5 )
		{
			// Accumulate the cost for processing this frame
			time_process += time_current;
		}
		else
		{
			mlt_log_debug( self, "current %"PRId64" threshold %"PRId64" count %d\n",
				time_current, (int64_t) (time_process / count * 20), count );
			// Ignore the cost of this frame's time
			count--;
		}

		// Determine if we started, resumed, or seeked
		if ( pos != last_pos + 1 )
			start_pos = pos;
		last_pos = pos;

		// Do not skip the first 20% of buffer at start, resume, or seek
		if ( pos - start_pos <= buffer / 5 + 1 )
		{
			// Reset cost tracker
			time_process = 0;
			count = 1;
		}

		// Reset skip flag
		skip_next = 0;

		// Only consider skipping if the buffer level is low (or really small)
		if ( mlt_deque_count( self->queue ) <= buffer / 5 + 1 )
		{
			// Skip next frame if average cost exceeds frame duration.
			if ( time_process / count > frame_duration )
				skip_next = 1;
			if ( skip_next )
				mlt_log_debug( self, "avg usec %"PRId64" (%"PRId64"/%d) duration %d\n",
					time_process/count, time_process, count, frame_duration);
		}
	}

	// Remove the last frame
	mlt_frame_close( frame );

	return NULL;
}
Example #13
static mlt_frame worker_get_frame( mlt_consumer self, mlt_properties properties )
{
	// Frame to return
	mlt_frame frame = NULL;

	double fps = mlt_properties_get_double( properties, "fps" );
	int threads = abs( self->real_time );
	int buffer = mlt_properties_get_int( properties, "_buffer" );
	buffer = buffer > 0 ? buffer : mlt_properties_get_int( properties, "buffer" );
	// This is a heuristic to determine a suitable minimum buffer size for the number of threads.
	int headroom = 2 + threads * threads;
	buffer = buffer < headroom ? headroom : buffer;

	// Start worker threads if not already started.
	if ( ! self->ahead )
	{
		int prefill = mlt_properties_get_int( properties, "prefill" );
		prefill = prefill > 0 && prefill < buffer ? prefill : buffer;

		consumer_work_start( self );

		// Fill the work queue.
		int i = buffer;
		while ( self->ahead && i-- )
		{
			frame = mlt_consumer_get_frame( self );
			if ( frame )
			{
				pthread_mutex_lock( &self->queue_mutex );
				mlt_deque_push_back( self->queue, frame );
				pthread_cond_signal( &self->queue_cond );
				pthread_mutex_unlock( &self->queue_mutex );
			}
		}

		// Wait for prefill
		while ( self->ahead && first_unprocessed_frame( self ) < prefill )
		{
			pthread_mutex_lock( &self->done_mutex );
			pthread_cond_wait( &self->done_cond, &self->done_mutex );
			pthread_mutex_unlock( &self->done_mutex );
		}
		self->process_head = threads;
	}

//	mlt_log_verbose( MLT_CONSUMER_SERVICE(self), "size %d done count %d work count %d process_head %d\n",
//		threads, first_unprocessed_frame( self ), mlt_deque_count( self->queue ), self->process_head );

	// Feed the work queue
	while ( self->ahead && mlt_deque_count( self->queue ) < buffer )
	{
		frame = mlt_consumer_get_frame( self );
		if ( ! frame )
			return frame;
		pthread_mutex_lock( &self->queue_mutex );
		mlt_deque_push_back( self->queue, frame );
		pthread_cond_signal( &self->queue_cond );
		pthread_mutex_unlock( &self->queue_mutex );
	}

	// Wait if not realtime.
	mlt_frame head_frame = MLT_FRAME( mlt_deque_peek_front( self->queue ) );
	while ( self->ahead && self->real_time < 0 &&
		!( head_frame && mlt_properties_get_int( MLT_FRAME_PROPERTIES( head_frame ), "rendered" ) ) )
	{
		pthread_mutex_lock( &self->done_mutex );
		pthread_cond_wait( &self->done_cond, &self->done_mutex );
		pthread_mutex_unlock( &self->done_mutex );
	}
	
	// Get the frame from the queue.
	pthread_mutex_lock( &self->queue_mutex );
	frame = mlt_deque_pop_front( self->queue );
	pthread_mutex_unlock( &self->queue_mutex );

	// Adapt the worker process head to the runtime conditions.
	if ( self->real_time > 0 )
	{
		if ( frame && mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered" ) )
		{
			self->consecutive_dropped = 0;
			if ( self->process_head > threads && self->consecutive_rendered >= self->process_head )
				self->process_head--;
			else
				self->consecutive_rendered++;
		}
		else
		{
			self->consecutive_rendered = 0;
			if ( self->process_head < buffer - threads && self->consecutive_dropped > threads )
				self->process_head++;
			else
				self->consecutive_dropped++;
		}
//		mlt_log_verbose( MLT_CONSUMER_SERVICE(self), "dropped %d rendered %d process_head %d\n",
//			self->consecutive_dropped, self->consecutive_rendered, self->process_head );

		// Check for too many consecutively dropped frames
		if ( self->consecutive_dropped > mlt_properties_get_int( properties, "drop_max" ) )
		{
			int orig_buffer = mlt_properties_get_int( properties, "buffer" );
			int prefill = mlt_properties_get_int( properties, "prefill" );
			mlt_log_verbose( self, "too many frames dropped - " );

			// If using a default low-latency buffer level (SDL) and below the limit
			if ( ( orig_buffer == 1 || prefill == 1 ) && buffer < (threads + 1) * 10 )
			{
				// Auto-scale the buffer to compensate
				mlt_log_verbose( self, "increasing buffer to %d\n", buffer + threads );
				mlt_properties_set_int( properties, "_buffer", buffer + threads );
				self->consecutive_dropped = fps / 2;
			}
			else
			{
				// Tell the consumer to render it
				mlt_log_verbose( self, "forcing next frame\n" );
				mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );
				self->consecutive_dropped = 0;
			}
		}
	}
	
	return frame;
}
Example #14
    virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(
        IDeckLinkVideoInputFrame* video,
        IDeckLinkAudioInputPacket* audio )
    {
        if ( mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "preview" ) &&
                mlt_producer_get_speed( getProducer() ) == 0.0 && !mlt_deque_count( m_queue ))
        {
            pthread_cond_broadcast( &m_condition );
            return S_OK;
        }

        // Create mlt_frame
        mlt_frame frame = mlt_frame_init( MLT_PRODUCER_SERVICE( getProducer() ) );

        // Copy video
        if ( video )
        {
            if ( !( video->GetFlags() & bmdFrameHasNoInputSource ) )
            {
                int size = video->GetRowBytes() * ( video->GetHeight() + m_vancLines );
                void* image = mlt_pool_alloc( size );
                void* buffer = 0;
                unsigned char* p = (unsigned char*) image;
                int n = size / 2;
                // Initialize VANC lines to nominal black
                while ( --n )
                {
                    *p ++ = 16;
                    *p ++ = 128;
                }

                // Capture VANC
                if ( m_vancLines > 0 )
                {
                    IDeckLinkVideoFrameAncillary* vanc = 0;
                    if ( video->GetAncillaryData( &vanc ) == S_OK && vanc )
                    {
                        for ( int i = 1; i < m_vancLines + 1; i++ )
                        {
                            if ( vanc->GetBufferForVerticalBlankingLine( i, &buffer ) == S_OK )
                                swab( (char*) buffer, (char*) image + ( i - 1 ) * video->GetRowBytes(), video->GetRowBytes() );
                            else
                                mlt_log_debug( getProducer(), "failed to capture vanc line %d\n", i );
                        }
                        SAFE_RELEASE(vanc);
                    }
                }

                // Capture image
                video->GetBytes( &buffer );
                if ( image && buffer )
                {
                    size =  video->GetRowBytes() * video->GetHeight();
                    swab( (char*) buffer, (char*) image + m_vancLines * video->GetRowBytes(), size );
                    mlt_frame_set_image( frame, (uint8_t*) image, size, mlt_pool_release );
                }
                else if ( image )
                {
                    mlt_log_verbose( getProducer(), "no video\n" );
                    mlt_pool_release( image );
                }
            }
            else
            {
                mlt_log_verbose( getProducer(), "no signal\n" );
                mlt_frame_close( frame );
                frame = 0;
            }

            // Get timecode
            IDeckLinkTimecode* timecode = 0;
            if ( video->GetTimecode( bmdTimecodeVITC, &timecode ) == S_OK && timecode )
            {
                DLString timecodeString = 0;

                if ( timecode->GetString( &timecodeString ) == S_OK )
                {
                    char* s = getCString( timecodeString );
                    mlt_properties_set( MLT_FRAME_PROPERTIES( frame ), "meta.attr.vitc.markup", s );
                    mlt_log_debug( getProducer(), "timecode %s\n", s );
                    freeCString( s );
                }
                freeDLString( timecodeString );
                SAFE_RELEASE( timecode );
            }
        }
        else
        {
            mlt_log_verbose( getProducer(), "no video\n" );
            mlt_frame_close( frame );
            frame = 0;
        }

        // Copy audio
        if ( frame && audio )
        {
            int channels = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" );
            int size = audio->GetSampleFrameCount() * channels * sizeof(int16_t);
            mlt_audio_format format = mlt_audio_s16;
            void* pcm = mlt_pool_alloc( size );
            void* buffer = 0;

            audio->GetBytes( &buffer );
            if ( buffer )
            {
                memcpy( pcm, buffer, size );
                mlt_frame_set_audio( frame, pcm, format, size, mlt_pool_release );
                mlt_properties_set_int( MLT_FRAME_PROPERTIES(frame), "audio_samples", audio->GetSampleFrameCount() );
            }
            else
            {
                mlt_log_verbose( getProducer(), "no audio\n" );
                mlt_pool_release( pcm );
            }
        }
        else
        {
            mlt_log_verbose( getProducer(), "no audio\n" );
        }

        // Put frame in queue
        if ( frame )
        {
            int queueMax = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "buffer" );
            pthread_mutex_lock( &m_mutex );
            if ( mlt_deque_count( m_queue ) < queueMax )
            {
                mlt_deque_push_back( m_queue, frame );
                pthread_cond_broadcast( &m_condition );
            }
            else
            {
                mlt_frame_close( frame );
                mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "dropped", ++m_dropped );
                mlt_log_warning( getProducer(), "frame dropped %d\n", m_dropped );
            }
            pthread_mutex_unlock( &m_mutex );
        }

        return S_OK;
    }
Example #15
    bool start( mlt_profile profile = 0 )
    {
        if ( m_started )
            return false;
        try
        {
            // Resolve the profile before it is used below.
            if ( !profile )
                profile = mlt_service_profile( MLT_PRODUCER_SERVICE( getProducer() ) );

            // Initialize some members
            m_vancLines = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "vanc" );
            if ( m_vancLines == -1 )
                m_vancLines = profile->height <= 512 ? 26 : 32;

            // Get the display mode
            BMDDisplayMode displayMode = getDisplayMode( profile, m_vancLines );
            if ( displayMode == (BMDDisplayMode) bmdDisplayModeNotSupported )
            {
                mlt_log_info( getProducer(), "profile = %dx%d %f fps %s\n", profile->width, profile->height,
                              mlt_profile_fps( profile ), profile->progressive? "progressive" : "interlace" );
                throw "Profile is not compatible with decklink.";
            }

            // Determine whether the device supports input format detection
#ifdef WIN32
            BOOL doesDetectFormat = FALSE;
#else
            bool doesDetectFormat = false;
#endif
            IDeckLinkAttributes *decklinkAttributes = 0;
            if ( m_decklink->QueryInterface( IID_IDeckLinkAttributes, (void**) &decklinkAttributes ) == S_OK )
            {
                if ( decklinkAttributes->GetFlag( BMDDeckLinkSupportsInputFormatDetection, &doesDetectFormat ) != S_OK )
                    doesDetectFormat = false;
                SAFE_RELEASE( decklinkAttributes );
            }
            mlt_log_verbose( getProducer(), "%s format detection\n", doesDetectFormat ? "supports" : "does not support" );

            // Enable video capture
            BMDPixelFormat pixelFormat = bmdFormat8BitYUV;
            BMDVideoInputFlags flags = doesDetectFormat ? bmdVideoInputEnableFormatDetection : bmdVideoInputFlagDefault;
            if ( S_OK != m_decklinkInput->EnableVideoInput( displayMode, pixelFormat, flags ) )
                throw "Failed to enable video capture.";

            // Enable audio capture
            BMDAudioSampleRate sampleRate = bmdAudioSampleRate48kHz;
            BMDAudioSampleType sampleType = bmdAudioSampleType16bitInteger;
            int channels = mlt_properties_get_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "channels" );
            if ( S_OK != m_decklinkInput->EnableAudioInput( sampleRate, sampleType, channels ) )
                throw "Failed to enable audio capture.";

            // Start capture
            m_dropped = 0;
            mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( getProducer() ), "dropped", m_dropped );
            m_started = m_decklinkInput->StartStreams() == S_OK;
            if ( !m_started )
                throw "Failed to start capture.";
        }
        catch ( const char *error )
        {
            m_decklinkInput->DisableVideoInput();
            mlt_log_error( getProducer(), "%s\n", error );
            return false;
        }
        return true;
    }
Example #16
	HRESULT render( mlt_frame frame )
	{
		HRESULT result = S_OK;
		// Get the audio		
		double speed = mlt_properties_get_double( MLT_FRAME_PROPERTIES(frame), "_speed" );
		if ( speed == 1.0 )
		{
			mlt_audio_format format = mlt_audio_s16;
			int frequency = bmdAudioSampleRate48kHz;
			int samples = mlt_sample_calculator( m_fps, frequency, m_count );
			int16_t *pcm = 0;
			
			if ( !mlt_frame_get_audio( frame, (void**) &pcm, &format, &frequency, &m_channels, &samples ) )
			{
				int count = samples;
				
				if ( !m_isPrerolling )
				{
					uint32_t audioCount = 0;
					uint32_t videoCount = 0;
					
					// Check for resync
					m_deckLinkOutput->GetBufferedAudioSampleFrameCount( &audioCount );
					m_deckLinkOutput->GetBufferedVideoFrameCount( &videoCount );
					
					// Underflow typically occurs during non-normal speed playback.
					if ( audioCount < 1 || videoCount < 1 )
					{
						// Upon switching to normal playback, buffer some frames faster than realtime.
						mlt_log_info( &m_consumer, "buffer underrun: audio buf %u video buf %u frames\n", audioCount, videoCount );
						m_prerollCounter = 0;
					}
					
					// While rebuffering
					if ( isBuffering() )
					{
						// Only append audio to reach the ideal level and not overbuffer.
						int ideal = ( m_preroll - 1 ) * bmdAudioSampleRate48kHz / m_fps;
						int actual = m_fifo->used / m_channels + audioCount;
						int diff = ideal / 2 - actual;
						count = diff < 0 ? 0 : diff < count ? diff : count;
					}
				}
				if ( count > 0 )
					sample_fifo_append( m_fifo, pcm, count * m_channels );
			}
		}
		
		// Create video frames while pre-rolling
		if ( m_isPrerolling )
		{
			createFrame();
			if ( !m_videoFrame )
			{
				mlt_log_error( &m_consumer, "failed to create video frame\n" );
				return S_FALSE;
			}
		}
		
		// Get the video
		if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered") )
		{
			mlt_image_format format = mlt_image_yuv422;
			uint8_t* image = 0;
			uint8_t* buffer = 0;

			if ( !mlt_frame_get_image( frame, &image, &format, &m_width, &m_height, 0 ) )
			{
				m_videoFrame = (IDeckLinkMutableVideoFrame*) mlt_deque_pop_back( m_videoFrameQ );
				m_videoFrame->GetBytes( (void**) &buffer );
				if ( m_displayMode->GetFieldDominance() == bmdUpperFieldFirst )
					// convert lower field first to top field first
					swab( image, buffer + m_width * 2, m_width * ( m_height - 1 ) * 2 );
				else
					swab( image, buffer, m_width * m_height * 2 );
				m_deckLinkOutput->ScheduleVideoFrame( m_videoFrame, m_count * m_duration, m_duration, m_timescale );
				mlt_deque_push_front( m_videoFrameQ, m_videoFrame );
			}
		}
		else
		{
			mlt_log_verbose( &m_consumer, "dropped video frame\n" );
		}
		++m_count;

		// Check for end of pre-roll
		if ( ++m_prerollCounter > m_preroll && m_isPrerolling )
		{
			// Start audio and video output
			m_deckLinkOutput->EndAudioPreroll();
			m_deckLinkOutput->StartScheduledPlayback( 0, m_timescale, 1.0 );
			m_isPrerolling = false;
		}

		return result;
	}