Example #1: Synchronizer::Init()
void Synchronizer::Init() {

	// initialize video
	if(m_output_format->m_video_enabled) {
		// convert the maximum allowed delay (in microseconds) to a frame count, rounded to the nearest integer
		m_max_frames_skipped = (m_output_settings->video_allow_frame_skipping)? (MAX_FRAME_DELAY * m_output_format->m_video_frame_rate + 500000) / 1000000 : 0;
		VideoLock videolock(&m_video_data);
		videolock->m_last_timestamp = std::numeric_limits<int64_t>::min();
		videolock->m_next_timestamp = SINK_TIMESTAMP_ASAP;
	}

	// initialize audio
	if(m_output_format->m_audio_enabled) {
		AudioLock audiolock(&m_audio_data);
		audiolock->m_fast_resampler.reset(new FastResampler(m_output_format->m_audio_channels, 0.9f));
		InitAudioSegment(audiolock.get());
		audiolock->m_warn_desync = true;
	}

	// create sync diagram
	if(g_option_syncdiagram) {
		m_sync_diagram.reset(new SyncDiagram(4));
		m_sync_diagram->SetChannelName(0, SyncDiagram::tr("Video in"));
		m_sync_diagram->SetChannelName(1, SyncDiagram::tr("Audio in"));
		m_sync_diagram->SetChannelName(2, SyncDiagram::tr("Video out"));
		m_sync_diagram->SetChannelName(3, SyncDiagram::tr("Audio out"));
		m_sync_diagram->show();
	}

	// initialize shared data
	{
		SharedLock lock(&m_shared_data);

		if(m_output_format->m_audio_enabled) {
			lock->m_partial_audio_frame.Alloc(m_output_format->m_audio_frame_size * m_output_format->m_audio_channels);
			lock->m_partial_audio_frame_samples = 0;
		}
		lock->m_video_pts = 0;
		lock->m_audio_samples = 0;
		lock->m_time_offset = 0;

		InitSegment(lock.get());

		lock->m_warn_drop_video = true;

	}

	// start synchronizer thread
	m_should_stop = false;
	m_error_occurred = false;
	m_thread = std::thread(&Synchronizer::SynchronizerThread, this);

}
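The VideoLock, AudioLock, and SharedLock objects above act as RAII guards: they acquire a mutex for their lifetime and expose the protected data through operator-> and get(). A minimal sketch of such a wrapper, assuming a std::mutex-based design (the MutexDataPair and Lock names here are illustrative, not necessarily the project's actual types):

#include <mutex>

// Couples a data struct with the mutex that guards it, so the data
// can only be reached while the lock is held.
template<typename T>
class MutexDataPair {
public:
	// RAII guard: locks on construction, unlocks on destruction.
	class Lock {
	public:
		explicit Lock(MutexDataPair* pair)
			: m_lock(pair->m_mutex), m_data(&pair->m_data) {}
		T* operator->() const { return m_data; }
		T* get() const { return m_data; }
	private:
		std::lock_guard<std::mutex> m_lock;
		T* m_data;
	};
private:
	std::mutex m_mutex;
	T m_data;
};

// Usage matching the pattern above, with an illustrative data struct:
//   MutexDataPair<VideoData> m_video_data;
//   typedef MutexDataPair<VideoData>::Lock VideoLock;
//   VideoLock videolock(&m_video_data); // locked until videolock goes out of scope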
Example #2: Synchronizer::ReadVideoFrame()
void Synchronizer::ReadVideoFrame(unsigned int width, unsigned int height, const uint8_t* data, int stride, AVPixelFormat format, int64_t timestamp) {
	assert(m_output_format->m_video_enabled);

	// add new block to sync diagram
	if(m_sync_diagram != NULL)
		m_sync_diagram->AddBlock(0, (double) timestamp * 1.0e-6, (double) timestamp * 1.0e-6 + 1.0 / (double) m_output_format->m_video_frame_rate, QColor(255, 0, 0));

	VideoLock videolock(&m_video_data);

	// check the timestamp
	if(timestamp < videolock->m_last_timestamp) {
		if(timestamp < videolock->m_last_timestamp - 10000)
			Logger::LogWarning("[Synchronizer::ReadVideoFrame] " + Logger::tr("Warning: Received video frame with non-monotonic timestamp."));
		timestamp = videolock->m_last_timestamp;
	}

	// drop the frame if it is too early (before converting it)
	if(videolock->m_next_timestamp != SINK_TIMESTAMP_ASAP && timestamp < videolock->m_next_timestamp - (int64_t) (1000000 / m_output_format->m_video_frame_rate))
		return;

	// update the timestamps
	videolock->m_last_timestamp = timestamp;
	videolock->m_next_timestamp = std::max(videolock->m_next_timestamp + (int64_t) (1000000 / m_output_format->m_video_frame_rate), timestamp);

	// create the converted frame
	std::unique_ptr<AVFrameWrapper> converted_frame = CreateVideoFrame(m_output_format->m_video_width, m_output_format->m_video_height, m_output_format->m_video_pixel_format, NULL);

	// scale and convert the frame to the right format
	videolock->m_fast_scaler.Scale(width, height, format, &data, &stride,
			m_output_format->m_video_width, m_output_format->m_video_height, m_output_format->m_video_pixel_format,
			converted_frame->GetFrame()->data, converted_frame->GetFrame()->linesize);

	SharedLock lock(&m_shared_data);

	// avoid memory problems by limiting the video buffer size
	if(lock->m_video_buffer.size() >= MAX_VIDEO_FRAMES_BUFFERED) {
		if(lock->m_segment_audio_started) {
			if(lock->m_warn_drop_video) {
				lock->m_warn_drop_video = false;
				Logger::LogWarning("[Synchronizer::ReadVideoFrame] " + Logger::tr("Warning: Video buffer overflow, some frames will be lost. The audio input seems to be too slow."));
			}
			return;
		} else {
			// if the audio hasn't started yet, it makes more sense to drop the oldest frames
			lock->m_video_buffer.pop_front();
			assert(lock->m_video_buffer.size() > 0);
			lock->m_segment_video_start_time = lock->m_video_buffer.front()->GetFrame()->pts;
		}
	}

	// start video
	if(!lock->m_segment_video_started) {
		lock->m_segment_video_started = true;
		lock->m_segment_video_start_time = timestamp;
		lock->m_segment_video_stop_time = timestamp;
	}

	// store the frame
	converted_frame->GetFrame()->pts = timestamp;
	lock->m_video_buffer.push_back(std::move(converted_frame));

	// increase the segment stop time
	lock->m_segment_video_stop_time = timestamp + (int64_t) (1000000 / m_output_format->m_video_frame_rate);

}
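The early-drop test and the m_next_timestamp update above implement the frame pacing: a frame is discarded when it arrives more than one frame period before the next sink deadline, and each accepted frame advances the deadline by one period, or snaps it forward to the frame's own timestamp when capture is running behind. A standalone sketch of that arithmetic with made-up numbers (the 30 fps rate and the timestamps are illustrative):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
	const int64_t frame_rate = 30;               // illustrative frame rate
	const int64_t period = 1000000 / frame_rate; // frame period in microseconds (33333)
	int64_t next_timestamp = 1000000;            // next sink deadline in microseconds

	// a frame arriving more than one period before the deadline is dropped
	int64_t early = 950000;
	std::printf("dropped: %d\n", (early < next_timestamp - period)); // 1, since 950000 < 966667

	// an accepted frame advances the deadline by one period,
	// or jumps to the frame timestamp if capture is running late
	int64_t late = 1100000;
	next_timestamp = std::max(next_timestamp + period, late);
	std::printf("next deadline: %lld\n", (long long) next_timestamp); // 1100000
	return 0;
}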
Example #3: Synchronizer::GetNextVideoTimestamp()
int64_t Synchronizer::GetNextVideoTimestamp() {
	assert(m_output_format->m_video_enabled);
	VideoLock videolock(&m_video_data);
	return videolock->m_next_timestamp;
}
Example #4: Synchronizer::GetNextVideoTimestamp() (encoder-based variant)
int64_t Synchronizer::GetNextVideoTimestamp() {
	assert(m_video_encoder != NULL);
	VideoLock videolock(&m_video_data);
	return videolock->m_next_timestamp;
}
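Both variants return the timestamp at which the sink wants the next frame, so a video source can poll it to pace capture. A hypothetical polling loop (ShouldStop, SleepUntil, and CaptureFrame are illustrative helpers, not project functions):

// Sketch of a capture loop paced by the synchronizer.
void CaptureLoop(Synchronizer* synchronizer) {
	while(!ShouldStop()) {                                    // illustrative stop flag
		int64_t next = synchronizer->GetNextVideoTimestamp();
		if(next != SINK_TIMESTAMP_ASAP)
			SleepUntil(next); // illustrative: wait until the sink wants the next frame
		CaptureFrame();       // illustrative: grab one frame and pass it to the synchronizer
	}
}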
Example #5: Synchronizer::Init() (encoder-based variant)
void Synchronizer::Init() {

	// initialize video
	if(m_video_encoder != NULL) {
		m_video_width = m_video_encoder->GetWidth();
		m_video_height = m_video_encoder->GetHeight();
		m_video_frame_rate = m_video_encoder->GetFrameRate();
		// convert the maximum allowed delay (in microseconds) to a frame count, rounded to the nearest integer
		m_video_max_frames_skipped = (m_allow_frame_skipping)? (MAX_FRAME_DELAY * m_video_frame_rate + 500000) / 1000000 : 0;
	}

	// initialize audio
	if(m_audio_encoder != NULL) {
		m_audio_sample_rate = m_audio_encoder->GetSampleRate();
		m_audio_channels = 2; //TODO// never larger than AV_NUM_DATA_POINTERS
		m_audio_required_frame_samples = m_audio_encoder->GetRequiredFrameSamples();
		m_audio_required_sample_format = m_audio_encoder->GetRequiredSampleFormat();
		switch(m_audio_required_sample_format) {
			case AV_SAMPLE_FMT_S16:
#if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
			case AV_SAMPLE_FMT_S16P:
#endif
				m_audio_required_sample_size = m_audio_channels * 2; break; // 16-bit integer samples: 2 bytes per channel
			case AV_SAMPLE_FMT_FLT:
#if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
			case AV_SAMPLE_FMT_FLTP:
#endif
				m_audio_required_sample_size = m_audio_channels * 4; break; // 32-bit float samples: 4 bytes per channel
			default: assert(false); break;
		}
	}

	// create sync diagram
	if(g_option_syncdiagram) {
		m_sync_diagram.reset(new SyncDiagram(4));
		m_sync_diagram->SetChannelName(0, SyncDiagram::tr("Video in"));
		m_sync_diagram->SetChannelName(1, SyncDiagram::tr("Audio in"));
		m_sync_diagram->SetChannelName(2, SyncDiagram::tr("Video out"));
		m_sync_diagram->SetChannelName(3, SyncDiagram::tr("Audio out"));
		m_sync_diagram->show();
	}

	// initialize video data
	{
		VideoLock videolock(&m_video_data);
		videolock->m_last_timestamp = std::numeric_limits<int64_t>::min();
		videolock->m_next_timestamp = SINK_TIMESTAMP_ASAP;
	}

	// initialize audio data
	{
		AudioLock audiolock(&m_audio_data);
		audiolock->m_fast_resampler.reset(new FastResampler(m_audio_channels, 0.9f));
		InitAudioSegment(audiolock.get());
		audiolock->m_warn_desync = true;
	}

	// initialize shared data
	{
		SharedLock lock(&m_shared_data);

		if(m_audio_encoder != NULL) {
			lock->m_partial_audio_frame.Alloc(m_audio_required_frame_samples * m_audio_channels);
			lock->m_partial_audio_frame_samples = 0;
		}
		lock->m_video_pts = 0;
		lock->m_audio_samples = 0;
		lock->m_time_offset = 0;

		InitSegment(lock.get());

		lock->m_warn_drop_video = true;

	}

	// start synchronizer thread
	m_should_stop = false;
	m_error_occurred = false;
	m_thread = std::thread(&Synchronizer::SynchronizerThread, this);

}
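In both Init() variants, the frames-skipped expression converts MAX_FRAME_DELAY, which appears to be a delay expressed in microseconds, into a whole number of frames; the +500000 term makes the integer division round to the nearest frame. A small numeric check (the 200000 µs value for MAX_FRAME_DELAY is an assumption for illustration):

#include <cstdint>
#include <cstdio>

int main() {
	const int64_t MAX_FRAME_DELAY = 200000; // illustrative: 0.2 s in microseconds
	for(int64_t frame_rate : {25, 30, 60}) {
		// +500000 rounds the division by 1000000 to the nearest integer
		int64_t max_frames_skipped = (MAX_FRAME_DELAY * frame_rate + 500000) / 1000000;
		std::printf("%2lld fps -> at most %lld skipped frames\n",
				(long long) frame_rate, (long long) max_frames_skipped);
	}
	return 0; // prints 5, 6, and 12 frames respectively
}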