Example #1
bool FFLAVFVideo::SeekTo(int n, int SeekOffset) {
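	// SeekMode >= 0 means the index supports seeking; negative modes allow only strictly linear access.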
	if (SeekMode >= 0) {
		int TargetFrame = n + SeekOffset;
		if (TargetFrame < 0)
			throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_UNKNOWN,
			"Frame accurate seeking is not possible in this file");

		if (SeekMode < 3)
			TargetFrame = Frames.FindClosestVideoKeyFrame(TargetFrame);

		if (SeekMode == 0) {
			if (n < CurrentFrame) {
				av_seek_frame(FormatContext, VideoTrack, Frames[0].PTS, AVSEEK_FLAG_BACKWARD);
				FlushBuffers(CodecContext);
				CurrentFrame = 0;
				DelayCounter = 0;
				InitialDecode = 1;
			}
		} else {
			// Use a 10-frame margin to prevent excessive seeking, since avformat doesn't always select the predicted best keyframe
			if (n < CurrentFrame || TargetFrame > CurrentFrame + 10 || (SeekMode == 3 && n > CurrentFrame + 10)) {
				av_seek_frame(FormatContext, VideoTrack, Frames[TargetFrame].PTS, AVSEEK_FLAG_BACKWARD);
				FlushBuffers(CodecContext);
				DelayCounter = 0;
				InitialDecode = 1;
				return true;
			}
		}
	} else if (n < CurrentFrame) {
		throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_INVALID_ARGUMENT,
			"Non-linear access attempted");
	}
	return false;
}
Example #2
FFMS_Frame *FFMatroskaVideo::GetFrame(int n) {
	GetFrameCheck(n);
	n = Frames.RealFrameNumber(n);

	if (LastFrameNum == n)
		return &LocalFrame;

	bool HasSeeked = false;
	int ClosestKF = Frames.FindClosestVideoKeyFrame(n);
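	// Seek only when the request lies behind the current position, or when the target keyframe is far enough ahead that decoding straight through would waste work.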
	if (CurrentFrame > n || ClosestKF > CurrentFrame + 10) {
		DelayCounter = 0;
		InitialDecode = 1;
		PacketNumber = ClosestKF;
		CurrentFrame = ClosestKF;
		FlushBuffers(CodecContext);
		HasSeeked = true;
	}

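	// Decode up to the target frame, discarding non-reference frames until we are within the decoder's delay of it (FFMS_CALCULATE_DELAY approximates that delay).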
	do {
		if (CurrentFrame + FFMS_CALCULATE_DELAY >= n || HasSeeked)
			CodecContext->skip_frame = AVDISCARD_DEFAULT;
		else
			CodecContext->skip_frame = AVDISCARD_NONREF;
		DecodeNextFrame();
		CurrentFrame++;
		HasSeeked = false;
	} while (CurrentFrame <= n);

	LastFrameNum = n;
	return OutputFrame(DecodeFrame);
}
Example #3
void Synchronizer::SynchronizerThread() {
	try {

		Logger::LogInfo("[Synchronizer::SynchronizerThread] Synchronizer thread started.");

		while(!m_should_stop) {

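			// Flush buffered frames and update the sync diagram while holding the shared-data lock.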
			{
				SharedLock lock(&m_shared_data);
				FlushBuffers(lock.get());
				if(lock->m_sync_diagram != NULL) {
					double time_in = (double) hrt_time_micro() * 1.0e-6;
					double time_out = (double) GetTotalTime(lock.get()) * 1.0e-6;
					lock->m_sync_diagram->SetCurrentTime(0, time_in);
					lock->m_sync_diagram->SetCurrentTime(1, time_in);
					lock->m_sync_diagram->SetCurrentTime(2, time_out);
					lock->m_sync_diagram->SetCurrentTime(3, time_out);
					lock->m_sync_diagram->Update();
				}
			}

			usleep(10000);

		}

		Logger::LogInfo("[Synchronizer::SynchronizerThread] Synchronizer thread stopped.");

	} catch(const std::exception& e) {
		m_error_occurred = true;
		Logger::LogError(QString("[Synchronizer::SynchronizerThread] Exception '") + e.what() + "' in synchronizer thread.");
	} catch(...) {
		m_error_occurred = true;
		Logger::LogError("[Synchronizer::SynchronizerThread] Unknown exception in synchronizer thread.");
	}
}
Example #4
void Synchronizer::NewSegment(SharedData* lock) {
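	// Fold the finished segment's duration into the running time offset, then drop buffered data and start a fresh segment.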
	FlushBuffers(lock);
	if(lock->m_segment_video_started && lock->m_segment_audio_started) {
		int64_t segment_start_time, segment_stop_time;
		GetSegmentStartStop(lock, &segment_start_time, &segment_stop_time);
		lock->m_time_offset += std::max((int64_t) 0, segment_stop_time - segment_start_time);
	}
	lock->m_video_buffer.clear();
	lock->m_audio_buffer.Clear();
	InitSegment(lock);
}
Example #5
void cTCPLinkImpl::cLinkTlsContext::StoreReceivedData(const char * a_Data, size_t a_NumBytes)
{
	// Hold self alive for the duration of this function
	cLinkTlsContextPtr Self(m_Self);

	m_EncryptedData.append(a_Data, a_NumBytes);

	// Try to finish a pending handshake:
	TryFinishHandshaking();

	// Flush any cleartext data that can be "received":
	FlushBuffers();
}
Example #6
void FFMS_AudioSource::SetOutputFormat(FFMS_ResampleOptions const& opt) {
	if (opt.SampleRate != AP.SampleRate)
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNSUPPORTED,
			"Sample rate changes are currently unsupported.");

#ifndef FFMS_RESAMPLING_ENABLED
	if (opt.SampleFormat != AP.SampleFormat || opt.SampleRate != AP.SampleRate || opt.ChannelLayout != AP.ChannelLayout)
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNSUPPORTED,
			"FFMS was not built with resampling enabled. The only supported conversion is interleaving planar audio.");
#endif
#ifdef WITH_AVRESAMPLE
	if (opt.SampleFormat != AP.SampleFormat || opt.ChannelLayout != AP.ChannelLayout)
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNSUPPORTED,
			"FFMS was not built with FFMPEG resampling enabled.");
#endif

	// Cache stores audio in the output format, so clear it and reopen the file
	Cache.clear();
	PacketNumber = 0;
	ReopenFile();
	FlushBuffers(CodecContext);

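	// Recompute the output sample size and decide whether an actual resampling step is needed.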
	BytesPerSample = av_get_bytes_per_sample(static_cast<AVSampleFormat>(opt.SampleFormat)) * av_get_channel_layout_nb_channels(opt.ChannelLayout);
	NeedsResample =
		opt.SampleFormat != (int)CodecContext->sample_fmt ||
		opt.SampleRate != AP.SampleRate ||
		opt.ChannelLayout != AP.ChannelLayout ||
		opt.ForceResample;

#ifdef FFMS_RESAMPLING_ENABLED
	if (!NeedsResample) return;

	FFResampleContext newContext;
	SetOptions(opt, newContext, resample_options);
	av_opt_set_int(newContext, "in_sample_rate", AP.SampleRate, 0);
	av_opt_set_int(newContext, "in_sample_fmt", CodecContext->sample_fmt, 0);
	av_opt_set_int(newContext, "in_channel_layout", AP.ChannelLayout, 0);

	av_opt_set_int(newContext, "out_sample_rate", opt.SampleRate, 0);

#ifdef WITH_SWRESAMPLE
	av_opt_set_channel_layout(newContext, "out_channel_layout", opt.ChannelLayout, 0);
	av_opt_set_sample_fmt(newContext, "out_sample_fmt", (AVSampleFormat)opt.SampleFormat, 0);
#endif

	if (ffms_open(newContext))
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNKNOWN,
			"Could not open avresample context");
	newContext.swap(ResampleContext);
#endif
}
Example #7
FFMS_Frame *FFHaaliVideo::GetFrame(int n) {
	GetFrameCheck(n);
	n = Frames.RealFrameNumber(n);

	if (LastFrameNum == n)
		return &LocalFrame;

	bool HasSeeked = false;
	int SeekOffset = 0;

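	// Seek when moving backwards or when the next keyframe is far ahead; ReSeek retries with an earlier target if the post-seek position turns out to be unknown.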
	if (n < CurrentFrame || Frames.FindClosestVideoKeyFrame(n) > CurrentFrame + 10) {
ReSeek:
		pMMC->Seek(Frames[n + SeekOffset].PTS, MMSF_PREV_KF);
		FlushBuffers(CodecContext);
		DelayCounter = 0;
		InitialDecode = 1;
		HasSeeked = true;
	}

	do {
		int64_t StartTime = -1;
		if (CurrentFrame + FFMS_CALCULATE_DELAY >= n || HasSeeked)
			CodecContext->skip_frame = AVDISCARD_DEFAULT;
		else
			CodecContext->skip_frame = AVDISCARD_NONREF;
		DecodeNextFrame(&StartTime);

		if (HasSeeked) {
			HasSeeked = false;

			if (StartTime < 0 || (CurrentFrame = Frames.FrameFromPTS(StartTime)) < 0) {
				// No idea where we are so go back a bit further
				if (n + SeekOffset == 0)
					throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_UNKNOWN,
						"Frame accurate seeking is not possible in this file");

				SeekOffset -= FFMIN(20, n + SeekOffset);
				goto ReSeek;
			}
		}

		CurrentFrame++;
	} while (CurrentFrame <= n);

	LastFrameNum = n;
	return OutputFrame(DecodeFrame);
}
Example #8
void cLuaTCPLink::cLinkSslContext::Send(const AString & a_Data)
{
	// Hold self alive for the duration of this function
	cLinkSslContextPtr Self(m_Self);

	// If the handshake hasn't completed yet, queue the data:
	if (!HasHandshaken())
	{
		m_CleartextData.append(a_Data);
		TryFinishHandshaking();
		return;
	}

	// The connection is all set up, write the cleartext data into the SSL context:
	WritePlain(a_Data.data(), a_Data.size());
	FlushBuffers();
}
Example #9
void cTCPLinkImpl::cLinkTlsContext::Send(const void * a_Data, size_t a_Length)
{
	// Hold self alive for the duration of this function
	cLinkTlsContextPtr Self(m_Self);

	// If the handshake hasn't completed yet, queue the data:
	if (!HasHandshaken())
	{
		m_CleartextData.append(reinterpret_cast<const char *>(a_Data), a_Length);
		TryFinishHandshaking();
		return;
	}

	// The connection is all set up, write the cleartext data into the SSL context:
	WritePlain(a_Data, a_Length);
	FlushBuffers();
}
Example #10
DWORD CFileClass::Write(void *pData, int size)
{
    DWORD dwWrite = 0;
    DWORD dwError = 0;
    time_t current_time;
    time(&current_time);

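    // Write the data, flushing the file buffers at most once every m_flash_delay seconds rather than on every write.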
    if (m_hFile)
    {
        if (!WriteFile(m_hFile, pData, size, &dwWrite, NULL))
            dwError = GetLastError();
        else
        {
            if (current_time - m_last_time > m_flash_delay)
            {
                FlushBuffers();
                m_last_time = current_time;
            }
        }
    }

    return dwWrite;
}
Example #11
Synchronizer::~Synchronizer() {

	// disconnect
	ConnectVideoSource(NULL);
	ConnectAudioSource(NULL);

	// tell the thread to stop
	if(m_thread.joinable()) {
		Logger::LogInfo("[Synchronizer::~Synchronizer] Telling synchronizer thread to stop ...");
		m_should_stop = true;
		m_thread.join();
	}

	// flush one more time
	{
		SharedLock lock(&m_shared_data);
		FlushBuffers(lock.get());
	}

	// free everything
	Free();

}
Example #12
void cLuaTCPLink::OnRemoteClosed(void)
{
	// Check if we're still valid:
	if (!m_Callbacks.IsValid())
	{
		return;
	}

	// If running in SSL mode and there's data left in the SSL context, report it:
	auto sslContext = m_SslContext;
	if (sslContext != nullptr)
	{
		sslContext->FlushBuffers();
	}

	// Call the callback:
	cPluginLua::cOperation Op(m_Plugin);
	if (!Op().Call(cLuaState::cTableRef(m_Callbacks, "OnRemoteClosed"), this))
	{
		LOGINFO("cTCPLink OnRemoteClosed() callback failed in plugin %s.", m_Plugin.GetName().c_str());
	}

	Terminated();
}
Example #13
void EMBeOutputNode::HandleEvent(const media_timed_event* event, bigtime_t lateness, bool realTimeEvent)
{
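	// Dispatch the timed event: start or stop the node, reset its timing state, or push the next buffer downstream.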
	switch(event -> type)
	{
		case BTimedEventQueue::B_START:
		{
			if(RunState() != B_STARTED)
			{
//				mOutputEnabled = true;
				m_vNumberOfReceivedFrames = 0;
				m_vStartingTime = TimeSource() -> Now(); //event -> event_time;
				vCount = 0;
				SendDataStatus(B_DATA_AVAILABLE, m_sOutput.destination, event -> event_time);
			}
			break;
		}
		case BTimedEventQueue::B_STOP:
		{
			FlushBuffers(EventQueue());
			SendDataStatus(B_PRODUCER_STOPPED, m_sOutput.destination, event -> event_time);
			break;
		}
		case EM_TIMED_EVENT_RESET:
		{
			FlushBuffers(EventQueue());
			m_vNumberOfReceivedFrames = 0;
//			m_vStartingTime = TimeSource() -> Now(); //event -> event_time;
			m_vStartingTime = event -> event_time;
			vCount = 0;
			break;	// a RESET event carries no buffer, so don't fall through into B_HANDLE_BUFFER
		}
		case BTimedEventQueue::B_HANDLE_BUFFER:
		{
			BBuffer* opBuffer = (BBuffer*) (event -> pointer);
			if(opBuffer == NULL)
			{
				emerr << "ERROR! Received NULL BBuffer in HandleEvent!" << endl;
				break;
			}
			
			if((RunState() == BMediaEventLooper::B_STARTED) && 
				(m_sOutput.destination != media_destination::null))
			{
				int64 vFrames = EMBeMediaUtility::FramesInBuffer(opBuffer, GetConnectedEMMediaFormat(), EM_TYPE_RAW_AUDIO);
				if(mOutputEnabled) 
				{
					opBuffer -> Header() -> start_time = event -> event_time;
					opBuffer -> Header() -> type = m_sOutput.format.type;
					
					status_t err = SendBuffer(opBuffer, m_sOutput.destination);
					if(err != B_OK)
					{
						emerr << "ERROR! Could not send buffer to downstream node: " << strerror(err) << endl;
						opBuffer -> Recycle();
					}
					else
					{
						bool m_vIsCurrentlyRecording = (EMMediaEngine::Instance() -> GetMediaProject() -> IsArmed() && EMMediaEngine::Instance() -> GetMediaProject() -> IsPlaying());
//						bool m_vIsCurrentlyPlaying = (! EMMediaEngine::Instance() -> GetMediaProject() -> IsArmed() && EMMediaEngine::Instance() -> GetMediaProject() -> IsPlaying());
						if(m_vIsClockMaster && ! m_vIsCurrentlyRecording)
							EMMediaTimer::Instance() -> IncreaseNowFrame(vFrames);
					}
				}
				else
					opBuffer -> Recycle();
			}
			else
				opBuffer -> Recycle();
			break;
		}
		case EM_TIMED_EVENT_FLUSH_CASH:
		{
			FlushBuffers(EventQueue());
			m_vStartingTime = event -> event_time;
			m_vNumberOfReceivedFrames = 0;
			Notify(EM_TIMED_EVENT_FLUSH_CASH);			
			break;
		}
		default:
		{	
			;//emout_commented_out_4_release << "ERROR! EMBeOutoutNode received unknown event!" << endl;
			break;
		}
	}
}
Example #14
void cTCPLinkImpl::EventCallback(bufferevent * a_BufferEvent, short a_What, void * a_Self)
{
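	// Translate LibEvent's status flags (error / connected / EOF) into the matching link callbacks.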
	ASSERT(a_Self != nullptr);
	cTCPLinkImplPtr Self = static_cast<cTCPLinkImpl *>(a_Self)->m_Self;
	if (Self == nullptr)
	{
		// The link has already been freed
		return;
	}

	// If an error is reported, call the error callback:
	if (a_What & BEV_EVENT_ERROR)
	{
		// Choose the proper callback to call based on whether we were waiting for connection or not:
		int err = EVUTIL_SOCKET_ERROR();
		if (Self->m_ConnectCallbacks != nullptr)
		{
			if (err == 0)
			{
				// This could be a DNS failure
				err = bufferevent_socket_get_dns_error(a_BufferEvent);
			}
			Self->m_ConnectCallbacks->OnError(err, evutil_socket_error_to_string(err));
		}
		else
		{
			Self->m_Callbacks->OnError(err, evutil_socket_error_to_string(err));
			if (Self->m_Server == nullptr)
			{
				cNetworkSingleton::Get().RemoveLink(Self.get());
			}
			else
			{
				Self->m_Server->RemoveLink(Self.get());
			}
		}
		Self->m_Self.reset();
		return;
	}

	// Pending connection succeeded, call the connection callback:
	if (a_What & BEV_EVENT_CONNECTED)
	{
		Self->UpdateLocalAddress();
		Self->UpdateRemoteAddress();
		if (Self->m_ConnectCallbacks != nullptr)
		{
			Self->m_ConnectCallbacks->OnConnected(*Self);
			// Reset the connect callbacks so that later errors get reported through the link callbacks:
			Self->m_ConnectCallbacks.reset();
			return;
		}
	}

	// If the connection has been closed, call the link callback and remove the connection:
	if (a_What & BEV_EVENT_EOF)
	{
		// If running in TLS mode and there's data left in the TLS context, report it:
		auto tlsContext = Self->m_TlsContext;
		if (tlsContext != nullptr)
		{
			tlsContext->FlushBuffers();
		}

		Self->m_Callbacks->OnRemoteClosed();
		if (Self->m_Server != nullptr)
		{
			Self->m_Server->RemoveLink(Self.get());
		}
		else
		{
			cNetworkSingleton::Get().RemoveLink(Self.get());
		}
		Self->m_Self.reset();
		return;
	}

	// Unknown event, report it:
	LOGWARNING("cTCPLinkImpl: Unhandled LibEvent event %d (0x%x)", a_What, a_What);
	ASSERT(!"cTCPLinkImpl: Unhandled LibEvent event");
}
Example #15
// Audio playback thread
void *RenderAudio(void *arg)
{
	if(!arg)
		return NULL;

	AvManager *manager = (AvManager*)arg;
	MediaPacket spkt;
	long avsize = 0;
	int ret = 0;

	// Keep reading data as long as no stop signal has been received
	for(;manager->avState != STOP;)
	{
		// While paused and not seeking, just keep waiting
		if(manager->avState == PAUSE && !manager->avSeek)
		{
			usSleep(DEFAULT_AV_WAIT);
			continue;
		}

		ret = RecvDataFromAvQueue(manager->MediaAudio,&spkt);

		if(ret < 0)
		{
			usSleep(DEFAULT_AV_WAIT);
			continue;			
		}

		if(manager->avSeek && !IsHardWareAccel())
		{
			FlushBuffers(AUDIO);
			DestroyPacket(&spkt);
			continue;
		}

		if(manager->audioDelay)
		{
			usSleep(manager->audioDelay);
			manager->audioDelay = 0;
		}

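		// Decode and play the packet, then advance the player clock from its PTS so the video thread can sync against audio.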
		if(spkt.data && spkt.size > 0)
		{
			void *abuf = DecodeAudioStream(&spkt,&avsize);

			// Play the decoded audio
			if(abuf && avsize > 0)
			{
				PlayWave(abuf,avsize);

				// Audio timestamp bookkeeping
				manager->audioClock = (double)avsize/(double)manager->audioBasePara;

				if(spkt.pts != AV_NOPTS_VALUE)
					manager->playerClock = AVTimeToSecond(spkt.pts,manager->audioTimeBase) + manager->audioClock;
				else
					manager->playerClock += manager->audioClock;

				manager->last_audio_pts = manager->playerClock;

			}
		}
		else
			FreeAlloc(spkt.data);		
	}

	return NULL;
}
Example #16
// Video decoding thread
void *RenderVideo(void *arg)
{
	if(!arg)
		return NULL;

	AvManager *manager = (AvManager*)arg;
	MediaPacket spkt;
	int size = 0,ret = 0;
	long avsize = 0;

	// Keep reading data as long as no stop signal has been received
	for(;manager->avState != STOP;)
	{
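		// Don't decode further ahead while the display queue already holds enough pictures.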
		if(ParseQueueBufferLength(manager->MediaPicture) > CACHE_PICTURE_BUFFER_NUM)
		{
			usSleep(DEFAULT_AV_WAIT);
			continue;
		}

		if(manager->avState == PAUSE && !manager->avSeek)
		{
			usSleep(DEFAULT_AV_WAIT);
			continue;
		}

		memset(&spkt,0,sizeof(MediaPacket));

		ret = RecvDataFromAvQueue(manager->MediaVideo,&spkt);

		if(ret < 0)
		{
			usSleep(DEFAULT_AV_WAIT);
			continue;			
		}

		if(manager->avSeek && !IsHardWareAccel())
		{
			FlushBuffers(VIDEO);
		}

		if(spkt.data && spkt.size > 0)
		{
			void *vbuf = DecodeVideoYUVStream(&spkt,&avsize);

			if(vbuf && avsize > 0)
			{
				vbuf = StartScale(vbuf,avsize,&size,VideoOutPyaddr(),NULL);

				if(vbuf && size > 0)
				{
					MediaPacket mpkt;
					memset(&mpkt,0,sizeof(MediaPacket));
			
					mpkt.data = UseAlloc(size);

					if(!mpkt.data)
						continue;

					memcpy(mpkt.data,vbuf,size);
					mpkt.size = size;

					// Timestamp bookkeeping: prefer PTS, fall back to DTS

					double v_pts = 0;

					if(spkt.pts != AV_NOPTS_VALUE)
						v_pts = AVTimeToSecond(spkt.pts,manager->videoTimeBase);
					else if(spkt.dts != AV_NOPTS_VALUE)
						v_pts = AVTimeToSecond(spkt.dts,manager->videoTimeBase);
					else 
						v_pts = 0;

					double pts = SynVideo(manager,v_pts);

					mpkt.pts = AVTimeToPts(pts,manager->videoTimeBase);

					PutDataToTail(manager->MediaPicture,&mpkt);
				}
			}
		}
		else
			FreeAlloc(spkt.data);

		if(!manager->playAudio)
			manager->playerClock = manager->videoClock;
	}

	return NULL;
}
Example #17
void EnhancedVtoRouter::CloseAndFlushBuffers()
{
	FlushBuffers();
	this->Close();
}