HRESULT CScreenCaptureSourcePin::OnThreadCreate(void)
{
	CAutoLock cAutoLockShared(&m_cSharedState);

	FTLASSERT(NULL == m_pScreenCaptureImpl);
	m_pScreenCaptureImpl = new CGdiScreenCaptureImpl();
	if (!m_pScreenCaptureImpl)
	{
		return E_OUTOFMEMORY;
	}
	m_pScreenCaptureImpl->SetMouseOverLay(m_bMouseOverlay);

    //m_rtSampleTime = 0;
	//m_iRepeatTime = m_rtFrameLength / 10000;  //change from 100ns to ms
	m_nFrameNumber = 0;
    //// we need to also reset the repeat time in case the system
    //// clock is turned off after m_iRepeatTime gets very big
    //m_iRepeatTime = m_iDefaultRepeatTime;
	//ATLTRACE(TEXT("CScreenCaptureSourcePin::OnThreadCreate, m_iRepeatTime=%d\n"), m_iRepeatTime);

	//m_pThreadBlockElapse = new FTL::CFBlockElapse(TEXT(__FILE__), __LINE__, TEXT(__FUNCDNAME__), FTL::_ReturnAddress());

	//HRESULT hr = E_FAIL;
	//hr = __super::OnThreadCreate();
	//return hr;

    return S_OK;
}
Example #2
HRESULT	audioSource::FillBuffer(IMediaSample *pMediaSample)
{	
	_RPT2(_CRT_WARN,"FillBuffer %d %d\n",currentFrame,nr);

	long lDataLen = pMediaSample->GetSize();
	{
		CAutoLock cAutoLockShared(&m_cSharedState);
		
		if (currentFrame >= nr || times[currentFrame]*10000000 > m_rtStop)
		{
			_RPT0(_CRT_WARN,"a stopping\n");
			done=1;
			
			if (stopGraph) return S_FALSE;
			else {
				pMediaSample->SetActualDataLength(0);
				REFERENCE_TIME rtStart,	rtStop;
				
				rtStart	= times[currentFrame-1]*10000000;
				rtStop = m_rtStop;
				pMediaSample->SetTime(&rtStart,	&rtStop);
			
				_RPT0(_CRT_WARN,"a Sleeping \n");
				Sleep(1000);
				return NOERROR;
			}
		}
		
		double* dData = (double*)frames[currentFrame];

		if (subtype == MEDIASUBTYPE_PCM)
		{
			short *pData;
			pMediaSample->GetPointer((BYTE**)&pData);
			
			for (int i=lens[currentFrame]*nrChannels-1;i>=0;i--) pData[i] = min((1<<15)-1,dData[i]*(1<<15));
		} else { // FLOAT format
			float *pData;
			pMediaSample->GetPointer((BYTE**)&pData);

			for (int i=lens[currentFrame]*nrChannels-1;i>=0;i--) pData[i] = dData[i];
		}

		REFERENCE_TIME rtStart,	rtStop;
		
		rtStart	= times[currentFrame]*10000000;
		rtStop	= rtStart + (REFERENCE_TIME)(lens[currentFrame] * 10000000.0 / rate);	// samples/rate seconds, converted to 100-ns units

		pMediaSample->SetActualDataLength(lens[currentFrame]*wordSize);
		_RPT4(_CRT_WARN,"a SetTime %d %d   %d %d\n",(int)(rtStart>>32),(int)rtStart,(int)(rtStop>>32),(int)rtStop);
		pMediaSample->SetTime(&rtStart,	&rtStop);
	
		currentFrame++;
	}

	pMediaSample->SetSyncPoint(TRUE);

	return NOERROR;
}
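A minimal standalone sketch (not part of the filter above) of the two conversions the loop performs: seconds to 100-ns REFERENCE_TIME units, and normalized double samples to 16-bit PCM. Note the original clamps only positive overflow; this sketch clamps both ends.

#include <algorithm>

typedef long long REFERENCE_TIME;             // 100-ns units, as in DirectShow
static const REFERENCE_TIME UNITS = 10000000; // 100-ns units per second

// seconds (e.g. times[currentFrame]) -> REFERENCE_TIME
REFERENCE_TIME SecondsToRefTime(double seconds)
{
	return (REFERENCE_TIME)(seconds * UNITS);
}

// normalized sample in [-1.0, 1.0] -> clamped 16-bit PCM
short DoubleToPcm16(double sample)
{
	double scaled = sample * 32768.0;
	return (short)(std::max)(-32768.0, (std::min)(32767.0, scaled));
}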
Example #3
HRESULT CFLICStream::OnThreadCreate()
{
    CAutoLock cAutoLockShared(&m_cSharedState);
    m_rtSampleTime = 0;
    m_rtPosition = m_rtStart;

    return CSourceStream::OnThreadCreate();
}
//------------------------------------------------------------------------------
// FillBuffer
// This is where we set the timestamps for the samples.
// FillBuffer is called once for every sample in the stream.
HRESULT CDecklinkAudioSourcePin::FillBuffer(IMediaSample *pSample)
{
    CheckPointer(pSample, E_POINTER);

    CAutoLock cAutoLockShared(&m_cSharedState);

    // Check that we're still using audio
    ASSERT(m_mt.formattype == FORMAT_WaveFormatEx);

	WAVEFORMATEX* pwfex = (WAVEFORMATEX*)m_mt.pbFormat;
    if (WAVE_FORMAT_PCM == pwfex->wFormatTag)
    {
		unsigned long ulSampleCount = pSample->GetActualDataLength() * 8 / pwfex->wBitsPerSample / pwfex->nChannels;

		// Set the sample timestamp.  Pretty bloody important.
		// DShow is governed by sample timestamps.  As this push source
		// will deliver at a variable rate, determined by the rate at
		// which the external app can deliver frames, use the current stream time
		// for the sample timestamp of the FIRST sample.  Every subsequent sample
		// is timestamped at intervals of the number of audio samples written.

		CRefTime rtStream;
		m_pFilter->StreamTime(rtStream);

		// timestamp the first sample with the current stream time.
		if (0 == m_iFrameNumber)
		{
			m_rtNextValidStart = rtStream;
		}
	    REFERENCE_TIME rtStart = m_rtNextValidStart;
	    REFERENCE_TIME rtStop  = rtStart + (UNITS * ulSampleCount / pwfex->nSamplesPerSec);
	    m_rtNextValidStart = rtStop;

		pSample->SetTime(&rtStart, &rtStop);
		static long lastTime = 0;
		long time = timeGetTime();
		DbgLog((LOG_TRACE, DBG_AUD, TEXT("AUD_PUSH: %I64d [%I64d  %I64d] %ld  %I64d"), rtStream.m_time, rtStart, rtStop, time - lastTime, m_rtMediaTime));
		lastTime = time;

		// set sample media times
		rtStart = m_rtMediaTime;
		rtStop = rtStart + ulSampleCount;
		pSample->SetMediaTime(&rtStart, &rtStop);
		m_rtMediaTime = rtStop;

	    m_iFrameNumber++;

		// Set TRUE on every sample for uncompressed frames
		pSample->SetSyncPoint(TRUE);
	}

    return S_OK;
}
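A standalone sketch of the duration arithmetic above, using assumed example values (48 kHz, 16-bit stereo, a 19200-byte buffer); only the formulas mirror the code.

#include <cstdio>

typedef long long REFERENCE_TIME;
static const REFERENCE_TIME UNITS = 10000000;	// 100-ns units per second

int main()
{
	const unsigned long bytes = 19200, bitsPerSample = 16, channels = 2, samplesPerSec = 48000;
	unsigned long samples = bytes * 8 / bitsPerSample / channels;	// 4800 samples per channel
	REFERENCE_TIME duration = UNITS * samples / samplesPerSec;		// 1000000 = 100 ms
	printf("%lu samples -> %lld (100-ns units)\n", samples, duration);
	return 0;
}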
Example #5
// This is where we insert the DIB bits into the video stream.
// FillBuffer is called once for every sample in the stream.
HRESULT CVCamPin::FillBuffer(IMediaSample *pSample)
{
	BYTE *pData;
    long cbData;

    CheckPointer(pSample, E_POINTER);

    CAutoLock cAutoLockShared(&m_cSharedState);

    // Access the sample's data buffer
    pSample->GetPointer(&pData);
    cbData = pSample->GetSize();

    // Check that we're still using video
    ASSERT(m_mt.formattype == FORMAT_VideoInfo);

    VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)m_mt.pbFormat;

	// Copy the DIB bits over into our filter's output buffer.
    // Since sample size may be larger than the image size, bound the copy size.
    int nSize = min(pVih->bmiHeader.biSizeImage, (DWORD) cbData);
    //HDIB hDib = CopyScreenToBitmap(&m_rScreen, pData, (BITMAPINFO *)&(pVih->bmiHeader), m_hCursor);

// 	if (!FillScreenData_Fast(&m_rScreen, pData, (BITMAPINFO *)&(pVih->bmiHeader)))
// 	{
// 		OutputDebugStringA("VCam FillBuffer fail!");
// 		return E_FAIL;
// 	}

	// Set the timestamps that will govern playback frame rate.
	// If this file is getting written out as an AVI,
	// then you'll also need to configure the AVI Mux filter to 
	// set the Average Time Per Frame for the AVI Header.
    // The current time is the sample's start.
    REFERENCE_TIME rtStart = m_iFrameNumber * m_rtFrameLength;
    REFERENCE_TIME rtStop  = rtStart + m_rtFrameLength;

    pSample->SetTime(&rtStart, &rtStop);
    m_iFrameNumber++;

	// Set TRUE on every sample for uncompressed frames
    pSample->SetSyncPoint(TRUE);

	if(!FillScreenData_Fast(&m_rScreen, pData, (BITMAPINFO *)&(pVih->bmiHeader)))
	{
		OutputDebugStringA("VCam FillBuffer fail!");
	}

    return S_OK;
}
Example #6
// This is where we insert the DIB bits into the video stream.
// FillBuffer is called once for every sample in the stream.
HRESULT FCapturePin::FillBuffer(IMediaSample *pSample)
{
	uint8 *pData;
	long cbData;

	CheckPointer(pSample, E_POINTER);

	CAutoLock cAutoLockShared(&SharedState);

	// Access the sample's data buffer
	pSample->GetPointer(&pData);
	cbData = pSample->GetSize();

	// Check that we're still using video
	check(m_mt.formattype == FORMAT_VideoInfo);

	VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)m_mt.pbFormat;

	if (pData)
	{
		uint32 sizeInBytes = FAVIWriter::GetInstance()->GetWidth() * FAVIWriter::GetInstance()->GetHeight() * sizeof(FColor);
		const TArray<FColor>& Buffer = FAVIWriter::GetInstance()->GetColorBuffer();
		uint32 smallest = FMath::Min((uint32)pVih->bmiHeader.biSizeImage, (uint32)cbData);
		// Copy the DIB bits over into our filter's output buffer.
		// Since sample size may be larger than the image size, bound the copy size
		FMemory::Memcpy(pData, Buffer.GetData(), FMath::Min(smallest, sizeInBytes));
	}

	// Set the timestamps that will govern playback frame rate.
	// set the Average Time Per Frame for the AVI Header.
	// The current time is the sample's start.

	int32 FrameNumber = FAVIWriter::GetInstance()->GetFrameNumber();
	UE_LOG(LogMovieCapture, Log, TEXT(" FillBuffer: FrameNumber = %d  FramesWritten = %d"), FrameNumber, FramesWritten);

	REFERENCE_TIME Start = FramesWritten * FrameLength;
	REFERENCE_TIME Stop  = Start + FrameLength;
	FramesWritten++;

	UE_LOG(LogMovieCapture, Log, TEXT(" FillBuffer: (%d, %d)"), Start, Stop);
	UE_LOG(LogMovieCapture, Log, TEXT("-----------------END------------------"));

	pSample->SetTime(&Start, &Stop);

	// Set true on every sample for uncompressed frames
	pSample->SetSyncPoint(true);

	return S_OK;
}
Example #7
// This is where we insert the DIB bits into the video stream.
// FillBuffer is called once for every sample in the stream.
HRESULT CPushPinBitmapSet::FillBuffer(IMediaSample *pSample)
{
    BYTE *pData;
    long cbData;

    // If the bitmap files were not loaded, just fail here.
    if (!m_bFilesLoaded)
        return E_FAIL;

    CheckPointer(pSample, E_POINTER);
    CAutoLock cAutoLockShared(&m_cSharedState);

    // Access the sample's data buffer
    pSample->GetPointer(&pData);
    cbData = pSample->GetSize();

    // Check that we're still using video
    ASSERT(m_mt.formattype == FORMAT_VideoInfo);

    VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)m_mt.pbFormat;

    // Copy the DIB bits over into our filter's output buffer.
    // Since sample size may be larger than the image size, bound the copy size.
    // Remember that the new data has the same format that we specified in GetMediaType.
    memcpy(pData, m_pImage[m_iCurrentBitmap], min(pVih->bmiHeader.biSizeImage, (DWORD) cbData));

    // Set the timestamps that will govern playback frame rate.
    // If this file is getting written out as an AVI,
    // then you'll also need to configure the AVI Mux filter to 
    // set the Average Time Per Frame for the AVI Header.
    // The current time is the sample's start
    REFERENCE_TIME rtStart = m_iFrameNumber * m_rtFrameLength;
    REFERENCE_TIME rtStop  = rtStart + m_rtFrameLength;

    pSample->SetTime(&rtStart, &rtStop);
    m_iFrameNumber++;

    // Set TRUE on every sample for uncompressed frames
    pSample->SetSyncPoint(TRUE);

    // Increment the current buffer so that the next FillBuffer() call 
    // will use the bits from the next bitmap in the set.
    m_iCurrentBitmap++;
    m_iCurrentBitmap %= NUM_FILES;

    return S_OK;
}
Example #8
//------------------------------------------------------------------------------
// FillBuffer
// This is where we set the timestamps for the samples.
// FillBuffer is called once for every sample in the stream.
HRESULT CVideoSourcePin::FillBuffer(IMediaSample *pSample)
{
    CheckPointer(pSample, E_POINTER);

    CAutoLock cAutoLockShared(&m_cSharedState);

    // Check that we're still using video
    ASSERT((m_mt.formattype == FORMAT_VideoInfo) || (m_mt.formattype == FORMAT_VideoInfo2));

	// Set the sample timestamp.  Pretty bloody important.
	// DShow is governed by sample timestamps.  As this push source
	// will deliver at a variable rate, determined by the rate at
	// which the external app can deliver frames, use the current stream time
	// for the sample timestamp of the FIRST sample.  Every subsequent sample
	// is timestamped at intervals of m_rtFrameLength.

	CRefTime rtStream;
    m_pFilter->StreamTime(rtStream);

	// timestamp the first sample with the current stream time.
	if (0 == m_iFrameNumber)
	{
		m_rtNextValidStart = rtStream;
	}
    REFERENCE_TIME rtStart = m_rtNextValidStart;
    REFERENCE_TIME rtStop  = rtStart + m_rtFrameLength;
    m_rtNextValidStart = rtStop;

    pSample->SetTime(&rtStart, &rtStop);
	static long lastTime = 0;
	long time = timeGetTime();
	DbgLog((LOG_TRACE, DBG_VID, TEXT("VID_PUSH: %I64d [%I64d  %I64d] %ld"), rtStream.m_time, rtStart, rtStop, time - lastTime));
	lastTime = time;

	// set sample media times
	rtStart = m_iFrameNumber;
	rtStop = rtStart + 1;
	pSample->SetMediaTime(&rtStart, &rtStop);

    m_iFrameNumber++;

    // Set TRUE on every sample for uncompressed frames
    pSample->SetSyncPoint(TRUE);

    return S_OK;
}
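A condensed sketch of the live-source timestamping policy used above: the first sample is anchored to the current stream time and each following sample starts where the previous one stopped. The struct is a hypothetical stand-in for the pin's members, not its real interface.

typedef long long REFERENCE_TIME;

struct PushTimestamper
{
	REFERENCE_TIME nextStart;
	long long      frameCount;

	PushTimestamper() : nextStart(0), frameCount(0) {}

	void Next(REFERENCE_TIME streamTime, REFERENCE_TIME frameLength,
	          REFERENCE_TIME& rtStart, REFERENCE_TIME& rtStop)
	{
		if (frameCount == 0)
			nextStart = streamTime;		// first sample: current stream time
		rtStart   = nextStart;
		rtStop    = rtStart + frameLength;
		nextStart = rtStop;				// subsequent samples are back-to-back
		++frameCount;
	}
};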
Example #9
HRESULT CBaseStream::FillBuffer(IMediaSample* pSample)
{
    {
        HRESULT hr;
        CAutoLock cAutoLockShared(&m_cSharedState);

        if (m_rtPosition >= m_rtStop) {
            return S_FALSE;
        }

        BYTE* pOut = nullptr;
        if (FAILED(hr = pSample->GetPointer(&pOut)) || !pOut) {
            return S_FALSE;
        }

        int nFrame = (int)(m_rtPosition / m_AvgTimePerFrame);

        long len = pSample->GetSize();

        hr = FillBuffer(pSample, nFrame, pOut, len);
        if (hr != S_OK) {
            return hr;
        }

        pSample->SetActualDataLength(len);

        REFERENCE_TIME rtStart, rtStop;
        // The sample times are modified by the current rate.
        rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime / m_dRateSeeking);
        rtStop  = rtStart + static_cast<int>(m_AvgTimePerFrame / m_dRateSeeking);
        pSample->SetTime(&rtStart, &rtStop);

        m_rtSampleTime += m_AvgTimePerFrame;
        m_rtPosition += m_AvgTimePerFrame;
    }

    pSample->SetSyncPoint(TRUE);

    if (m_bDiscontinuity) {
        pSample->SetDiscontinuity(TRUE);
        m_bDiscontinuity = FALSE;
    }

    return S_OK;
}
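A standalone sketch of the rate-seeking adjustment above with assumed values (40 ms frames at 2x rate): the internal position advances by the nominal frame duration while the presented times are divided by the rate, so playback speeds up.

#include <cstdio>

typedef long long REFERENCE_TIME;

int main()
{
	const REFERENCE_TIME avgTimePerFrame = 400000;	// 40 ms per frame, in 100-ns units
	const double rateSeeking = 2.0;					// 2x playback
	REFERENCE_TIME sampleTime = 0;
	for (int frame = 0; frame < 3; ++frame) {
		REFERENCE_TIME rtStart = (REFERENCE_TIME)(sampleTime / rateSeeking);
		REFERENCE_TIME rtStop  = rtStart + (int)(avgTimePerFrame / rateSeeking);
		printf("frame %d: [%lld, %lld]\n", frame, rtStart, rtStop);	// steps of 200000 units
		sampleTime += avgTimePerFrame;
	}
	return 0;
}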
Example #10
HRESULT CSubtitleStream::OnThreadCreate()
{
	CAutoLock cAutoLockShared(&m_cSharedState);

	if (m_mt.majortype == MEDIATYPE_Video && m_mt.subtype == MEDIASUBTYPE_ARGB32) {
		m_nPosition = m_rtStart/_ATPF;
	} else if (m_mt.majortype == MEDIATYPE_Video && m_mt.subtype == MEDIASUBTYPE_RGB32) {
		int m_nSegments = 0;
		if (!m_rts.SearchSubs((int)(m_rtStart/10000), 10000000/_ATPF, &m_nPosition, &m_nSegments)) {
			m_nPosition = m_nSegments;
		}
	} else {
		m_nPosition = m_rts.SearchSub((int)(m_rtStart/10000), 25);
		if (m_nPosition < 0) {
			m_nPosition = 0;
		} else if (m_rts[m_nPosition].end <= (int)(m_rtStart/10000)) {
			m_nPosition++;
		}
	}

	return CSourceStream::OnThreadCreate();
}
HRESULT CScreenCaptureSourcePin::FillBuffer(IMediaSample *pSample)
{
	FTL::FTLThreadWaitType waitType = _GetWaitType(INFINITE);
	switch (waitType)
	{
	case FTL::ftwtStop:
		return S_FALSE;	//quit
	case FTL::ftwtError:
		return E_UNEXPECTED;
	//case FTL::ftwtContinue:
	//case FTL::ftwtTimeOut:
	default:
		//just continue
		break;
	}

#if 0
	//FUNCTION_BLOCK_TRACE(1);
	CheckPointer(pSample, E_POINTER);
	ASSERT(m_mt.formattype == FORMAT_VideoInfo);

	m_nFrameNumber++;

	//make the samples scheduling
	HRESULT hr = S_OK;
	REFERENCE_TIME	rtLatency = 0;
	if (FAILED(GetLatency(&rtLatency)))
	{
		rtLatency = UNITS / DEFAULT_FPS ;
	}
	REFERENCE_TIME rtStart, rtStop;
	BOOL bShouldDeliver = FALSE;
	do 
	{
		if (m_dwAdviseToken == 0)
		{
			DX_VERIFY(m_pClock->GetTime(&m_rtClockStart));
			//fixed frame rate, so can use AdvisePeriodic
			DX_VERIFY(m_pClock->AdvisePeriodic(m_rtClockStart + rtLatency, 
				rtLatency, (HSEMAPHORE)m_hSemaphore, &m_dwAdviseToken));
		}
		else
		{
			DWORD dwResult = WaitForSingleObject(m_hSemaphore, INFINITE);
		}

		bShouldDeliver = TRUE;
		rtStart = m_rtStart;
		rtStop = m_rtStart + 1;
		DX_VERIFY(pSample->SetTime(&rtStart, &rtStop));
		FTLASSERT(m_pScreenCaptureImpl);
		if (m_pScreenCaptureImpl)
		{
			LPBYTE pBuffer = NULL;
			DX_VERIFY(pSample->GetPointer(&pBuffer));
			VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)m_mt.pbFormat;
			//int nSize = min(pVih->bmiHeader.biSizeImage, (DWORD)cbData);
			HBITMAP hDIB = m_pScreenCaptureImpl->CopyScreenToBitmap(&m_rcCapture, pBuffer, (BITMAPINFO *) &(pVih->bmiHeader));
			DeleteObject(hDIB);
		}

		DX_VERIFY(m_pClock->GetTime(&m_rtClockStop));
		DX_VERIFY(pSample->GetTime(&rtStart, &rtStop));

		if (rtLatency > 0 && rtLatency * 3 < m_rtClockStop - m_rtClockStart)
		{
			//Why?
			m_rtClockStop = m_rtClockStart + rtLatency;
		}
		rtStop = rtStart + (m_rtClockStop - m_rtClockStart);
		m_rtStart = rtStop;
		//lock (m_csPinLock)
		{
			rtStart -= m_rtStreamOffset;
			rtStop -= m_rtStreamOffset;
		}

		DX_VERIFY(pSample->SetMediaTime(&m_nFrameNumber, &m_nFrameNumber));
		DX_VERIFY(pSample->SetTime(&rtStart, &rtStop));
		m_rtClockStart = m_rtClockStop;

		bShouldDeliver = ((rtStart >= 0) && (rtStop >= 0));
		if (bShouldDeliver)
		{
			//lock (m_csPinLock)
			if (m_rtStartAt != -1)
			{
				if (m_rtStartAt > rtStart)
				{
					bShouldDeliver = FALSE;
				}
				else
				{
					if (m_dwStartCookie != 0 && !m_bStartNotified)
					{
						m_bStartNotified = TRUE;
						DX_VERIFY(m_pFilter->NotifyEvent(EC_STREAM_CONTROL_STARTED, (LONG_PTR)this, m_dwStartCookie));
						if (FAILED(hr)) 
						{
							return hr;
						}
					}
				}
			}
			if (!bShouldDeliver)
			{
				//Why?
				continue;
			}
			if (m_rtStopAt != -1)
			{
				if (m_rtStopAt < rtStart)
				{
					if (!m_bStopNotified)
					{
						m_bStopNotified = TRUE;
						if (m_dwStopCookie != 0)
						{
							DX_VERIFY(m_pFilter->NotifyEvent(EC_STREAM_CONTROL_STOPPED, (LONG_PTR)this, m_dwStopCookie));
							if (FAILED(hr))
							{
								return hr;
							}
						}
						bShouldDeliver = m_bShouldFlush;
					}
					else
					{
						bShouldDeliver = FALSE;
					}
					// EOS -- EndOfStream
					if (!bShouldDeliver)
					{
						return S_FALSE;
					}
				}
			}
		}

	} while (!bShouldDeliver);
	return hr;

	//DX_VERIFY(m_pFilter->StreamTime(rtStart));

	//LONGLONG llStartTime = m_ElapseCounter.GetElapseTime();
	//REFERENCE_TIME rtStreamTime = m_rtSampleTime;// llStartTime / 100; // rfStreamTime.GetUnits();
	
//loop:   
	//REFERENCE_TIME rtStart = rtStreamTime; //m_iFrameNumber * m_rtFrameLength;    
	//REFERENCE_TIME rtStop  = rtStart + m_rtFrameLength;
	
	//if (rtStreamTime > rtStop)
	//{
	//	OutputDebugString(L"lost capture \r\n");
	//	++m_iFrameNumber;
	//	goto loop;
	//}
	//while (rtStreamTime < rtStart)
	//{
	//	m_pFilter->StreamTime(rfStreamTime);
	//	rtStreamTime = rfStreamTime.GetUnits();
	//	// REFERENCE_TIME rtWaitTime = rtStart - rtStreamTime;
	//	// ::WaitForSingleObject(m_hWaitEvent, rtWaitTime/10000);
	//}

	BYTE *pData = NULL;
	long cbData = 0;

	{
		DX_VERIFY(pSample->GetPointer(&pData));
		cbData = pSample->GetSize();
		//if (m_bZeroMemory)
		//{
		//	ZeroMemory(pData, cbData);
		//}
		{
			CAutoLock cAutoLockShared(&m_cSharedState);

			ASSERT(m_mt.formattype == FORMAT_VideoInfo);
			VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)m_mt.pbFormat;

			int nSize = min(pVih->bmiHeader.biSizeImage, (DWORD)cbData);

			//*
			HBITMAP hDib = m_pScreenCaptureImpl->CopyScreenToBitmap(&m_rcCapture, pData, (BITMAPINFO *) &(pVih->bmiHeader));
			if (hDib)
			{
				DeleteObject(hDib);
			}
			//CRefTime rtStart = rfStreamTime; //m_rtSampleTime;
			//m_rtSampleTime +=   (LONG) m_iRepeatTime;
			CRefTime rtStop;// = m_ElapseCounter.GetElapseTime() / 100;
			
			DX_VERIFY(m_pFilter->StreamTime(rtStop));

			//m_rtSampleTime = rtStop;

			//ATLTRACE(TEXT("CScreenCaptureSourcePin::FillBuffer , start=%lld(%f ms), stop=%lld(%f ms)\n"),
			//	rtStart, float(rtStart) / 10000,  rtStop, float(rtStop) / 10000);

			DX_VERIFY(pSample->SetTime((REFERENCE_TIME *)&rtStart, (REFERENCE_TIME *)&rtStop));

			//every frame is a sync point
			DX_VERIFY(pSample->SetSyncPoint(TRUE));


			BOOL bWait = FALSE;
			DWORD dwWillWaitTime = 0;
			//LONGLONG llElapseTime = rtStop.GetUnits() - rtStart.GetUnits();
			//	//m_ElapseCounter.GetElapseTime() -  llStartTime;
			//if ( llElapseTime < MILLISECONDS_TO_100NS_UNITS(m_iRepeatTime))
			//{
			//	bWait = TRUE;
			//	dwWillWaitTime = (MILLISECONDS_TO_100NS_UNITS(m_iRepeatTime) - llElapseTime) / 10000;
			//	if (dwWillWaitTime > 1)
			//	{
			//		//WaitForSingleObject(m_hStopEvent, dwWillWaitTime );
			//	}
			//}

		}
	}

	//FTLTRACE(TEXT("llElapseTime = %lld, bWait=%d, dwWillWaitTime=%d\n"), llElapseTime, bWait, dwWillWaitTime);
#endif

	CheckPointer(pSample, E_POINTER);

	HRESULT hr = E_FAIL;

	CRefTime rfStreamTime;
	{
		//CAutoLock cObjectLock(m_pLock);
		DX_VERIFY(m_pFilter->StreamTime(rfStreamTime));
	}
	REFERENCE_TIME rtStreamTime = rfStreamTime.GetUnits();
	if (m_rfMaxRecordTime != 0 && rtStreamTime > m_rfMaxRecordTime)
	{
		//max record time exceeded

		//if there is a preview window, just returning S_FALSE is OK
		//if there is no preview window, the graph cannot be stopped automatically
		m_pFilter->NotifyEvent(TIME_OVER, static_cast<LONG_PTR>(m_rfMaxRecordTime / (UNITS / MILLISECONDS)), 0);
		return S_FALSE;
	}
	REFERENCE_TIME rtStart = 0; 
	REFERENCE_TIME rtStop = 0;

	do 
	{
		rtStart = m_nFrameNumber * m_nAvgTimePerFrame;
		rtStop = rtStart + m_nAvgTimePerFrame;
		if( rtStreamTime > rtStop)
		{
			OutputDebugString(L"lost capture \r\n");
			++m_nFrameNumber;
		}
	} while (rtStreamTime > rtStop);

	while (rtStreamTime < rtStart)
	{
		m_pFilter->StreamTime(rfStreamTime);
		rtStreamTime = rfStreamTime.GetUnits();
		// REFERENCE_TIME rtWaitTime = rtStart - rtStreamTime;
		// ::WaitForSingleObject(m_hWaitEvent, rtWaitTime/10000);
	}

	BYTE *pData = NULL;
	long cbData = 0;
	CAutoLock cAutoLockShared(&m_cSharedState);
	DX_VERIFY(pSample->GetPointer(&pData));
	cbData = pSample->GetSize();

	ASSERT(m_mt.formattype == FORMAT_VideoInfo);
	VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)m_mt.pbFormat;


	int nSize = min(pVih->bmiHeader.biSizeImage, (DWORD)cbData);
	HBITMAP hDib = m_pScreenCaptureImpl->CopyScreenToBitmap(&m_rcCapture, pData, (BITMAPINFO *) &(pVih->bmiHeader));    

	if (hDib)
	{
		if (m_bFirstFrame)
		{
			m_bFirstFrame = FALSE;
			DX_VERIFY(m_pFilter->NotifyEvent(FIRST_FRAME, (LONG_PTR)(hDib), NULL));
		}
		else
		{
			DeleteObject(hDib);
		}
	}  

	//REFERENCE_TIME rtTemp;
	//IReferenceClock *pClock;
	//m_pFilter->GetSyncSource(&pClock);
	//pClock->GetTime(&rtTemp);
	//pClock->Release();    

	//ST_FRAME_TIME *pTmp = new ST_FRAME_TIME();
	//pTmp->llStartTime = rtStart;
	//pTmp->llStopTime = rtStop;
	//pTmp->nFrameIndex = m_iFrameNumber;    
	//m_pFilter->NotifyEvent(FRAME_TIME, (LONG_PTR)pTmp, NULL);

	DX_VERIFY(pSample->SetTime(&rtStart, &rtStop));
	m_nFrameNumber++;

	DX_VERIFY(pSample->SetSyncPoint(TRUE));

	return S_OK;
}
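A hedged sketch of the pacing performed by the two loops above, with the busy-wait replaced by a timed sleep; the original keeps its WaitForSingleObject variant commented out, so this is only an illustration of the idea.

#include <windows.h>

// Both times are in 100-ns REFERENCE_TIME units.
void WaitUntilFrameStart(LONGLONG rtStreamTime, LONGLONG rtStart)
{
	if (rtStreamTime < rtStart)
	{
		DWORD waitMs = (DWORD)((rtStart - rtStreamTime) / 10000);	// 100-ns -> ms
		if (waitMs > 0)
			Sleep(waitMs);
	}
}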
Example #12
HRESULT CFLICStream::FillBuffer(IMediaSample* pSample)
{
	HRESULT hr;

	{
		CAutoLock cAutoLockShared(&m_cSharedState);

        if(m_rtPosition >= m_rtStop)
			return S_FALSE;

		BYTE* pDataIn = m_pFrameBuffer;
		BYTE* pDataOut = NULL;
		if(!pDataIn || FAILED(hr = pSample->GetPointer(&pDataOut)) || !pDataOut)
			return S_FALSE;

		AM_MEDIA_TYPE* pmt;
		if(SUCCEEDED(pSample->GetMediaType(&pmt)) && pmt)
		{
			CMediaType mt(*pmt);
			SetMediaType(&mt);

			DeleteMediaType(pmt);
		}

		int w, h, bpp;
		if(m_mt.formattype == FORMAT_VideoInfo)
		{
			w = ((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader.biWidth;
			h = abs(((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader.biHeight);
			bpp = ((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader.biBitCount;
		}
		else if(m_mt.formattype == FORMAT_VideoInfo2)
		{
			w = ((VIDEOINFOHEADER2*)m_mt.pbFormat)->bmiHeader.biWidth;
			h = abs(((VIDEOINFOHEADER2*)m_mt.pbFormat)->bmiHeader.biHeight);
			bpp = ((VIDEOINFOHEADER2*)m_mt.pbFormat)->bmiHeader.biBitCount;
		}
		else
		{
			return S_FALSE;
		}

		int pitchIn = m_hdr.x;
		int pitchOut = w*bpp>>3;

		int nFrame = m_rtPosition / m_AvgTimePerFrame; // (int)(1.0 * m_rtPosition / m_AvgTimePerFrame + 0.5);

		{
			SeekToNearestKeyFrame(nFrame);

			while(m_nLastFrameNum < nFrame && !m_bFlushing)
				ExtractFrame(++m_nLastFrameNum);

			for(int y = 0, p = min(pitchIn, pitchOut); 
				y < h; 
				y++, pDataIn += pitchIn, pDataOut += pitchOut)
			{
				BYTE* src = pDataIn;
				BYTE* end = src + p;
				DWORD* dst = (DWORD*)pDataOut;
				while(src < end) *dst++ = m_pPalette[*src++];
			}
		}

		pSample->SetActualDataLength(pitchOut*h);

		REFERENCE_TIME rtStart, rtStop;
        // The sample times are modified by the current rate.
        rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime / m_dRateSeeking);
        rtStop  = rtStart + static_cast<int>(m_AvgTimePerFrame / m_dRateSeeking);
        pSample->SetTime(&rtStart, &rtStop);

        m_rtSampleTime += m_AvgTimePerFrame;
        m_rtPosition += m_AvgTimePerFrame;
	}

	pSample->SetSyncPoint(TRUE);

	if(m_bDiscontinuity) 
    {
		pSample->SetDiscontinuity(TRUE);
		m_bDiscontinuity = FALSE;
	}

	return S_OK;
}
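A standalone sketch of the inner conversion loop above: every 8-bit palette index in the source row is looked up in a 256-entry DWORD palette and written as one 32-bit destination pixel.

#include <cstdint>

void ExpandPalettizedRow(const uint8_t* src, uint32_t* dst, int pixels,
                         const uint32_t palette[256])
{
	for (int x = 0; x < pixels; ++x)
		dst[x] = palette[src[x]];	// one palette lookup per output pixel
}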
Example #13
void mmSource::SetStopGraph(int stopGraph)
{
	CAutoLock cAutoLockShared(&m_cSharedState);
	this->stopGraph = stopGraph;
}
Example #14
HRESULT UVCamStream::FillBuffer(IMediaSample *pms) {

	CheckPointer(pms,E_POINTER);
	HRESULT hr;

	{
		CAutoLock cAutoLockShared(&m_cSharedState);

		int wt = 0;
		double now = 0;
		bool ok = false;
		char *at = NULL;
		int at_width = 0;
		int at_height = 0;

		while (wt<10 && !ok) {
			wt++;
			bus.beginRead();
			ShmemImageHeader header;
			ShmemImageRaw cp(bus);
			cp.getHeader(header);
			at = cp.getImage();

			FILETIME tm;
			GetSystemTimeAsFileTime(&tm);

			ULONGLONG ticks = (((ULONGLONG) tm.dwHighDateTime) << 32) + 
				tm.dwLowDateTime;
			now = ((double)ticks)/(1000.0*1000.0*10.0);
			if (firstTime<0) {
				firstTime = now;
			}
			now -= firstTime;

			at_width = header.get(IMGHDR_W);
			at_height = header.get(IMGHDR_H);
			int id = header.get(IMGHDR_TICK);
			if (!haveId) {
				lastId = id;
				haveId = true;
			} else {
				if (id!=lastId) {
					lastChange = now;
					wt = 500;
					ok = true;
				}
			}
			lastId = id;

			if (!ok) {
				bus.endRead();
				Sleep(5);
			}
		}

		if (ok) {
			ok = (now-lastChange<2);
		}

		BYTE *pData = NULL;
		long lDataLen;
		if (FAILED(hr=pms->GetPointer(&pData)))  {
			bus.endRead();
			return S_FALSE;
		}

		lDataLen = pms->GetSize();

		AM_MEDIA_TYPE *nmt = NULL;
		if (pms->GetMediaType(&nmt)==S_OK && nmt) {
			m_mt = *nmt;
			DeleteMediaType(nmt);	// GetMediaType allocates; free it to avoid a leak
		}

		ASSERT(m_mt.formattype == FORMAT_VideoInfo);

		/*
		REFERENCE_TIME rtNow;
		REFERENCE_TIME avgFrameTime = ((VIDEOINFOHEADER*)m_mt.pbFormat)->AvgTimePerFrame;
		rtNow = m_rtLastTime;
		m_rtLastTime += avgFrameTime;
		pms->SetTime(&rtNow, &m_rtLastTime);
		*/

		int ww = ((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader.biWidth;
		int hh = ((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader.biHeight;
		int bb = ((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader.biBitCount;

		DllDbg out; out.init("uvcam",true); out.say("FillBuffer output dimensions", ww, hh);
		out.say("FillBuffer input dimensions", at_width, at_height);
		out.say("FillBuffer sequence, time", (int)at, (int)now);

		// compatibility with current ucanvcam binaries
		if (at_width==0&&at_height==0) {
			at_width = 320;  at_height = 240;
		}

		int pp = bb/8;
		int stride = (ww * pp + 3) & ~3;
		int gap = (at_width-ww)*3;
		if (gap<0) gap = 0;
		if (at==NULL || !ok) {
			// BGR order, apparently
			ct = (ct+1)%256;
			for (int y=0; y<hh; y++) {
				BYTE *base = pData + y*stride;
				for (int x=0; x<ww; x++) {
					base[0] = 255; //(y+ct)%256;
					base[1] = ((y+x+ct)%20==0)?128:0; //ct;
					base[2] = 0; //(x+ct)%256;
					base += pp;
				}
			}
		} else {
			for (int y=((hh<at_height)?hh:at_height)-1; y>=0; y--) {	// last row present in both buffers
				BYTE *base = pData + y*stride;
				for (int x=0; x<ww && x<at_width; x++) {
					base[2] = *at; at++;
					base[1] = *at; at++;
					base[0] = *at; at++;
					base += pp;
				}
				at += gap;
			}
		}
		bus.endRead();

		REFERENCE_TIME avgFrameTime = ((VIDEOINFOHEADER*)m_mt.pbFormat)->AvgTimePerFrame;
		CRefTime rnow;
		m_pParent->StreamTime(rnow);
		REFERENCE_TIME endThisFrame = rnow + avgFrameTime;
		pms->SetTime((REFERENCE_TIME *) &rnow, &endThisFrame);

	}

	pms->SetSyncPoint(TRUE);

	return NOERROR;
} // FillBuffer
//
// FillBuffer
//
// Stuffs the buffer with data
// "they" call this, every so often...
HRESULT CVCamStream::FillBuffer(IMediaSample *pms) 
{	
	// I don't expect these...the parent controls this/us and doesn't call us when it is stopped I guess, so we should always be active...
	ShowOutput("requested audio frame");
	//assert(m_pParent->IsActive()); // one of these can cause freezing on "stop button" in FME
	//assert(!m_pParent->IsStopped());

    CheckPointer(pms,E_POINTER);
    BYTE *pData;
    HRESULT hr = pms->GetPointer(&pData);
    if (FAILED(hr)) {
		ShowOutput("fail 1");
		assert(false);
        return hr;
    }
    
	LONG totalWrote = -1;
	// the real meat -- get all the incoming data
	hr = LoopbackCaptureTakeFromBuffer(pData, pms->GetSize(), NULL, &totalWrote);
	if(FAILED(hr)) {
		// this one can return false during shutdown, so it's actually ok to just return from here...
		// assert(false);
		ShowOutput("shutdown 1");
		return hr;
	}

	CAutoLock cAutoLockShared(&gSharedState); // for the bFirstPacket boolean control, except there's probably still some odd race conditions er other...

	hr = pms->SetActualDataLength(totalWrote);
	if(FAILED(hr)) {
  	  	assert(false);
		return hr;
	}

    // Now set the sample's start and end time stamps...

	WAVEFORMATEX* pwfexCurrent = (WAVEFORMATEX*)m_mt.Format();
	CRefTime sampleTimeUsed = (REFERENCE_TIME)(UNITS * pms->GetActualDataLength()) / 
                     (REFERENCE_TIME)pwfexCurrent->nAvgBytesPerSec;
    CRefTime rtStart;
	if(bFirstPacket) { // either have bFirstPacket or true here...true seemed to help that one guy...
      m_pParent->StreamTime(rtStart); // gets current graph ref time [now] as its "start", as normal "capture" devices would, just in case that's better...
	  if(bFirstPacket)
	    ShowOutput("got an audio first packet or discontinuity detected");
	} else {
		// since there hasn't been discontinuity, I think we should be safe to tell it
		// that this packet starts where the previous packet ended off
		// since that's theoretically accurate...
		// except that it ends up being bad [?]
		// I don't "think" this will hurt graphs that have no reference clock...hopefully...

		rtStart = m_rtPreviousSampleEndTime;

        // CRefTime cur_time;
	    // m_pParent->StreamTime(cur_time);
	    // rtStart = max(rtStart, cur_time);
		// hopefully this avoids this message/error:
		// [libmp3lame @ 00670aa0] Que input is backward in time
        // Audio timestamp 329016 < 329026 invalid, cliping00:05:29.05 bitrate= 738.6kbits/s
        // [libmp3lame @ 00670aa0] Que input is backward in time
	}

	// I once tried to change it to always have monotonicity of timestamps at this point, but it didn't fix any problems, and seems to do all right without it so maybe ok [?]
    m_rtPreviousSampleEndTime = rtStart + sampleTimeUsed;

	// NB that this *can* set it to a negative start time...hmm...which apparently is "ok" when a graph is just starting up it's expected...
    hr = pms->SetTime((REFERENCE_TIME*) &rtStart, (REFERENCE_TIME*) &m_rtPreviousSampleEndTime);
	if (FAILED(hr)) {
		assert(false);
        return hr;
    }
	// if we do SetTime(NULL, NULL) here then VLC can "play" it with directshows buffers of size 0ms.
	// however, then VLC cannot then stream it at all.  So we leave it set to some time, and just require you to have VLC buffers of at least 40 or 50 ms
	// [a possible VLC bug?] http://forum.videolan.org/viewtopic.php?f=14&t=92659&hilit=+50ms

	// whatever SetMediaTime even does...
    // hr = pms->SetMediaTime((REFERENCE_TIME*)&rtStart, (REFERENCE_TIME*)&m_rtPreviousSampleEndTime);
    //m_llSampleMediaTimeStart = m_rtSampleEndTime;

	if (FAILED(hr)) {
		assert(false);
        return hr;
    }

    // Set the sample's properties.
    hr = pms->SetPreroll(FALSE); // tell it that this isn't preroll, so to actually use it...I think.
    if (FAILED(hr)) {
		assert(false);
        return hr;
    }

    hr = pms->SetMediaType(NULL);
    if (FAILED(hr)) {
		assert(false);
        return hr;
    }
   
    hr = pms->SetDiscontinuity(bFirstPacket); // true for the first
    if (FAILED(hr)) {
		assert(false);
        return hr;
    }
    
	// Set TRUE on every sample for PCM audio http://msdn.microsoft.com/en-us/library/windows/desktop/dd407021%28v=vs.85%29.aspx
    hr = pms->SetSyncPoint(TRUE);
	if (FAILED(hr)) {
		assert(false);
        return hr;
    }
	FILTER_STATE State;
	m_pParent->GetState(0, &State);
	
	ShowOutput("sent audio frame, %d blips, state %d", totalBlips, State);

	bFirstPacket = false;
    return S_OK;

} // end FillBuffer
Example #16
HRESULT	videoSource::FillBuffer(IMediaSample *pMediaSample)
{
	_RPT2(_CRT_WARN,"FillBuffer %d %d\n",currentFrame,nr);

	BYTE *pData;
	long lDataLen = pMediaSample->GetSize();
	pMediaSample->GetPointer(&pData);
	
	if (lDataLen < height*scanwidth*3) return E_INVALIDARG;
	
	{
		CAutoLock cAutoLockShared(&m_cSharedState);
		if (currentFrame >= nr || times[currentFrame]*10000000 > m_rtStop) 
		{
			_RPT0(_CRT_WARN,"v stopping\n");
			done=1;
			
			if (stopGraph) return S_FALSE;
			else {
				pMediaSample->SetActualDataLength(0);
				REFERENCE_TIME rtStart,	rtStop;
				
				rtStart	= times[currentFrame-1]*10000000;
				rtStop = m_rtStop;
				pMediaSample->SetTime(&rtStart,	&rtStop);
			
				_RPT0(_CRT_WARN,"v Sleeping \n");
				Sleep(1000);
				return NOERROR;
			}
		}
	
		//height,width,color => flip color,width, flip height
		BYTE* frame = (BYTE*)frames[currentFrame];

		for (int c=0; c<3; c++)
		{
			int c1 = height*width*c;
			int c2 = 2-c;
			for (int w=0; w<width; w++)
			{
				int w1 = height*w;
				int w2 = 3*w;
				for (int h=0; h<height; h++)
				{
					pData[c2+w2+3*scanwidth*(height-1-h)] = frame[c1+w1+h];
				}
			}
		}
	
		REFERENCE_TIME rtStart,	rtStop;
		
		rtStart	= times[currentFrame]*10000000;
		rtStop	= (currentFrame<nr-1)?times[currentFrame+1]*10000000-1:rtStart;
	
		pMediaSample->SetActualDataLength(height*width*3);
		_RPT4(_CRT_WARN,"v SetTime %d %d   %d %d\n",(int)(rtStart>>32),(int)rtStart,(int)(rtStop>>32),(int)rtStop);
		pMediaSample->SetTime(&rtStart,	&rtStop);
	
		currentFrame++;
	}

	pMediaSample->SetSyncPoint(TRUE);
	
	return NOERROR;
}
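A standalone sketch of the pixel shuffle in the triple loop above, assuming the source buffer is plane-major (color plane, then column, then row, as a MATLAB-style column-major image would be) and the output is bottom-up interleaved BGR with a padded scan width.

void PlanarToBottomUpBGR(const unsigned char* planar, unsigned char* out,
                         int width, int height, int scanWidth)
{
	for (int c = 0; c < 3; c++)				// source plane / output channel (R,G,B -> B,G,R)
		for (int w = 0; w < width; w++)
			for (int h = 0; h < height; h++)
				out[(2 - c) + 3 * w + 3 * scanWidth * (height - 1 - h)] =
					planar[c * width * height + w * height + h];
}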
Example #17
// This is where we insert the DIB bits into the video stream.
// FillBuffer is called once for every sample in the stream.
HRESULT CPushPinAudio::FillBuffer(IMediaSample *pSample)
{
	BYTE *pData;
    long cbData;

	CheckPointer(pSample, E_POINTER);

    CAutoLock cAutoLockShared(&m_cSharedState);

    // Access the sample's data buffer
    pSample->GetPointer(&pData);
    cbData = pSample->GetSize();

    // Check that we're still using Audio
    ASSERT(m_mt.formattype == FORMAT_WaveFormatEx);

	WAVEFORMATEX *pwfx = (WAVEFORMATEX *)m_mt.Format();


	// Pause handling
	while(m_blPaused){
		Sleep(10);  // reduce CPU load
		FILTER_STATE fs;
		HRESULT hr = m_pFilter->GetState(100, &fs);
		if (hr != S_OK) {
			break;
		}
		if (fs != State_Running) {
			break;
		}
	}

	// Check the input buffer state
	if (m_pListOfBuffer == NULL)  {
		Sleep(10);  // reduce CPU load
		goto NODATA_SECTION;
	}
	long lngCnt = (long)m_pListOfBuffer->GetCount();
	// To avoid contention, always leave one buffer unprocessed (wait for the next buffer to be produced)
	if (lngCnt <= 1) {
		Sleep(10);  // reduce CPU load
		goto NODATA_SECTION;
	}

	POSITION pos = m_pListOfBuffer->GetHeadPosition();
	if (pos == NULL) goto NODATA_SECTION;
	CWaveBuffer *pBuf = (CWaveBuffer *)m_pListOfBuffer->GetAt(pos);
	if (!pBuf) goto NODATA_SECTION;
	if (pBuf->m_blActive) goto NODATA_SECTION;		// buffer is still being written
	// Copy the input buffer into the stream buffer
	long lngSz = pBuf->GetNumSamples() * pBuf->GetSampleSize();		// smaller when recording stops
	if (!pBuf->m_blDead) {
		BYTE *pDat = (BYTE *)pBuf->GetBuffer();
		CopyMemory(pData, pBuf->GetBuffer(), lngSz);
	}
	// Release the buffer
	if (pBuf) {
		delete pBuf;
		pBuf = NULL;
	}
	m_pListOfBuffer->RemoveHead();

	// Compute the frame start time
	DWORD dwFrame = lngSz * 1000 / pwfx->nAvgBytesPerSec;	// milliseconds in one buffer
	DWORD dwNow = timeGetTime();
	REFERENCE_TIME rtStart = m_rtSampleTime;
    REFERENCE_TIME rtStop  = rtStart + MILLISECONDS_TO_100NS_UNITS(dwFrame);
	m_rtSampleTime = rtStop;

    DbgLog((LOG_TRACE, 1, "rtStart:%ld", rtStart ));
    DbgLog((LOG_TRACE, 1, "rtStop:%ld", rtStop ));
TRACE(_T("audio rtStart:%ld\n"), rtStart);
TRACE(_T("audio rtStop:%ld\n"), rtStop);
	
	pSample->SetTime(&rtStart, &rtStop);
    pSample->SetDiscontinuity(FALSE);
    pSample->SetSyncPoint(TRUE);
    pSample->SetActualDataLength(lngSz);

	return S_OK;

NODATA_SECTION:
	REFERENCE_TIME rtTime = m_rtSampleTime;
	pSample->SetTime(&rtTime, &rtTime);
    pSample->SetDiscontinuity(FALSE);
    pSample->SetSyncPoint(FALSE);
    pSample->SetActualDataLength(0);
	return S_OK;
}
HRESULT propagateBufferOnce() {
	HRESULT hr = S_OK;

    // grab next audio chunk...
	int gotAnyAtAll = FALSE;
	DWORD start_time = timeGetTime();
    while (!shouldStop) {
        UINT32 nNextPacketSize;
        hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize); // get next packet, if one is ready...
        if (FAILED(hr)) {
            ShowOutput("IAudioCaptureClient::GetNextPacketSize failed after %u frames: hr = 0x%08x\n", pnFrames, hr);
            pAudioClient->Stop();
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            pAudioClient->Release();            
            return hr;
        }

        if (0 == nNextPacketSize) {
            // no data yet, we're waiting, between incoming chunks of audio. [occurs even with silence on the line--it just means no new data yet]

			DWORD millis_to_fill = (DWORD) (1.0/SECOND_FRACTIONS_TO_GRAB*1000); // truncate is ok :) -- 1s
			assert(millis_to_fill > 1); // sanity
			DWORD current_time = timeGetTime();
			if((current_time - start_time > millis_to_fill)) {
				// I don't think we ever get to here anymore...thankfully, since it's mostly broken code probably, anyway
				if(!gotAnyAtAll) {
				  // We get here [?]
				  assert(false); // want to know if this ever happens...it never should since we are using silence...
				}
			} else {
			  Sleep(1); // doesn't seem to hurt cpu--"sleep x ms"
			  continue;
			}
        } else {
		  gotAnyAtAll = TRUE;
		  totalSuccessFullyread++;
		}
		
        // get the captured data
        BYTE *pData;
        UINT32 nNumFramesToRead;
        DWORD dwFlags;

		// I guess it gives us...as much audio as possible to read...probably

        hr = pAudioCaptureClient->GetBuffer(
            &pData,
            &nNumFramesToRead,
            &dwFlags,
            NULL,
            NULL
        ); // ACTUALLY GET THE BUFFER which I assume it reads in the format of the fella we passed in
        
        
        if (FAILED(hr)) {
            ShowOutput("IAudioCaptureClient::GetBuffer failed after %u frames: hr = 0x%08x\n", pnFrames, hr);
            pAudioClient->Stop();
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            pAudioClient->Release();            
            return hr;            
        }

		{
  		  CAutoLock cAutoLockShared(&gSharedState);

			if( dwFlags == 0 ) {
			  // the good case, got audio packet
			  // we'll let fillbuffer set bFirstPacket = false; since it uses it to know if the next packet should restart, etc.
			} else if (bFirstPacket && AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY == dwFlags) {
				ShowOutput("Probably spurious glitch reported on first packet, or two discontinuity errors occurred before it read from the cached buffer\n");
				bFirstPacket = true; // won't hurt, even if it is a real first packet :)
				// LODO it should probably clear the buffers if it ever gets discontinuity
				// or "let" it clear the buffers then send the new data on
				// as we have any left-over data that will be assigned a wrong timestamp
				// but it won't be too far wrong, compared to what it would otherwise be with always
				// assigning it the current graph timestamp, like we used to...
			} else if (AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY == dwFlags) {
				  ShowOutput("IAudioCaptureClient::discontinuity GetBuffer set flags to 0x%08x after %u frames\n", dwFlags, pnFrames);
				  // expected your CPU gets behind or what not. I guess.
				  /*pAudioClient->Stop();
				  AvRevertMmThreadCharacteristics(hTask);
				  pAudioCaptureClient->Release();		  
				  pAudioClient->Release();            
				  return E_UNEXPECTED;*/
				  bFirstPacket = true;
			} else if (AUDCLNT_BUFFERFLAGS_SILENT == dwFlags) {
     		  // ShowOutput("IAudioCaptureClient::silence (just) from GetBuffer after %u frames\n", pnFrames);
			  // expected if there's silence (i.e. nothing playing), since we now include the "silence generator" work-around...
			} else {
			  // probably silence + discontinuity
     		  ShowOutput("IAudioCaptureClient::unknown discontinuity GetBuffer set flags to 0x%08x after %u frames\n", dwFlags, pnFrames);
			  bFirstPacket = true; // probably is some type of discontinuity :P
			}

			if(bFirstPacket)
				totalBlips++;

			if (0 == nNumFramesToRead) {
				ShowOutput("death failure: IAudioCaptureClient::GetBuffer said to read 0 frames after %u frames\n", pnFrames);
				pAudioClient->Stop();
				AvRevertMmThreadCharacteristics(hTask);
				pAudioCaptureClient->Release();
				pAudioClient->Release();
				return E_UNEXPECTED;            
			}

			pnFrames += nNumFramesToRead; // increment total count...		

			// lBytesToWrite typically 1792 bytes...
			LONG lBytesToWrite = nNumFramesToRead * nBlockAlign; // nBlockAlign is "audio block size" or frame size, for one audio segment...
			{
			  CAutoLock cObjectLock(&csMyLock);  // Lock the critical section, releases scope after block is over...

			  if(pBufLocalCurrentEndLocation > expectedMaxBufferSize) { 
				// this happens during VLC pauses...
				// I have no idea what I'm doing here... this doesn't fix it, but helps a bit... TODO FINISH THIS
				// it seems like if you're just straight recording then you want this big...otherwise you want it like size 0 and non-threaded [pausing with graphedit, for example]... [?]
				// if you were recording skype, you'd want it non realtime...hmm...
				// it seems that if the cpu is loaded, we run into this if it's for the next packet...hmm...
				// so basically we don't accommodate realtime at all currently...hmmm...
	  			ShowOutput("overfilled buffer, cancelling/flushing."); //over flow overflow appears VLC just keeps reading though, when paused [?] but not graphedit...or does it?
				pBufLocalCurrentEndLocation = 0;
				totalOverflows++;
				bFirstPacket = true;
			  }

			  for(INT i = 0; i < lBytesToWrite && pBufLocalCurrentEndLocation < expectedMaxBufferSize; i++) {
				pBufLocal[pBufLocalCurrentEndLocation++] = pData[i];
			  }
			}
		}
        
        hr = pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
        if (FAILED(hr)) {
            ShowOutput("IAudioCaptureClient::ReleaseBuffer failed after %u frames: hr = 0x%08x\n", pnFrames, hr);
            pAudioClient->Stop();
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            pAudioClient->Release();            
            return hr;            
        }
        
		return hr;
    } // while !got anything && should continue loop

	return S_OK; // stop was called...

}
Example #19
HRESULT CSubtitleStream::FillBuffer(IMediaSample* pSample)
{
	HRESULT hr;

	{
		CAutoLock cAutoLockShared(&m_cSharedState);

		BYTE* pData = NULL;
		if (FAILED(hr = pSample->GetPointer(&pData)) || !pData) {
			return S_FALSE;
		}

		AM_MEDIA_TYPE* pmt;
		if (SUCCEEDED(pSample->GetMediaType(&pmt)) && pmt) {
			CMediaType mt(*pmt);
			SetMediaType(&mt);
			DeleteMediaType(pmt);
		}

		int len = 0;
		REFERENCE_TIME rtStart, rtStop;

		if (m_mt.majortype == MEDIATYPE_Video && m_mt.subtype == MEDIASUBTYPE_ARGB32) {
			rtStart = (REFERENCE_TIME)((m_nPosition*_ATPF - m_rtStart) / m_dRateSeeking);
			rtStop = (REFERENCE_TIME)(((m_nPosition+1)*_ATPF - m_rtStart) / m_dRateSeeking);
			if (m_rtStart+rtStart >= m_rtDuration) {
				return S_FALSE;
			}

			BITMAPINFOHEADER& bmi = ((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader;

			SubPicDesc spd;
			spd.w = _WIDTH;
			spd.h = _HEIGHT;
			spd.bpp = 32;
			spd.pitch = bmi.biWidth*4;
			spd.bits = pData;

			len = spd.h*spd.pitch;

			for (int y = 0; y < spd.h; y++) {
				memsetd((DWORD*)(pData + spd.pitch*y), 0xff000000, spd.w*4);
			}

			RECT bbox;
			m_rts.Render(spd, m_nPosition*_ATPF, 10000000.0/_ATPF, bbox);

			for (int y = 0; y < spd.h; y++) {
				DWORD* p = (DWORD*)(pData + spd.pitch*y);
				for (int x = 0; x < spd.w; x++, p++) {
					*p = (0xff000000-(*p&0xff000000))|(*p&0xffffff);
				}
			}
		} else if (m_mt.majortype == MEDIATYPE_Video && m_mt.subtype == MEDIASUBTYPE_RGB32) {
			const STSSegment* stss = m_rts.GetSegment(m_nPosition);
			if (!stss) {
				return S_FALSE;
			}

			BITMAPINFOHEADER& bmi = ((VIDEOINFOHEADER*)m_mt.pbFormat)->bmiHeader;

			SubPicDesc spd;
			spd.w = _WIDTH;
			spd.h = _HEIGHT;
			spd.bpp = 32;
			spd.pitch = bmi.biWidth*4;
			spd.bits = pData;

			len = spd.h*spd.pitch;

			for (int y = 0; y < spd.h; y++) {
				DWORD c1 = 0xff606060, c2 = 0xffa0a0a0;
				if (y&32) {
					c1 ^= c2, c2 ^= c1, c1 ^= c2;
				}
				DWORD* p = (DWORD*)(pData + spd.pitch*y);
				for (int x = 0; x < spd.w; x+=32, p+=32) {
					memsetd(p, (x&32) ? c1 : c2, min(spd.w-x,32)*4);
				}
			}

			RECT bbox;
			m_rts.Render(spd, 10000i64*(stss->start+stss->end)/2, 10000000.0/_ATPF, bbox);

			rtStart = (REFERENCE_TIME)((10000i64*stss->start - m_rtStart) / m_dRateSeeking);
			rtStop = (REFERENCE_TIME)((10000i64*stss->end - m_rtStart) / m_dRateSeeking);
		} else {
			if ((size_t)m_nPosition >= m_rts.GetCount()) {
				return S_FALSE;
			}

			STSEntry& stse = m_rts[m_nPosition];

			if (stse.start >= m_rtStop/10000) {
				return S_FALSE;
			}

			if (m_mt.majortype == MEDIATYPE_Subtitle && m_mt.subtype == MEDIASUBTYPE_UTF8) {
				CStringA str = UTF16To8(m_rts.GetStrW(m_nPosition, false));
				memcpy((char*)pData, str, len = str.GetLength());
			} else if (m_mt.majortype == MEDIATYPE_Subtitle && (m_mt.subtype == MEDIASUBTYPE_SSA || m_mt.subtype == MEDIASUBTYPE_ASS)) {
				CStringW line;
				line.Format(L"%d,%d,%s,%s,%d,%d,%d,%s,%s",
							stse.readorder, stse.layer, CStringW(stse.style), CStringW(stse.actor),
							stse.marginRect.left, stse.marginRect.right, (stse.marginRect.top+stse.marginRect.bottom)/2,
							CStringW(stse.effect), m_rts.GetStrW(m_nPosition, true));

				CStringA str = UTF16To8(line);
				memcpy((char*)pData, str, len = str.GetLength());
			} else if (m_mt.majortype == MEDIATYPE_Text && m_mt.subtype == MEDIASUBTYPE_NULL) {
				CStringA str = m_rts.GetStrA(m_nPosition, false);
				memcpy((char*)pData, str, len = str.GetLength());
			} else {
				return S_FALSE;
			}

			rtStart = (REFERENCE_TIME)((10000i64*stse.start - m_rtStart) / m_dRateSeeking);
			rtStop = (REFERENCE_TIME)((10000i64*stse.end - m_rtStart) / m_dRateSeeking);
		}

		pSample->SetTime(&rtStart, &rtStop);
		pSample->SetActualDataLength(len);

		m_nPosition++;
	}

	pSample->SetSyncPoint(TRUE);

	if (m_bDiscontinuity) {
		pSample->SetDiscontinuity(TRUE);
		m_bDiscontinuity = FALSE;
	}

	return S_OK;
}
Example #20
HRESULT CTMReceiverOutputPin::FillBuffer(IMediaSample *pms)
{
	//TODO: Fill buffer with the decoded frames.
	CTMReceiverSrc* pFilter = (CTMReceiverSrc*)m_pFilter;
	AVPacket pkt, pktForRecord;
	AVPicture pic;
	BYTE *pData;
	long lDataLen;
	lDataLen = pms->GetSize();
	if (m_pData==NULL)
	{
		m_pData = new BYTE[lDataLen];
	}
	if(pFilter->m_queueBuffer.nb_packets<=0)
	{
		REFERENCE_TIME rtStart, rtStop, rtMediaStart, rtMediaStop;
		// The sample times are modified by the current rate.
		rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime);
		rtStop  = rtStart + static_cast<int>(m_rtAvgTimePerFrame );
		rtMediaStart = static_cast<REFERENCE_TIME>(m_rtPosition);
		rtMediaStop  = rtMediaStart + static_cast<int>(m_rtAvgTimePerFrame );
		pms->SetTime(&rtStart, &rtStop);
		pms->SetMediaTime(&rtMediaStart, &rtMediaStop);
		m_rtSampleTime = m_rtSampleTime + static_cast<int>(m_rtAvgTimePerFrame );
		m_rtPosition = m_rtPosition + m_rtAvgTimePerFrame;
		pms->SetSyncPoint(TRUE);
		Sleep(10);
		//char tmp[1024];
		//sprintf(tmp,"====================No Data!====================\n");
		//OutputDebugStringA(tmp);
		return S_OK;
	}
	av_init_packet(&pkt);
	int maxPktNum = m_bGetAvgFrameTime ? 12 : 7;
	while (pFilter->m_queueBuffer.nb_packets > maxPktNum)
	{
		for(int itmp=1; itmp<=5; itmp++)
		{
			CAutoLock lock(&m_csBuffer);
			pFilter->m_queueBuffer.Get(&pkt,1);
			av_free_packet(&pkt);
		}
		char tmp[1024];
		sprintf(tmp," ===================================Too Many Packets! Pop %d good Packet!\n",pFilter->m_queueBuffer.nb_packets);
		OutputDebugStringA(tmp);
	}

	{
		CAutoLock lock(&m_csBuffer);
		pFilter->m_queueBuffer.Get(&pkt, 1);
		if(pkt.flags & AV_PKT_FLAG_KEY)
		{
	/*char tmp[1024];
	sprintf(tmp,"Key Frame!\n");
	OutputDebugStringA(tmp);*/
		}
	}
	int ret = -1;
	//Record Video
	if(m_bRecordStatus == TRUE)
	{
		if(pkt.flags & AV_PKT_FLAG_KEY)
		{
			m_bFindKeyFrame = TRUE;
		}
		if(m_bFindKeyFrame)
		{
			av_init_packet(&pktForRecord);
			pktForRecord.size = pkt.size;
			pktForRecord.flags = pkt.flags;
			pktForRecord.pts = pts;
			pktForRecord.dts = pts;
			pktForRecord.data = new uint8_t[pktForRecord.size];
			memcpy(pktForRecord.data, pkt.data, pktForRecord.size);
			ret = av_interleaved_write_frame(m_fileSaverCtx, &pktForRecord);
			delete [] pktForRecord.data;
			pktForRecord.data = NULL;
			pktForRecord.size = 0;
			av_init_packet(&pktForRecord);
			av_free_packet(&pktForRecord);
			//pts += m_rtAvgTimePerFrame/1000*9;
			pts++;
		}
	}

	// BEFORE DECODE CB
	TMFrame beforeDecodeFrame;
	beforeDecodeFrame.data = (char *)pkt.data;
	beforeDecodeFrame.len = pkt.size;
	beforeDecodeFrame.decoded = FALSE;
	beforeDecodeFrame.error = FALSE;
	pFilter->CallBeforeDecodeCB(&beforeDecodeFrame);

	ret = -1;
	{
		CAutoLock lock(&m_csDecoder);
		if (m_pDecoder!=NULL)
		{			
			ret = m_pDecoder->DecodeFrame(&pic, m_pData, pkt.data, pkt.size);
		}		
	}

	// AFTER DECODE CB
	TMFrame afterDecodeFrame;
	afterDecodeFrame.data = (char *)pkt.data;
	afterDecodeFrame.len = pkt.size;
	afterDecodeFrame.decoded = TRUE;
	afterDecodeFrame.error = ret <= 0 ? TRUE : FALSE;
	// TODO: construct the pic
	for(int ptr_i=0; ptr_i<AV_NUM_DATA_POINTERS; ptr_i++)
	{
		afterDecodeFrame.pic.data[ptr_i] = pic.data[ptr_i];
		afterDecodeFrame.pic.linesize[ptr_i] = pic.linesize[ptr_i];
	}
	pFilter->CallAfterDecodeCB(&afterDecodeFrame);

	if(ret <=0)
	{
		char tmp[1024];
		sprintf(tmp," ===================================Decode BAD£¬rtSampleTime:%lld\n",m_rtSampleTime);
		OutputDebugStringA(tmp);
		REFERENCE_TIME rtStart, rtStop, rtMediaStart, rtMediaStop;
		// The sample times are modified by the current rate.
		rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime);
		rtStop  = rtStart + static_cast<int>(m_rtAvgTimePerFrame );
		rtMediaStart = static_cast<REFERENCE_TIME>(m_rtPosition);
		rtMediaStop  = rtMediaStart + static_cast<int>(m_rtAvgTimePerFrame );
		pms->SetTime(&rtStart, &rtStop);
		pms->SetMediaTime(&rtMediaStart, &rtMediaStop);
		m_rtSampleTime = rtStop;
		m_rtPosition = m_rtPosition + m_rtAvgTimePerFrame;
		pms->SetSyncPoint(TRUE);
		return S_OK;
	}


	pms->GetPointer(&pData);

	USES_CONVERSION;
	ZeroMemory(pData, lDataLen);	
	{
		CAutoLock cAutoLockShared(&m_cSharedState);	
		memcpy(pData,m_pData,lDataLen);
		//hack the 1920*1088, the last 8 line should be set to 0.
		if(pFilter->GetImageHeight() == 1088)
		{
			memset(pData, 0, pFilter->GetImageWidth()*8*sizeof(RGBQUAD));
		}
		//hack the 720*576, the first and last 2 lines should be set to 0.
		if(pFilter->GetImageHeight() == 576)
		{
			memset(pData, 0, pFilter->GetImageWidth()*2*sizeof(RGBQUAD));
			memset(pData + pFilter->GetImageWidth()*(pFilter->GetImageHeight()-2)*sizeof(RGBQUAD), 0, pFilter->GetImageWidth()*2*sizeof(RGBQUAD));
		}
		REFERENCE_TIME rtStart, rtStop, rtMediaStart, rtMediaStop;
		// The sample times are modified by the current rate.
		//rtStart = static_cast<REFERENCE_TIME>(m_rtSampleTime);
		if(m_rtFirstFrameTime == 0)
		{
			m_rtFirstFrameTime = pkt.pts ;
		}
		rtStart = (pkt.pts  - m_rtFirstFrameTime)*100/9*10 - 1000;
		if(rtStart > 0 && !m_bGetAvgFrameTime)
		{
			m_rtAvgTimePerFrame = rtStart - 0;
			m_bGetAvgFrameTime = TRUE;
		}
		//Guess FPS
		if(m_bGetAvgFrameTime && !m_bFPSGuessed)
		{
			CTMReceiverSrc *pFilter = (CTMReceiverSrc *)m_pFilter;
			AVCodecContext *pCodecCtx = pFilter->m_pFormatContext->streams[pFilter->m_videoStreamIndex]->codec;
			if(pCodecCtx->time_base.den > 0 && pCodecCtx->time_base.num > 0 && pCodecCtx->ticks_per_frame > 0 && m_bGetAvgFrameTime > 0)
			{
				FPS = pCodecCtx->time_base.den / (pCodecCtx->time_base.num * pCodecCtx->ticks_per_frame * m_bGetAvgFrameTime);
			}
			m_bFPSGuessed = TRUE;
		}
		rtStart = rtStart < m_rtPosition ? rtStart : m_rtPosition;
		rtStop  = rtStart + static_cast<int>(m_rtAvgTimePerFrame );
		rtMediaStart = static_cast<REFERENCE_TIME>(m_rtPosition);
		rtMediaStop  = rtMediaStart + static_cast<int>(m_rtAvgTimePerFrame );
		pms->SetTime(&rtStart, &rtStop);
		pms->SetMediaTime(&rtMediaStart, &rtMediaStop);
		m_rtSampleTime = rtStop;
		m_rtPosition = m_rtPosition + m_rtAvgTimePerFrame; 
		//char tmp[1024];
		//sprintf(tmp," Src Filter:Channel:%d__PTS:%lld__rtStart:%lld\n", pFilter->m_relatedChannel, pkt.pts, rtStart);
		//OutputDebugStringA(tmp);
	}
	pms->SetSyncPoint(TRUE);

//For debug
//char tmp2[1024];
//sprintf(tmp2,"Channel %d__Fill Buffer Finallly %d!\n", pFilter->m_relatedChannel, frame_count);
//OutputDebugStringA(tmp2);

	av_free_packet(&pkt);

	//CallBack
	//DecodeCallback decodeCB = NULL;
	//void *pCBParam = NULL;
	//HRESULT hr = pFilter->m_pConfigManager->GetDecodeCB(pFilter->m_relatedChannel, &decodeCB, &pCBParam);
	//if(SUCCEEDED(hr) && decodeCB != NULL)
	//{
	//	decodeCB(m_pData, lDataLen, pCBParam);
	//}
	return S_OK;
}