Example #1
bool spk2mt(Speakers spk, CMediaType &mt, int i)
{
  if (spk.format == FORMAT_SPDIF)
  {
    // SPDIF media types
    if (i < 0 || i >= 2)
      return false;

    std::auto_ptr<WAVEFORMATEX> wfe(spk2wfe(spk, 0));
    if (!wfe.get())
      return false;

    mt.SetType(&MEDIATYPE_Audio);
    mt.SetSubtype(i == 0 ? &MEDIASUBTYPE_DOLBY_AC3_SPDIF : &MEDIASUBTYPE_PCM);
    mt.SetFormatType(&FORMAT_WaveFormatEx);
    mt.SetFormat((BYTE*)wfe.get(), sizeof(WAVEFORMATEX) + wfe->cbSize);
    return true;
  }
  else if (FORMAT_MASK(spk.format) & FORMAT_CLASS_PCM)
  {
    // PCM media types
    std::auto_ptr<WAVEFORMATEX> wfe(spk2wfe(spk, i));
    if (!wfe.get())
      return false;

    mt.SetType(&MEDIATYPE_Audio);
    mt.SetSubtype(&MEDIASUBTYPE_PCM);
    mt.SetFormatType(&FORMAT_WaveFormatEx);
    mt.SetFormat((BYTE*)wfe.get(), sizeof(WAVEFORMATEX) + wfe->cbSize);
    return true;
  }
  else
    return false;
}
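The index parameter i lets a caller enumerate every media type available for a given format until the function returns false. A minimal caller sketch, assuming the Speakers type and spk2wfe() helper from the same codebase (EnumerateMediaTypes itself is hypothetical):

void EnumerateMediaTypes(Speakers spk)
{
  CMediaType mt;
  for (int i = 0; spk2mt(spk, mt, i); i++)
  {
    // mt now holds MEDIATYPE_Audio with a WAVEFORMATEX-based format block;
    // offer it to a downstream pin, log it, etc.
  }
}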
Example #2
void CDeMultiplexer::GetAudioStreamPMT(CMediaType& pmt)
{
  // Fake audio in use
  if (m_AudioStreamType == NO_STREAM)
  {
    pmt.InitMediaType();
    pmt.SetType(&MEDIATYPE_Audio);
    pmt.SetSubtype(&MEDIASUBTYPE_DOLBY_AC3);
    pmt.SetSampleSize(1);
    pmt.SetTemporalCompression(FALSE);
    pmt.SetVariableSize();
    pmt.SetFormatType(&FORMAT_WaveFormatEx);
    pmt.SetFormat(AC3AudioFormat, sizeof(AC3AudioFormat));
  }
  else
    pmt = m_audioParser->pmt;
}
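AC3AudioFormat is a prebuilt format blob that is not shown in this excerpt. Example #3 below casts the attached format block to WAVEFORMATEXTENSIBLE, so the blob must be at least that large; a hypothetical stand-in could be initialized like this (the field values are illustrative, not the project's actual table):

static BYTE AC3AudioFormat[sizeof(WAVEFORMATEXTENSIBLE)];

static void InitFakeAC3Format()
{
  WAVEFORMATEXTENSIBLE* wfe = (WAVEFORMATEXTENSIBLE*)AC3AudioFormat;
  ZeroMemory(wfe, sizeof(*wfe));
  wfe->Format.wFormatTag     = WAVE_FORMAT_DOLBY_AC3;  // as patched in Example #3
  wfe->Format.nChannels      = 6;
  wfe->Format.nSamplesPerSec = 48000;
  wfe->Format.cbSize         = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
}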
Example #3
Packet* CClip::GenerateFakeAudio(REFERENCE_TIME rtStart)
{
  if (rtStart + FAKE_AUDIO_DURATION - 1 > playlistFirstPacketTime + clipDuration) 
    superceeded |= SUPERCEEDED_AUDIO_RETURN;
  
  if (superceeded&SUPERCEEDED_AUDIO_RETURN) 
    return NULL;
  
  if (!FakeAudioAvailable()) 
    return NULL;

  Packet* packet = new Packet();
  packet->nClipNumber = nClip;
    
  packet->SetCount(AC3_FRAME_LENGTH);
  packet->SetData(ac3_sample, AC3_FRAME_LENGTH);
  packet->rtStart = rtStart;
  packet->rtStop = packet->rtStart + 1;

  if (firstAudio)
  {
    CMediaType pmt;
    pmt.InitMediaType();
    pmt.SetType(&MEDIATYPE_Audio);
    pmt.SetSubtype(&MEDIASUBTYPE_DOLBY_AC3);
    pmt.SetSampleSize(1);
    pmt.SetTemporalCompression(FALSE);
    pmt.SetVariableSize();
    pmt.SetFormatType(&FORMAT_WaveFormatEx);
    pmt.SetFormat(AC3AudioFormat, sizeof(AC3AudioFormat));
    WAVEFORMATEXTENSIBLE* wfe = (WAVEFORMATEXTENSIBLE*)pmt.pbFormat;
    wfe->Format.nChannels = 6;
    wfe->Format.nSamplesPerSec = 48000;
    wfe->Format.wFormatTag = WAVE_FORMAT_DOLBY_AC3;

    packet->pmt = CreateMediaType(&pmt);
  }
  
  audioPlaybackPosition += FAKE_AUDIO_DURATION;
  lastAudioPosition += FAKE_AUDIO_DURATION;

  return packet;
}
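A packet that carries a pmt owns an AM_MEDIA_TYPE allocated by CreateMediaType(), which must eventually be released with DeleteMediaType(). A sketch of the consuming side (ConsumePacket is hypothetical; Packet is assumed to be the class used above):

void ConsumePacket(Packet* packet)
{
  if (packet->pmt)
  {
    // ... apply the dynamic format change on the output pin ...
    DeleteMediaType(packet->pmt);  // frees what CreateMediaType() allocated
    packet->pmt = NULL;
  }
  delete packet;
}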
Example #4
HRESULT CFSTSplitterFilter::Initialize(IPin *pPin, IAsyncReader *pReader)
{
	// Check and validate the pointer
	CheckPointer(pReader, E_POINTER);
	ValidateReadPtr(pReader, sizeof(IAsyncReader));

	// Read file header
	FST_HEADER header;
	HRESULT hr = pReader->SyncRead(0, sizeof(header), (BYTE*)&header);
	if (hr != S_OK)
		return hr;

	// Verify file header
	if (header.dwID != FST_ID_2TSF)
		return VFW_E_INVALID_FILE_FORMAT;

	// Protect the filter data
	CAutoLock datalock(&m_csData);

	// Set video stream info
	m_nVideoFrames		= header.nVideoFrames;
	m_nFramesPerSecond	= header.nFramesPerSecond;

	// Allocate frame table
	m_pFrameTable = (FST_FRAME_ENTRY*)CoTaskMemAlloc(m_nVideoFrames * sizeof(FST_FRAME_ENTRY));
	if (m_pFrameTable == NULL) {
		Shutdown();
		return E_OUTOFMEMORY;
	}

	// Read in the frame table
	hr = pReader->SyncRead(sizeof(header), m_nVideoFrames * sizeof(FST_FRAME_ENTRY), (BYTE*)m_pFrameTable);
	if (hr != S_OK) {
		Shutdown();
		return hr;
	}

	// Walk the frame table and determine the maximum frame data sizes
	DWORD cbMaxImage = 0, cbMaxSound = 0;
	for (DWORD iFrame = 0; iFrame < m_nVideoFrames; iFrame++) {
		if (m_pFrameTable[iFrame].cbImage > cbMaxImage)
			cbMaxImage = m_pFrameTable[iFrame].cbImage;
		if (m_pFrameTable[iFrame].cbSound > cbMaxSound)
			cbMaxSound = m_pFrameTable[iFrame].cbSound;
	}

	// Set file positions
	m_llDefaultStart	= (LONGLONG)sizeof(FST_HEADER) + m_nVideoFrames * sizeof(FST_FRAME_ENTRY);	// Right after the header and frame table
	m_llDefaultStop		= MAXLONGLONG; // Defaults to file end

	// Decide on the input pin properties
	m_cbInputAlign	= 1;
	m_cbInputBuffer	= cbMaxImage + cbMaxSound;

	// Protect the output pins state
	CAutoLock pinlock(&m_csPins);

	// Decide on the output pins count
	m_nOutputPins = 1;	// Video is always present

	// Check if we have soundtrack
	if (
		(header.dwAudioSampleRate	!= 0) &&
		(header.nAudioBits			!= 0) &&
		(m_pFrameTable[0].cbSound	!= 0)
	)
		m_nOutputPins++;

	// Create output pins array
	ASSERT(m_ppOutputPin == NULL);
	m_ppOutputPin = new CParserOutputPin*[m_nOutputPins];
	if (m_ppOutputPin == NULL) {
		m_nOutputPins = 0;
		Shutdown();
		return E_OUTOFMEMORY;
	}

	// Reset the output pin array elements to NULLs
	for (int i = 0; i < m_nOutputPins; i++)
		m_ppOutputPin[i] = NULL;

	// Allocate video media type
	CMediaType *pmtVideo = new CMediaType();
	if (pmtVideo == NULL) {
		Shutdown();
		return E_OUTOFMEMORY;
	}

	// Initialize the video media type
	pmtVideo->InitMediaType();
	pmtVideo->SetType(&MEDIATYPE_Video);
	pmtVideo->SetSubtype(&MEDIASUBTYPE_FSTVideo);
	pmtVideo->SetSampleSize(0);
	pmtVideo->SetTemporalCompression(TRUE);
	pmtVideo->SetFormatType(&FORMAT_FSTVideo);
	if (!pmtVideo->SetFormat((BYTE*)&header, sizeof(header))) {
		delete pmtVideo;
		Shutdown();
		return E_FAIL;
	}

	// Allocate the video allocator properties
	ALLOCATOR_PROPERTIES *papVideo = (ALLOCATOR_PROPERTIES*)CoTaskMemAlloc(sizeof(ALLOCATOR_PROPERTIES));
	if (papVideo == NULL) {
		delete pmtVideo;
		Shutdown();
		return E_OUTOFMEMORY;
	}

	// Set the video allocator properties
	papVideo->cbAlign	= 0;	// No matter
	papVideo->cbPrefix	= 0;	// No matter
	papVideo->cBuffers	= 4;	// TEMP: No need to set larger value?
	papVideo->cbBuffer	= cbMaxImage;

	// Allocate the time formats array. If this fails it's not an error:
	// we'll just set zero seeker parameters and proceed
	DWORD dwVideoCapabilities = 0;
	int nVideoTimeFormats = 0;
	GUID *pVideoTimeFormats = (GUID*)CoTaskMemAlloc(3 * sizeof(GUID));
	if (pVideoTimeFormats) {

		nVideoTimeFormats = 3;

		// Fill in the time formats array
		pVideoTimeFormats[0] = TIME_FORMAT_MEDIA_TIME;
		pVideoTimeFormats[1] = TIME_FORMAT_FRAME;
		pVideoTimeFormats[2] = TIME_FORMAT_SAMPLE;

		dwVideoCapabilities =	AM_SEEKING_CanGetCurrentPos	|
								AM_SEEKING_CanGetStopPos	|
								AM_SEEKING_CanGetDuration;
	}

	// Create video output pin (always the first one!)
	hr = NOERROR;
	m_ppOutputPin[0] = new CParserOutputPin(
		NAME("FST Splitter Video Output Pin"),
		this,
		&m_csFilter,
		pmtVideo,
		papVideo,
		dwVideoCapabilities,
		nVideoTimeFormats,
		pVideoTimeFormats,
		&hr,
		wszFSTVideoOutputName
	);
	if (
		(FAILED(hr)) ||
		(m_ppOutputPin[0] == NULL)
	) {
		if (m_ppOutputPin[0]) {
			delete m_ppOutputPin[0];
			m_ppOutputPin[0] = NULL;
		} else {
			delete pmtVideo;
			CoTaskMemFree(papVideo);
			if (pVideoTimeFormats)
				CoTaskMemFree(pVideoTimeFormats);
		}
		Shutdown();

		if (FAILED(hr))
			return hr;
		else
			return E_OUTOFMEMORY;
	}
	// Hold a reference on the video output pin
	m_ppOutputPin[0]->AddRef();

	// We've created a new pin -- so increment pin version
	IncrementPinVersion();

	if (m_nOutputPins > 1) {

		// Allocate audio media type
		CMediaType *pmtAudio = new CMediaType();
		if (pmtAudio == NULL) {
			Shutdown();
			return E_OUTOFMEMORY;
		}

		// Initialize the audio media type
		pmtAudio->InitMediaType();
		pmtAudio->SetType(&MEDIATYPE_Audio);
		pmtAudio->SetSubtype(&MEDIASUBTYPE_PCM);
		pmtAudio->SetSampleSize(header.nAudioBits * (header.wAudioChannels + 1) / 8);
		pmtAudio->SetTemporalCompression(FALSE);
		pmtAudio->SetFormatType(&FORMAT_WaveFormatEx);
		WAVEFORMATEX *pFormat = (WAVEFORMATEX*)pmtAudio->AllocFormatBuffer(sizeof(WAVEFORMATEX));
		if (pFormat == NULL) {
			delete pmtAudio;
			Shutdown();
			return E_OUTOFMEMORY;
		}

		// Fill in the audio format block
		pFormat->wFormatTag			= WAVE_FORMAT_PCM;
		pFormat->nChannels			= header.wAudioChannels + 1; // TEMP: Is it really so?
		pFormat->nSamplesPerSec		= header.dwAudioSampleRate;
		pFormat->nAvgBytesPerSec	= pFormat->nChannels * header.nAudioBits * header.dwAudioSampleRate / 8;
		pFormat->nBlockAlign		= pFormat->nChannels * header.nAudioBits / 8;
		pFormat->wBitsPerSample		= header.nAudioBits;
		pFormat->cbSize				= 0;

		// Allocate the audio allocator properties
		ALLOCATOR_PROPERTIES *papAudio = (ALLOCATOR_PROPERTIES*)CoTaskMemAlloc(sizeof(ALLOCATOR_PROPERTIES));
		if (papAudio == NULL) {
			delete pmtAudio;
			Shutdown();
			return E_OUTOFMEMORY;
		}

		// Set the audio allocator properties
		papAudio->cbAlign	= 0;	// No matter
		papAudio->cbPrefix	= 0;	// No matter
		papAudio->cBuffers	= 4;	// No use to set different from video value
		papAudio->cbBuffer	= cbMaxSound;

		// Set the wave format parameters needed for the calculation
		// of sample stream and media duration
		m_nSampleSize		= pFormat->nBlockAlign;
		m_nAvgBytesPerSec	= pFormat->nAvgBytesPerSec;

		// Allocate the time formats array. If this fails it's not an error:
		// we'll just set zero seeker parameters and proceed
		DWORD dwAudioCapabilities = 0;
		int nAudioTimeFormats = 0;
		GUID *pAudioTimeFormats = (GUID*)CoTaskMemAlloc(3 * sizeof(GUID));
		if (pAudioTimeFormats) {

			nAudioTimeFormats = 3;

			// Fill in the time formats array
			pAudioTimeFormats[0] = TIME_FORMAT_MEDIA_TIME;
			pAudioTimeFormats[1] = TIME_FORMAT_SAMPLE;
			pAudioTimeFormats[2] = TIME_FORMAT_BYTE;

			dwAudioCapabilities	=	AM_SEEKING_CanGetCurrentPos	|
									AM_SEEKING_CanGetStopPos	|
									AM_SEEKING_CanGetDuration;
		}

		// Create audio output pin
		hr = NOERROR;
		m_ppOutputPin[1] = new CParserOutputPin(
			NAME("FST Splitter Audio Output Pin"),
			this,
			&m_csFilter,
			pmtAudio,
			papAudio,
			dwAudioCapabilities,
			nAudioTimeFormats,
			pAudioTimeFormats,
			&hr,
			wszFSTAudioOutputName
		);
		if (
			(FAILED(hr)) ||
			(m_ppOutputPin[1] == NULL)
		) {
			if (m_ppOutputPin[1]) {
				delete m_ppOutputPin[1];
				m_ppOutputPin[1] = NULL;
			} else {
				delete pmtAudio;
				CoTaskMemFree(papAudio);
				if (pAudioTimeFormats)
					CoTaskMemFree(pAudioTimeFormats);
			}
			Shutdown();

			if (FAILED(hr))
				return hr;
			else
				return E_OUTOFMEMORY;
		}
		// Hold a reference on the audio output pin
		m_ppOutputPin[1]->AddRef();

		// We've created a new pin -- so increment pin version
		IncrementPinVersion();
	}

	// Scope for the locking
	{
		// Protect media content information
		CAutoLock infolock(&m_csInfo);

		// Set the media content strings
		m_wszAuthorName = (OLECHAR*)CoTaskMemAlloc(sizeof(OLECHAR) * (lstrlenW(wszFSTAuthorName) + 1));
		if (m_wszAuthorName)
			lstrcpyW(m_wszAuthorName, wszFSTAuthorName);
		m_wszDescription = (OLECHAR*)CoTaskMemAlloc(sizeof(OLECHAR) * (lstrlenW(wszFSTDescription) + 1));
		if (m_wszDescription)
			lstrcpyW(m_wszDescription, wszFSTDescription);
	}

	return NOERROR;
}
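The FST structures themselves are not shown in the excerpt. The layout below is inferred purely from the fields this function reads; the field order, widths, and any omitted members are assumptions, not the authoritative definition.

#pragma pack(push, 1)
struct FST_HEADER {
	DWORD	dwID;				// must equal FST_ID_2TSF
	DWORD	nVideoFrames;		// number of entries in the frame table
	DWORD	nFramesPerSecond;
	DWORD	dwAudioSampleRate;	// 0 if there is no soundtrack
	WORD	nAudioBits;			// 0 if there is no soundtrack
	WORD	wAudioChannels;		// apparently stored as (channels - 1), per the code above
};
struct FST_FRAME_ENTRY {
	DWORD	cbImage;			// size of the frame's video data
	DWORD	cbSound;			// size of the frame's audio data
};
#pragma pack(pop)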
Example #5
HRESULT XnVideoStream::GetStreamCapability(int iIndex, CMediaType& mediaType, VIDEO_STREAM_CONFIG_CAPS& vscc)
{
	// check bounds
	if(iIndex < 0 || iIndex >= int(m_aSupportedModes.GetSize()))
	{
		xnLogVerbose(XN_MASK_FILTER, "GetStreamCapability() - Index %d is out of bounds!", iIndex);
		return S_FALSE;
	}

	VIDEOINFO *pvi = (VIDEOINFO*)mediaType.AllocFormatBuffer(sizeof(VIDEOINFO));
	if(NULL == pvi)
		return(E_OUTOFMEMORY);

	ZeroMemory(pvi, sizeof(VIDEOINFO));

	int xRes = m_aSupportedModes[iIndex].OutputMode.nXRes;
	int yRes = m_aSupportedModes[iIndex].OutputMode.nYRes;

	XnUInt64 nFrameTime = 10000000 / m_aSupportedModes[iIndex].OutputMode.nFPS;

	if (m_aSupportedModes[iIndex].Format == XN_PIXEL_FORMAT_RGB24)
	{
		pvi->bmiHeader.biCompression = BI_RGB;
	}
	else if (m_aSupportedModes[iIndex].Format == XN_PIXEL_FORMAT_MJPEG)
	{
		// 'GPJM' is the FOURCC "MJPG" written as a little-endian
		// multi-character constant (equivalent to MAKEFOURCC('M','J','P','G'))
		pvi->bmiHeader.biCompression = 'GPJM';
	}
	else
	{
		xnLogError(XN_MASK_FILTER, "Unknown format type!");
		return E_UNEXPECTED;
	}

	pvi->bmiHeader.biBitCount	= 24;
	pvi->bmiHeader.biSize       = sizeof(BITMAPINFOHEADER);
	pvi->bmiHeader.biWidth      = xRes;
	pvi->bmiHeader.biHeight     = yRes;
	pvi->bmiHeader.biPlanes     = 1;
	pvi->bmiHeader.biSizeImage  = GetBitmapSize(&pvi->bmiHeader);
	pvi->bmiHeader.biClrImportant = 0;

	SetRectEmpty(&(pvi->rcSource)); // we want the whole image area rendered.
	SetRectEmpty(&(pvi->rcTarget)); // no particular destination rectangle

	pvi->dwBitRate = 
		GetBitmapSize(&pvi->bmiHeader) * // bytes per frame
		m_aSupportedModes[iIndex].OutputMode.nFPS * // frames per second
		8; // bits per byte

	pvi->dwBitErrorRate = 0; // assume no errors
	pvi->AvgTimePerFrame = nFrameTime;

	mediaType.SetType(&MEDIATYPE_Video);
	mediaType.SetFormatType(&FORMAT_VideoInfo);
	mediaType.SetTemporalCompression(FALSE);

	// Work out the GUID for the subtype from the header info.
	const GUID SubTypeGUID = GetBitmapSubtype(&pvi->bmiHeader);
	mediaType.SetSubtype(&SubTypeGUID);
	mediaType.SetSampleSize(pvi->bmiHeader.biSizeImage);

	vscc.guid = FORMAT_VideoInfo;
	vscc.VideoStandard = AnalogVideo_None;
	vscc.InputSize.cx = xRes;
	vscc.InputSize.cy = yRes;
	vscc.MinCroppingSize.cx = xRes;
	vscc.MinCroppingSize.cy = yRes;
	vscc.MaxCroppingSize.cx = xRes;
	vscc.MaxCroppingSize.cy = yRes;
	vscc.CropGranularityX = 1;
	vscc.CropGranularityY = 1;
	vscc.CropAlignX = 1;
	vscc.CropAlignY = 1;

	vscc.MinOutputSize.cx = xRes;
	vscc.MinOutputSize.cy = yRes;
	vscc.MaxOutputSize.cx = xRes;
	vscc.MaxOutputSize.cy = yRes;
	vscc.OutputGranularityX = 1;
	vscc.OutputGranularityY = 1;
	vscc.StretchTapsX = 0;
	vscc.StretchTapsY = 0;
	vscc.ShrinkTapsX = 0;
	vscc.ShrinkTapsY = 0;
	// Frame interval is in 100 nanosecond units
	vscc.MinFrameInterval = nFrameTime;
	vscc.MaxFrameInterval = nFrameTime;
	vscc.MinBitsPerSecond = 
		mediaType.GetSampleSize() * // bytes in frame
		m_aSupportedModes[iIndex].OutputMode.nFPS * // frames per second
		8; // bits per byte
	vscc.MaxBitsPerSecond = vscc.MinBitsPerSecond;

	return S_OK;
}
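A capability query with this shape typically sits behind IAMStreamConfig::GetStreamCaps(). A hedged sketch of such a wrapper (this method is an assumption, not code taken from XnVideoStream):

STDMETHODIMP XnVideoStream::GetStreamCaps(int iIndex, AM_MEDIA_TYPE** ppmt, BYTE* pSCC)
{
	CheckPointer(ppmt, E_POINTER);
	CheckPointer(pSCC, E_POINTER);

	CMediaType mediaType;
	VIDEO_STREAM_CONFIG_CAPS vscc;
	HRESULT hr = GetStreamCapability(iIndex, mediaType, vscc);
	if (hr != S_OK)
		return hr;

	*ppmt = CreateMediaType(&mediaType);	// caller frees with DeleteMediaType()
	if (*ppmt == NULL)
		return E_OUTOFMEMORY;
	memcpy(pSCC, &vscc, sizeof(vscc));		// caller supplies a VIDEO_STREAM_CONFIG_CAPS-sized buffer
	return S_OK;
}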
Example #6
HRESULT CConvert::AddVideoGroup(double dSourceFramerate, long nSourceWidth, long nSourceHeight)
{

    // make the root group/composition
	HRESULT hr = S_OK;
    hr = m_pTimeline->CreateEmptyNode(&m_pVideoGroupObj, TIMELINE_MAJOR_TYPE_GROUP);
    if(FAILED( hr )) 
    {
        return hr;
    }

	CComQIPtr<IAMTimelineGroup, &IID_IAMTimelineGroup> pVideoGroup(m_pVideoGroupObj);
 
	//// Set Media Type
	CMediaType VideoGroupType;
	VideoGroupType.SetType(&MEDIATYPE_Video);
	VideoGroupType.SetSubtype(&MEDIASUBTYPE_RGB24);
	VideoGroupType.SetFormatType(&FORMAT_VideoInfo);
	
	VIDEOINFOHEADER *pVideoHeader = (VIDEOINFOHEADER*)VideoGroupType.AllocFormatBuffer(sizeof(VIDEOINFOHEADER));
	if (pVideoHeader == NULL)
		return E_OUTOFMEMORY;	// AllocFormatBuffer can fail
	ZeroMemory(pVideoHeader, sizeof(VIDEOINFOHEADER));
	
	pVideoHeader->bmiHeader.biBitCount = 24;
	pVideoHeader->bmiHeader.biWidth = nSourceWidth;
	pVideoHeader->bmiHeader.biHeight = nSourceHeight;
	pVideoHeader->bmiHeader.biPlanes = 1;
	pVideoHeader->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
	pVideoHeader->bmiHeader.biSizeImage = DIBSIZE(pVideoHeader->bmiHeader);

	VideoGroupType.SetSampleSize(DIBSIZE(pVideoHeader->bmiHeader));
	
	hr = pVideoGroup->SetMediaType(&VideoGroupType);
    if(FAILED( hr )) 
    {
        return hr;
    }

	
//	double dRequiredInputFramerate = 0;
//	m_pMPEGWriterProps->SetSourceFramerate(dSourceFramerate, &dRequiredInputFramerate);

// 	hr = pVideoGroup->SetOutputFPS(15);


	/*
	if (GetOutputFPS() != 0)
	{
		// the user set a framerate
		hr = pVideoGroup->SetOutputFPS(GetOutputFPS());
		GetMPEGWriterProps()->OverrideSourceFPS(GetOutputFPS());
	}
	else if (IsFrameRateSupported((float)dSourceFramerate))
	{
		// the user did not set a framerate. If the source
		// framerate is supported, we use it.
		hr = pVideoGroup->SetOutputFPS(dSourceFramerate);
		GetMPEGWriterProps()->OverrideSourceFPS((float)dSourceFramerate);
	}
	else
	{
		// the user did not want a framerate, and the framerate
		// of the file is not supported. We use 25fps
		hr = pVideoGroup->SetOutputFPS(25);
		GetMPEGWriterProps()->OverrideSourceFPS(25);
	}
	*/

	hr = m_pTimeline->AddGroup(m_pVideoGroupObj);

	return hr;
}
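For context, m_pTimeline is an IAMTimeline that must have been created beforehand; a minimal setup sketch, assuming DirectShow Editing Services (the member assignment is illustrative):

CComPtr<IAMTimeline> pTimeline;
HRESULT hr = ::CoCreateInstance(CLSID_AMTimeline, NULL, CLSCTX_INPROC_SERVER,
                                IID_IAMTimeline, (void**)&pTimeline);
if (SUCCEEDED(hr))
{
    m_pTimeline = pTimeline;
    // e.g. AddVideoGroup(25.0, 720, 576) then builds a 24-bit RGB
    // video group for a 720x576 source.
}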
Example #7
const bool CMediaViewer::OpenViewer(HWND hOwnerHwnd, HWND hMessageDrainHwnd,
			CVideoRenderer::RendererType RendererType,
			LPCWSTR pszVideoDecoder, LPCWSTR pszAudioDevice)
{
	CTryBlockLock Lock(&m_DecoderLock);
	if (!Lock.TryLock(LOCK_TIMEOUT)) {
		SetError(TEXT("Timeout error."));
		return false;
	}

	if (m_bInit) {
		SetError(TEXT("The filter graph has already been built."));
		return false;
	}

	TRACE(TEXT("CMediaViewer::OpenViewer() フィルタグラフ作成開始\n"));

	HRESULT hr=S_OK;

	IPin *pOutput=NULL;
	IPin *pOutputVideo=NULL;
	IPin *pOutputAudio=NULL;

	try {
		// Build the filter graph manager
		hr=::CoCreateInstance(CLSID_FilterGraph,NULL,CLSCTX_INPROC_SERVER,
				IID_IGraphBuilder,pointer_cast<LPVOID*>(&m_pFilterGraph));
		if (FAILED(hr)) {
			throw CBonException(hr,TEXT("Cannot create the filter graph manager."));
		}
#ifdef _DEBUG
		AddToRot(m_pFilterGraph, &m_dwRegister);
#endif

		// Query the IMediaControl interface
		hr=m_pFilterGraph->QueryInterface(IID_IMediaControl, pointer_cast<void**>(&m_pMediaControl));
		if (FAILED(hr)) {
			throw CBonException(hr,TEXT("Cannot obtain the media control interface."));
		}

		Trace(TEXT("Connecting the source filter..."));

		/* CBonSrcFilter */
		{
			// Create the instance
			m_pSrcFilter = static_cast<CBonSrcFilter*>(CBonSrcFilter::CreateInstance(NULL, &hr));
			if (m_pSrcFilter == NULL || FAILED(hr))
				throw CBonException(hr, TEXT("Cannot create the source filter."));
			m_pSrcFilter->SetOutputWhenPaused(RendererType == CVideoRenderer::RENDERER_DEFAULT);
			// Add it to the filter graph
			hr = m_pFilterGraph->AddFilter(m_pSrcFilter, L"BonSrcFilter");
			if (FAILED(hr))
				throw CBonException(hr, TEXT("Cannot add the source filter to the filter graph."));
			// Get the output pin
			pOutput = DirectShowUtil::GetFilterPin(m_pSrcFilter, PINDIR_OUTPUT);
			if (pOutput==NULL)
				throw CBonException(TEXT("Cannot obtain the source filter's output pin."));
			m_pSrcFilter->EnableSync(m_bEnablePTSSync);
		}

		Trace(TEXT("Connecting the MPEG-2 Demultiplexer filter..."));

		/* MPEG-2 Demultiplexer */
		{
			CMediaType MediaTypeVideo;
			CMediaType MediaTypeAudio;
			IMpeg2Demultiplexer *pMpeg2Demuxer;

			hr=::CoCreateInstance(CLSID_MPEG2Demultiplexer,NULL,
					CLSCTX_INPROC_SERVER,IID_IBaseFilter,
					pointer_cast<LPVOID*>(&m_pMp2DemuxFilter));
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot create the MPEG-2 Demultiplexer filter."),
									TEXT("Make sure the MPEG-2 Demultiplexer filter is installed."));
			hr=DirectShowUtil::AppendFilterAndConnect(m_pFilterGraph,
								m_pMp2DemuxFilter,L"Mpeg2Demuxer",&pOutput);
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot add the MPEG-2 Demultiplexer to the filter graph."));
			// pOutput should be NULL at this point, but release it just in case
			SAFE_RELEASE(pOutput);

			// Query the IMpeg2Demultiplexer interface
			hr=m_pMp2DemuxFilter->QueryInterface(IID_IMpeg2Demultiplexer,
												 pointer_cast<void**>(&pMpeg2Demuxer));
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot obtain the IMpeg2Demultiplexer interface."),
									TEXT("An incompatible splitter may have a higher merit than the MPEG-2 Demultiplexer."));

			// Set the video media format
			hr = SetVideoMediaType(&MediaTypeVideo, 1920, 1080);
			if (FAILED(hr))
				throw CBonException(TEXT("Cannot allocate memory."));
			// Create the video output pin
			hr = pMpeg2Demuxer->CreateOutputPin(&MediaTypeVideo, L"Video", &pOutputVideo);
			if (FAILED(hr)) {
				pMpeg2Demuxer->Release();
				throw CBonException(hr, TEXT("Cannot create the MPEG-2 Demultiplexer's video output pin."));
			}
			// Set the audio media format
			MediaTypeAudio.InitMediaType();
			MediaTypeAudio.SetType(&MEDIATYPE_Audio);
			MediaTypeAudio.SetSubtype(&MEDIASUBTYPE_NULL);
			MediaTypeAudio.SetVariableSize();
			MediaTypeAudio.SetTemporalCompression(TRUE);
			MediaTypeAudio.SetSampleSize(0);
			MediaTypeAudio.SetFormatType(&FORMAT_None);
			// Create the audio output pin
			hr=pMpeg2Demuxer->CreateOutputPin(&MediaTypeAudio,L"Audio",&pOutputAudio);
			pMpeg2Demuxer->Release();
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot create the MPEG-2 Demultiplexer's audio output pin."));
			// Query the video output pin's IMPEG2PIDMap interface
			hr=pOutputVideo->QueryInterface(__uuidof(IMPEG2PIDMap),pointer_cast<void**>(&m_pMp2DemuxVideoMap));
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot obtain the video output pin's IMPEG2PIDMap."));
			// Query the audio output pin's IMPEG2PIDMap interface
			hr=pOutputAudio->QueryInterface(__uuidof(IMPEG2PIDMap),pointer_cast<void**>(&m_pMp2DemuxAudioMap));
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot obtain the audio output pin's IMPEG2PIDMap."));
		}

#ifndef BONTSENGINE_H264_SUPPORT
		Trace(TEXT("Connecting the MPEG-2 parser filter..."));

		/* CMpeg2ParserFilter */
		{
			// Create the instance
			m_pMpeg2Parser = static_cast<CMpeg2ParserFilter*>(CMpeg2ParserFilter::CreateInstance(NULL, &hr));
			if ((!m_pMpeg2Parser) || FAILED(hr))
				throw CBonException(hr,TEXT("Cannot create the MPEG-2 parser filter."));
			m_pMpeg2Parser->SetVideoInfoCallback(OnVideoInfo,this);
			// madVR does not apply a new video size unless the MediaType is set when the size changes
			m_pMpeg2Parser->SetAttachMediaType(RendererType==CVideoRenderer::RENDERER_madVR);
			// Add and connect the filter
			hr=DirectShowUtil::AppendFilterAndConnect(
				m_pFilterGraph,m_pMpeg2Parser,L"Mpeg2ParserFilter",&pOutputVideo);
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot add the MPEG-2 parser filter to the filter graph."));
		}
#else
		Trace(TEXT("Connecting the H.264 parser filter..."));

		/* CH264ParserFilter */
		{
			// Create the instance
			m_pH264Parser = static_cast<CH264ParserFilter*>(CH264ParserFilter::CreateInstance(NULL, &hr));
			if ((!m_pH264Parser) || FAILED(hr))
				throw CBonException(TEXT("Cannot create the H.264 parser filter."));
			m_pH264Parser->SetVideoInfoCallback(OnVideoInfo,this);
			m_pH264Parser->SetAdjustTime(m_bAdjustVideoSampleTime);
			m_pH264Parser->SetAdjustFrameRate(m_bAdjustFrameRate);
			// madVR does not apply a new video size unless the MediaType is set when the size changes
			m_pH264Parser->SetAttachMediaType(RendererType==CVideoRenderer::RENDERER_madVR);
			// Add and connect the filter
			hr=DirectShowUtil::AppendFilterAndConnect(
				m_pFilterGraph,m_pH264Parser,L"H264ParserFilter",&pOutputVideo);
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot add the H.264 parser filter to the filter graph."));
		}
#endif	// BONTSENGINE_H264_SUPPORT

		Trace(TEXT("Connecting the audio decoder..."));

#if 1
		/* CAudioDecFilter */
		{
			// Create the CAudioDecFilter instance
			m_pAudioDecoder = static_cast<CAudioDecFilter*>(CAudioDecFilter::CreateInstance(NULL, &hr));
			if (!m_pAudioDecoder || FAILED(hr))
				throw CBonException(hr,TEXT("Cannot create the audio decoder filter."));
			// Add and connect the filter
			hr=DirectShowUtil::AppendFilterAndConnect(
				m_pFilterGraph,m_pAudioDecoder,L"AudioDecFilter",&pOutputAudio);
			if (FAILED(hr))
				throw CBonException(hr,TEXT("Cannot add the audio decoder filter to the filter graph."));

			m_pAudioDecoder->SetJitterCorrection(m_bAdjustAudioStreamTime);
			if (m_pAudioStreamCallback)
				m_pAudioDecoder->SetStreamCallback(m_pAudioStreamCallback,
												   m_pAudioStreamCallbackParam);
		}
#else
		/*
			Using an external AAC decoder causes problems such as no sound
			after the channel count changes, or dual mono being played back
			as stereo.
		*/

		/* CAacParserFilter */
		{
			CAacParserFilter *m_pAacParser;
			// Create the CAacParserFilter instance
			m_pAacParser=static_cast<CAacParserFilter*>(CAacParserFilter::CreateInstance(NULL, &hr));
			if (!m_pAacParser || FAILED(hr))
				throw CBonException(hr,TEXT("Cannot create the AAC parser filter."));
			// Add and connect the filter
			hr=DirectShowUtil::AppendFilterAndConnect(
				m_pFilterGraph,m_pAacParser,L"AacParserFilter",&pOutputAudio);
			if (FAILED(hr))
				throw CBonException(TEXT("Cannot add the AAC parser filter to the filter graph."));
			m_pAacParser->Release();
		}

		/* AAC decoder */
		{
			CDirectShowFilterFinder FilterFinder;

			// Search
			if(!FilterFinder.FindFilter(&MEDIATYPE_Audio,&MEDIASUBTYPE_AAC))
				throw CBonException(TEXT("No AAC decoder found."),
									TEXT("Make sure an AAC decoder is installed."));

			WCHAR szAacDecoder[128];
			CLSID idAac;
			bool bConnectSuccess=false;
			IBaseFilter *pAacDecFilter=NULL;

			for (int i=0;i<FilterFinder.GetFilterCount();i++){
				if (FilterFinder.GetFilterInfo(i,&idAac,szAacDecoder,128)) {
					if (pszAudioDecoder!=NULL && pszAudioDecoder[0]!='\0'
							&& ::lstrcmpi(szAacDecoder,pszAudioDecoder)!=0)
						continue;
					hr=DirectShowUtil::AppendFilterAndConnect(m_pFilterGraph,
							idAac,szAacDecoder,&pAacDecFilter,
							&pOutputAudio);
					if (SUCCEEDED(hr)) {
						TRACE(TEXT("AAC decoder connected : %s\n"),szAacDecoder);
						bConnectSuccess=true;
						break;
					}
				}
			}
			// Did any of the filters connect?
			if (bConnectSuccess) {
				SAFE_RELEASE(pAacDecFilter);
				//m_pszAacDecoderName=StdUtil::strdup(szAacDecoder);
			} else {
				throw CBonException(TEXT("Cannot add an AAC decoder filter to the filter graph."),
									TEXT("Make sure a valid AAC decoder is selected in the settings."));
			}
		}
#endif

		/* Connect the user-specified audio filter */
		if (m_pszAudioFilterName) {
			Trace(TEXT("Connecting the audio filter..."));

			// Search
			bool bConnectSuccess=false;
			CDirectShowFilterFinder FilterFinder;
			if (FilterFinder.FindFilter(&MEDIATYPE_Audio,&MEDIASUBTYPE_PCM)) {
				WCHAR szAudioFilter[128];
				CLSID idAudioFilter;

				for (int i=0;i<FilterFinder.GetFilterCount();i++) {
					if (FilterFinder.GetFilterInfo(i,&idAudioFilter,szAudioFilter,128)) {
						if (::lstrcmpi(m_pszAudioFilterName,szAudioFilter)!=0)
							continue;
						hr=DirectShowUtil::AppendFilterAndConnect(m_pFilterGraph,
								idAudioFilter,szAudioFilter,&m_pAudioFilter,
								&pOutputAudio,NULL,true);
						if (SUCCEEDED(hr)) {
							TRACE(TEXT("Audio filter connected : %s\n"),szAudioFilter);
							bConnectSuccess=true;
						}
						break;
					}
				}
			}
			if (!bConnectSuccess) {
				throw CBonException(hr,
					TEXT("Cannot add the audio filter to the filter graph."),
					TEXT("The audio filter may be unavailable or incompatible with the audio device."));
			}
		}

#ifndef BONTSENGINE_H264_SUPPORT
		Trace(TEXT("Connecting the MPEG-2 decoder..."));

		/* MPEG-2 decoder */
		{
			CDirectShowFilterFinder FilterFinder;

			// Search
			if(!FilterFinder.FindFilter(&MEDIATYPE_Video,&MEDIASUBTYPE_MPEG2_VIDEO))
				throw CBonException(TEXT("No MPEG-2 decoder found."),
									TEXT("Make sure an MPEG-2 decoder is installed."));

			WCHAR szMpeg2Decoder[128];
			CLSID idMpeg2Vid;
			bool bConnectSuccess=false;

			for (int i=0;i<FilterFinder.GetFilterCount();i++){
				if (FilterFinder.GetFilterInfo(i,&idMpeg2Vid,szMpeg2Decoder,128)) {
					if (pszVideoDecoder!=NULL && pszVideoDecoder[0]!='\0'
							&& ::lstrcmpi(szMpeg2Decoder,pszVideoDecoder)!=0)
						continue;
					hr=DirectShowUtil::AppendFilterAndConnect(m_pFilterGraph,
							idMpeg2Vid,szMpeg2Decoder,&m_pVideoDecoderFilter,
							&pOutputVideo,NULL,true);
					if (SUCCEEDED(hr)) {
						bConnectSuccess=true;
						break;
					}
				}
			}
			// Did any of the filters connect?
			if (bConnectSuccess) {
				m_pszVideoDecoderName=StdUtil::strdup(szMpeg2Decoder);
			} else {
				throw CBonException(hr,TEXT("Cannot add an MPEG-2 decoder filter to the filter graph."),
					TEXT("Make sure a valid MPEG-2 decoder is selected in the settings.\nAlso try changing the renderer."));
			}
		}

#ifndef MPEG2PARSERFILTER_INPLACE
		/*
			Intended to work around a problem where 1080x1080 (4:3) video is
			displayed as a square with the combination of the CyberLink decoder
			and the default renderer... but it looked odd, so it is on hold.
		*/
		if (::StrStrI(m_pszVideoDecoderName, TEXT("CyberLink")) != NULL)
			m_pMpeg2Parser->SetFixSquareDisplay(true);
#endif
#else	// ndef BONTSENGINE_H264_SUPPORT
		Trace(TEXT("Connecting the H.264 decoder..."));

		/* H.264 decoder */
		{
			CDirectShowFilterFinder FilterFinder;

			// Search
			if(!FilterFinder.FindFilter(&MEDIATYPE_Video,&MEDIASUBTYPE_H264))
				throw CBonException(TEXT("No H.264 decoder found."),
									TEXT("Make sure an H.264 decoder is installed."));

			WCHAR szH264Decoder[128];
			CLSID idH264Decoder;
			bool bConnectSuccess=false;

			for (int i=0;i<FilterFinder.GetFilterCount();i++){
				if (FilterFinder.GetFilterInfo(i,&idH264Decoder,szH264Decoder,128)) {
					if (pszVideoDecoder!=NULL && pszVideoDecoder[0]!='\0'
							&& ::lstrcmpi(szH264Decoder,pszVideoDecoder)!=0)
						continue;
					hr=DirectShowUtil::AppendFilterAndConnect(m_pFilterGraph,
							idH264Decoder,szH264Decoder,&m_pVideoDecoderFilter,
							&pOutputVideo,NULL,true);
					if (SUCCEEDED(hr)) {
						bConnectSuccess=true;
						break;
					}
				}
			}
			// Did any of the filters connect?
			if (bConnectSuccess) {
				m_pszVideoDecoderName=StdUtil::strdup(szH264Decoder);
			} else {
				throw CBonException(hr,TEXT("Cannot add an H.264 decoder filter to the filter graph."),
					TEXT("Make sure a valid H.264 decoder is selected in the settings.\nAlso try changing the renderer."));
			}
		}
#endif	// BONTSENGINE_H264_SUPPORT

		Trace(TEXT("Building the video renderer..."));

		if (!CVideoRenderer::CreateRenderer(RendererType,&m_pVideoRenderer)) {
			throw CBonException(TEXT("Cannot create the video renderer."),
								TEXT("Make sure a valid renderer is selected in the settings."));
		}
		if (!m_pVideoRenderer->Initialize(m_pFilterGraph,pOutputVideo,
										  hOwnerHwnd,hMessageDrainHwnd)) {
			throw CBonException(m_pVideoRenderer->GetLastErrorException());
		}
		m_VideoRendererType=RendererType;

		Trace(TEXT("Building the audio renderer..."));

		// Build the audio renderer
		{
			bool fOK = false;

			if (pszAudioDevice != NULL && pszAudioDevice[0] != '\0') {
				CDirectShowDeviceEnumerator DevEnum;

				if (DevEnum.CreateFilter(CLSID_AudioRendererCategory,
										 pszAudioDevice, &m_pAudioRenderer)) {
					m_pszAudioRendererName=StdUtil::strdup(pszAudioDevice);
					fOK = true;
				}
			}
			if (!fOK) {
				hr = ::CoCreateInstance(CLSID_DSoundRender, NULL,
										CLSCTX_INPROC_SERVER, IID_IBaseFilter,
										pointer_cast<LPVOID*>(&m_pAudioRenderer));
				if (SUCCEEDED(hr)) {
					m_pszAudioRendererName=StdUtil::strdup(TEXT("DirectSound Renderer"));
					fOK = true;
				}
			}
			if (fOK) {
				hr = DirectShowUtil::AppendFilterAndConnect(m_pFilterGraph,
						m_pAudioRenderer, L"Audio Renderer", &pOutputAudio);
				if (SUCCEEDED(hr)) {
#ifdef _DEBUG
					if (pszAudioDevice != NULL && pszAudioDevice[0] != '\0')
					TRACE(TEXT("Connected audio device %s\n"), pszAudioDevice);
#endif
					if (m_bUseAudioRendererClock) {
						IMediaFilter *pMediaFilter;

						if (SUCCEEDED(m_pFilterGraph->QueryInterface(IID_IMediaFilter,
								pointer_cast<void**>(&pMediaFilter)))) {
							IReferenceClock *pReferenceClock;

							if (SUCCEEDED(m_pAudioRenderer->QueryInterface(IID_IReferenceClock,
									pointer_cast<void**>(&pReferenceClock)))) {
								pMediaFilter->SetSyncSource(pReferenceClock);
								pReferenceClock->Release();
								TRACE(TEXT("Selected the audio renderer as the graph clock\n"));
							}
							pMediaFilter->Release();
						}
					}
					fOK = true;
				} else {
					fOK = false;
				}
				if (!fOK) {
					hr = m_pFilterGraph->Render(pOutputAudio);
					if (FAILED(hr))
						throw CBonException(hr, TEXT("Cannot connect the audio renderer."),
							TEXT("Make sure a valid audio device is selected in the settings."));
				}
			} else {
				// No audio device available?
				// Hook up a Null renderer instead
				hr = ::CoCreateInstance(CLSID_NullRenderer, NULL,
										CLSCTX_INPROC_SERVER, IID_IBaseFilter,
										pointer_cast<LPVOID*>(&m_pAudioRenderer));
				if (SUCCEEDED(hr)) {
					hr = DirectShowUtil::AppendFilterAndConnect(m_pFilterGraph,
						m_pAudioRenderer, L"Null Audio Renderer", &pOutputAudio);
					if (FAILED(hr)) {
						throw CBonException(hr, TEXT("Cannot connect the Null audio renderer."));
					}
					m_pszAudioRendererName=StdUtil::strdup(TEXT("Null Renderer"));
					TRACE(TEXT("Null renderer connected\n"));
				}
			}
		}

		/*
			The MPEG-2 Demultiplexer is reportedly set as the graph clock
			by default, but set it explicitly just in case.
		*/
		if (!m_bUseAudioRendererClock) {
			IMediaFilter *pMediaFilter;

			if (SUCCEEDED(m_pFilterGraph->QueryInterface(
					IID_IMediaFilter,pointer_cast<void**>(&pMediaFilter)))) {
				IReferenceClock *pReferenceClock;

				if (SUCCEEDED(m_pMp2DemuxFilter->QueryInterface(
						IID_IReferenceClock,pointer_cast<void**>(&pReferenceClock)))) {
					pMediaFilter->SetSyncSource(pReferenceClock);
					pReferenceClock->Release();
					TRACE(TEXT("Selected the MPEG-2 Demultiplexer as the graph clock\n"));
				}
				pMediaFilter->Release();
			}
		}

		// Set the owner window
		m_hOwnerWnd = hOwnerHwnd;
		RECT rc;
		::GetClientRect(hOwnerHwnd, &rc);
		m_wVideoWindowX = (WORD)rc.right;
		m_wVideoWindowY = (WORD)rc.bottom;

		m_bInit=true;

		ULONG PID;
		if (m_wVideoEsPID != PID_INVALID) {
			PID = m_wVideoEsPID;
			if (FAILED(m_pMp2DemuxVideoMap->MapPID(1, &PID, MEDIA_ELEMENTARY_STREAM)))
				m_wVideoEsPID = PID_INVALID;
		}
		if (m_wAudioEsPID != PID_INVALID) {
			PID = m_wAudioEsPID;
			if (FAILED(m_pMp2DemuxAudioMap->MapPID(1, &PID, MEDIA_ELEMENTARY_STREAM)))
				m_wAudioEsPID = PID_INVALID;
		}
	} catch (CBonException &Exception) {
		SetError(Exception);
		if (Exception.GetErrorCode()!=0) {
			TCHAR szText[MAX_ERROR_TEXT_LEN+32];
			int Length;

			Length=::AMGetErrorText(Exception.GetErrorCode(),szText,MAX_ERROR_TEXT_LEN);
			::wsprintf(szText+Length,TEXT("\nError code (HRESULT) 0x%08X"),Exception.GetErrorCode());
			SetErrorSystemMessage(szText);
		}

		SAFE_RELEASE(pOutput);
		SAFE_RELEASE(pOutputVideo);
		SAFE_RELEASE(pOutputAudio);
		CloseViewer();

		TRACE(TEXT("Filter graph construction failed : %s\n"), GetLastErrorText());
		return false;
	}

	SAFE_RELEASE(pOutputVideo);
	SAFE_RELEASE(pOutputAudio);

	ClearError();

	TRACE(TEXT("Filter graph construction succeeded\n"));
	return true;
}
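AddToRot(), used under _DEBUG above, is not shown in the excerpt; it presumably follows the standard DirectShow SDK sample that registers the graph in the Running Object Table so GraphEdit can attach to it. A sketch along those lines:

HRESULT AddToRot(IUnknown *pUnkGraph, DWORD *pdwRegister)
{
	IMoniker *pMoniker = NULL;
	IRunningObjectTable *pROT = NULL;

	if (FAILED(::GetRunningObjectTable(0, &pROT)))
		return E_FAIL;

	WCHAR wsz[256];
	::wsprintfW(wsz, L"FilterGraph %08x pid %08x",
				(DWORD_PTR)pUnkGraph, ::GetCurrentProcessId());

	HRESULT hr = ::CreateItemMoniker(L"!", wsz, &pMoniker);
	if (SUCCEEDED(hr)) {
		// ROTFLAGS_REGISTRATIONKEEPSALIVE keeps the entry alive until revoked
		hr = pROT->Register(ROTFLAGS_REGISTRATIONKEEPSALIVE, pUnkGraph,
							pMoniker, pdwRegister);
		pMoniker->Release();
	}
	pROT->Release();
	return hr;
}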
Example #8
CTSParserFilter::CTSParserFilter(LPUNKNOWN pUnk,
                           HRESULT *phr) 
: CBaseFilter(NAME("CTSParser"), pUnk, &m_Lock, CLSID_TSParser)

{
    ASSERT(phr);
	m_wOutputFormat = MPEG_PS;
    // Create the single input pin

	m_pInputPin = new CTSParserInputPin(   
		                                this,
                                        GetOwner(),
                                        &m_Lock,
                                        &m_ReceiveLock,
                                        phr);
    if(m_pInputPin == NULL)
    {
        if (phr)
            *phr = E_OUTOFMEMORY;
    }

    m_pOutputPin = new CTSParserOutputPin(   
										L"TSPin",
		                                this,
                                        GetOwner(),
                                        &m_Lock,
                                        phr);
    if( m_pOutputPin == NULL )
    {
        if (phr)
            *phr = E_OUTOFMEMORY;
		delete m_pInputPin;
		m_pInputPin = NULL;	// avoid dangling pointers if construction continues
    }
    
    m_pVideoPin = new CTSParserOutputPin(   
										L"VideoPin",
		                                this,
                                        GetOwner(),
                                        &m_Lock,
                                        phr);
    if( m_pVideoPin == NULL )
    {
        if (phr)
            *phr = E_OUTOFMEMORY;
		delete m_pInputPin;
		delete m_pOutputPin;
		m_pInputPin = NULL;
		m_pOutputPin = NULL;
    }

    m_pAudioPin = new CTSParserOutputPin(   
										L"AudioPin",
		                                this,
                                        GetOwner(),
                                        &m_Lock,
                                        phr);
    if( m_pAudioPin == NULL )
    {
        if (phr)
            *phr = E_OUTOFMEMORY;
		delete m_pInputPin;
		delete m_pOutputPin;
		delete m_pVideoPin;
		m_pInputPin = NULL;
		m_pOutputPin = NULL;
		m_pVideoPin = NULL;
    }
    m_pPassThrusPin = new CTSParserOutputPin(   
										L"PassThrusPin",
		                                this,
                                        GetOwner(),
                                        &m_Lock,
                                        phr);
    if( m_pPassThrusPin == NULL )
    {
        if (phr)
            *phr = E_OUTOFMEMORY;
		delete m_pInputPin;
		delete m_pOutputPin;
		delete m_pVideoPin;
		delete m_pAudioPin;
		m_pInputPin = NULL;
		m_pOutputPin = NULL;
		m_pVideoPin = NULL;
		m_pAudioPin = NULL;
    }
    
    m_pDumpPin= new CTSParserOutputPin(   
										L"DumpPin",
		                                this,
                                        GetOwner(),
                                        &m_Lock,
                                        phr);
    if( m_pDumpPin == NULL )
    {
        if (phr)
            *phr = E_OUTOFMEMORY;
		delete m_pInputPin;
		delete m_pOutputPin;
		delete m_pVideoPin;
		delete m_pAudioPin;
		delete m_pPassThrusPin;
		m_pInputPin = NULL;
		m_pOutputPin = NULL;
		m_pVideoPin = NULL;
		m_pAudioPin = NULL;
		m_pPassThrusPin = NULL;
    }

	CMediaType mt;
	//mt.AllocFormatBuffer( TS_PACKET_LENGTH * TS_BUFFER_PACKETS );
	//mt.SetSampleSize( TS_PACKET_LENGTH * TS_BUFFER_PACKETS );
    //mt.SetTemporalCompression(FALSE);

	mt.SetType(&MEDIATYPE_Stream);
	mt.SetSubtype(&MEDIASUBTYPE_MPEG2_TRANSPORT);
	m_pPassThrusPin->DefaultMediaType( &mt );
	m_pDumpPin->DefaultMediaType( &mt );

	mt.SetType(&MEDIATYPE_Video);
	mt.SetSubtype(&MEDIASUBTYPE_MPEG2_VIDEO);
    mt.SetFormatType(&FORMAT_VideoInfo);
	m_pVideoPin->DefaultMediaType( &mt );

	mt.SetType(&MEDIATYPE_Audio);
	mt.SetSubtype(&MEDIASUBTYPE_DOLBY_AC3);
    mt.SetFormatType(&FORMAT_DolbyAC3);
	m_pAudioPin->DefaultMediaType( &mt );

	m_dwDumpedBytes = 0;
	m_dwDumpSize = 0;
	m_dwInputBytes = 0;
	m_dwOutputBytes = 0;
	m_bRebultPMT = FALSE;
	m_bRebuildTSPMTEnabled = FALSE;
	memset( m_szDebugFileSource, 0, sizeof(m_szDebugFileSource) );
	m_hDebugFileSource = NULL;
	m_bPassThroughEnabled = FALSE;
	m_bParserEnabled = TRUE;
	m_bFilterRunning = FALSE;
	m_bDebugFlag = FALSE;
	m_dwSelectProgram = -1;
	m_dwSelectChannel = -1;
	m_dwSelectTSID = 0;
	m_dwPIDDumpDisable = 0;
	m_bDisableClockTimeout = TRUE;
	m_bSubtitleEnable = TRUE;

	m_wFormat = 0;
	m_wSubFormat = 0;

}
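Filters built on CBaseFilter are normally instantiated through a static CreateInstance() called by the class factory, which is where the phr checked throughout this constructor comes from. A typical sketch (assumed; the factory method is not shown in the excerpt):

CUnknown* WINAPI CTSParserFilter::CreateInstance(LPUNKNOWN pUnk, HRESULT *phr)
{
    ASSERT(phr);
    CTSParserFilter *pFilter = new CTSParserFilter(pUnk, phr);
    if (pFilter == NULL && phr)
        *phr = E_OUTOFMEMORY;
    return pFilter;
}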