QVideoSurfaceFormat MFTransform::videoFormatForMFMediaType(IMFMediaType *mediaType, int *bytesPerLine)
{
    UINT32 stride;
    if (FAILED(mediaType->GetUINT32(MF_MT_DEFAULT_STRIDE, &stride))) {
        *bytesPerLine = 0;
        return QVideoSurfaceFormat();
    }

    *bytesPerLine = (int)stride;

    QSize size;
    UINT32 width, height;
    if (FAILED(MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &width, &height)))
        return QVideoSurfaceFormat();

    size.setWidth(width);
    size.setHeight(height);

    GUID subtype = GUID_NULL;
    if (FAILED(mediaType->GetGUID(MF_MT_SUBTYPE, &subtype)))
        return QVideoSurfaceFormat();

    QVideoFrame::PixelFormat pixelFormat = formatFromSubtype(subtype);
    QVideoSurfaceFormat format(size, pixelFormat);

    UINT32 num, den;
    if (SUCCEEDED(MFGetAttributeRatio(mediaType, MF_MT_PIXEL_ASPECT_RATIO, &num, &den))) {
        format.setPixelAspectRatio(num, den);
    }
    if (SUCCEEDED(MFGetAttributeRatio(mediaType, MF_MT_FRAME_RATE, &num, &den))) {
        format.setFrameRate(qreal(num)/den);
    }

    return format;
}
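
A minimal usage sketch (an assumption, not taken from the Qt sources): callers should treat a default-constructed result or an invalid pixel format as "unsupported media type" before configuring a surface.

// Sketch only: map an IMFMediaType, rejecting anything the helper could not translate.
static bool canRenderMediaType(IMFMediaType *mediaType)
{
    int bytesPerLine = 0;
    const QVideoSurfaceFormat fmt =
        MFTransform::videoFormatForMFMediaType(mediaType, &bytesPerLine);
    return fmt.isValid() && fmt.pixelFormat() != QVideoFrame::Format_Invalid;
}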
STDMETHODIMP   IsOptimizedPlanarVideoInputImageOutputPair(
    _In_ IMFMediaType *inMediaType,
    _In_ IMFMediaType *outMediaType,
    _Out_ bool *optimized,
    _Out_ bool *optimizedxvpneeded )
{
    HRESULT         hr                  = S_OK;
    GUID            guidInputSubType    = GUID_NULL;
    GUID            guidOutputSubType   = GUID_NULL;
    UINT32          uWidthIn = 0, uHeightIn = 0, uWidthOut = 0, uHeightOut = 0;


    DMFTCHECKHR_GOTO(inMediaType->GetGUID(MF_MT_SUBTYPE, &guidInputSubType), done);
    
    DMFTCHECKHR_GOTO(outMediaType->GetGUID(MF_MT_SUBTYPE, &guidOutputSubType), done);

    *optimized = false;         // Assume we aren't optimized. Optimized = (input = YV12|NV12 and output = JPEG)
    *optimizedxvpneeded = true; // Assume we need an XVP

    if (IsEqualGUID(guidInputSubType, MFVideoFormat_YV12) || IsEqualGUID(guidInputSubType, MFVideoFormat_NV12))
    {
        if (IsEqualGUID(guidOutputSubType, GUID_ContainerFormatJpeg))
        {
            *optimized = true;
        }
    }

    if (!*optimized)
    {
        goto done;
    }

    DMFTCHECKHR_GOTO(MFGetAttributeSize(inMediaType, MF_MT_FRAME_SIZE, &uWidthIn, &uHeightIn), done);
    DMFTCHECKHR_GOTO(MFGetAttributeSize(outMediaType, MF_MT_FRAME_SIZE, &uWidthOut, &uHeightOut), done);

    if ((uWidthIn == uWidthOut) && (uHeightIn == uHeightOut))
    {
        *optimizedxvpneeded = false;
    }
    if (!*optimizedxvpneeded)
    {
        UINT32 nominalRange;
        hr = inMediaType->GetUINT32(MF_MT_VIDEO_NOMINAL_RANGE, &nominalRange);

        if (FAILED(hr) || nominalRange != MFNominalRange_0_255)
        {
            //XVP needed since nominal range is not 0-255 for YV12 or NV12 fed into WIC
            *optimizedxvpneeded = true;
        }
        hr = S_OK;
    }

done:
    return hr;
}
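
A hedged caller sketch (not part of the original sample; CheckOptimizedPath and AddXvpToOutputQueue are hypothetical names): it shows how the two output flags might be consumed when building the output pin's transform queue.

// Sketch only: query the optimized-pair check and insert an XVP when one is required.
HRESULT CheckOptimizedPath(IMFMediaType *inType, IMFMediaType *outType)
{
    bool optimized = false;
    bool xvpNeeded = true;
    HRESULT hr = IsOptimizedPlanarVideoInputImageOutputPair(inType, outType,
                                                            &optimized, &xvpNeeded);
    if (FAILED(hr))
        return hr;
    if (optimized && xvpNeeded)
        hr = AddXvpToOutputQueue(); // hypothetical helper
    return hr;
}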
Example #3
int CaptureClass::scanMediaTypes(unsigned int aWidth, unsigned int aHeight)
{
	HRESULT hr;
	HRESULT nativeTypeErrorCode = S_OK;
	DWORD count = 0;
	int besterror = 0xfffffff;
	int bestfit = 0;

	while (nativeTypeErrorCode == S_OK && besterror)
	{
		IMFMediaType * nativeType = NULL;
		nativeTypeErrorCode = mReader->GetNativeMediaType(
			(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
			count,
			&nativeType);
		ScopedRelease<IMFMediaType> nativeType_s(nativeType);

		if (nativeTypeErrorCode != S_OK) continue;

		// get the media type 
		GUID nativeGuid = { 0 };
		hr = nativeType->GetGUID(MF_MT_SUBTYPE, &nativeGuid);

		if (FAILED(hr)) return bestfit;

		if (isMediaOk(nativeType, count))
		{
			UINT32 width, height;
			hr = MFGetAttributeSize(nativeType, MF_MT_FRAME_SIZE, &width, &height);

			if (FAILED(hr)) return bestfit;

			int error = 0;

			// prefer (hugely) to get too much than too little data..

			if (aWidth < width) error += (width - aWidth);
			if (aHeight < height) error += (height - aHeight);
			if (aWidth > width) error += (aWidth - width) * 2;
			if (aHeight > height) error += (aHeight - height) * 2;

			if (aWidth == width && aHeight == height) // ..but perfect match is a perfect match
				error = 0;

			if (besterror > error)
			{
				besterror = error;
				bestfit = count;
			}
			/*
			char temp[1024];
			sprintf(temp, "%d x %d, %x:%x:%x:%x %d %d\n", width, height, nativeGuid.Data1, nativeGuid.Data2, nativeGuid.Data3, nativeGuid.Data4, bestfit == count, besterror);
			OutputDebugStringA(temp);
			*/
		}

		count++;
	}
	return bestfit;
}
Example #4
HRESULT
WMFVideoMFTManager::ConfigureVideoFrameGeometry()
{
  RefPtr<IMFMediaType> mediaType;
  HRESULT hr = mDecoder->GetOutputMediaType(mediaType);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Verify that the video subtype is what we expect it to be.
  // When using hardware acceleration/DXVA2 the video format should
  // be NV12, which is DXVA2's preferred format. For software decoding
  // we use YV12, as that's easier for us to stick into our rendering
  // pipeline than NV12. NV12 has interleaved UV samples, whereas YV12
  // is a planar format.
  GUID videoFormat;
  hr = mediaType->GetGUID(MF_MT_SUBTYPE, &videoFormat);
  NS_ENSURE_TRUE(videoFormat == MFVideoFormat_NV12 || !mUseHwAccel, E_FAIL);
  NS_ENSURE_TRUE(videoFormat == MFVideoFormat_YV12 || mUseHwAccel, E_FAIL);

  nsIntRect pictureRegion;
  hr = GetPictureRegion(mediaType, pictureRegion);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  UINT32 width = 0, height = 0;
  hr = MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &width, &height);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  uint32_t aspectNum = 0, aspectDenom = 0;
  hr = MFGetAttributeRatio(mediaType,
                           MF_MT_PIXEL_ASPECT_RATIO,
                           &aspectNum,
                           &aspectDenom);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Calculate and validate the picture region and frame dimensions after
  // scaling by the pixel aspect ratio.
  nsIntSize frameSize = nsIntSize(width, height);
  nsIntSize displaySize = nsIntSize(pictureRegion.width, pictureRegion.height);
  ScaleDisplayByAspectRatio(displaySize, float(aspectNum) / float(aspectDenom));
  if (!IsValidVideoRegion(frameSize, pictureRegion, displaySize)) {
    // Video track's frame sizes will overflow. Ignore the video track.
    return E_FAIL;
  }

  // Success! Save state.
  mVideoInfo.mDisplay = displaySize;
  mVideoInfo.mHasVideo = true;
  GetDefaultStride(mediaType, &mVideoStride);
  mVideoWidth = width;
  mVideoHeight = height;
  mPictureRegion = pictureRegion;

  LOG("WMFVideoMFTManager frame geometry frame=(%u,%u) stride=%u picture=(%d, %d, %d, %d) display=(%d,%d) PAR=%d:%d",
      width, height,
      mVideoStride,
      mPictureRegion.x, mPictureRegion.y, mPictureRegion.width, mPictureRegion.height,
      displaySize.width, displaySize.height,
      aspectNum, aspectDenom);

  return S_OK;
}
bool FFmpegDecodeServices::VerifyVideoMediaType(IMFMediaType* pMediaType)
{
	GUID subType = GUID_NULL;
	pMediaType->GetGUID(MF_MT_SUBTYPE, &subType);

	UINT32 width = 0, height = 0;
	MFGetAttributeSize(pMediaType, MF_MT_FRAME_SIZE, &width, &height);
	if (width == 0 ||
		height == 0)
		return false;

	if (subType == MFVideoFormat_H264 || subType == MFVideoFormat_HEVC ||
		subType == MFVideoFormat_MPG1 || subType == MFVideoFormat_MPEG2) {
		if ((MFGetAttributeUINT32(pMediaType, MF_MT_MPEG2_PROFILE, 0) == 0 ||
			MFGetAttributeUINT32(pMediaType, MF_MT_MPEG2_LEVEL, 0) == 0) &&
			subType != MFVideoFormat_MPG1)
			return false;

		UINT32 seqSize = 0;
		pMediaType->GetBlobSize(MF_MT_MPEG_SEQUENCE_HEADER, &seqSize);
		if (seqSize == 0)
			return false;
	}
	return true;
}
Example #6
HRESULT
GetDefaultStride(IMFMediaType *aType, uint32_t* aOutStride)
{
  // Try to get the default stride from the media type.
  HRESULT hr = aType->GetUINT32(MF_MT_DEFAULT_STRIDE, aOutStride);
  if (SUCCEEDED(hr)) {
    return S_OK;
  }

  // Stride attribute not set, calculate it.
  GUID subtype = GUID_NULL;
  uint32_t width = 0;
  uint32_t height = 0;

  hr = aType->GetGUID(MF_MT_SUBTYPE, &subtype);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  hr = MFGetAttributeSize(aType, MF_MT_FRAME_SIZE, &width, &height);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  hr = wmf::MFGetStrideForBitmapInfoHeader(subtype.Data1, width, (LONG*)(aOutStride));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  return hr;
}
Example #7
//-------------------------------------------------------------------
// getVideoFormat:  Gets format information for the video stream.
//
HRESULT VidReader::getVideoFormat()
{
    HRESULT hr = S_OK;
    IMFMediaType *pType = NULL;
	GUID subtype = { 0 };

    // Get the media type from the stream.
    hr = m_pReader->GetCurrentMediaType((DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, &pType);
    if (FAILED(hr)) goto done;

    // Make sure it is a video format.
    hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);
    if (FAILED(hr)) goto done;

    if (subtype != MFVideoFormat_RGB32)
    {
        hr = E_UNEXPECTED;
        goto done;
    }

    // Get the width and height
    hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &m_imagewidth, &m_imageheight);
    if (FAILED(hr)) goto done;

	// Get the frame rate
	UINT32 frN, frD;
	hr = MFGetAttributeRatio(pType, MF_MT_FRAME_RATE, &frN, &frD);   
	if (FAILED(hr)) goto done;
	m_framerate = (double)frN / (double)frD;

done:
    
	SafeRelease(&pType);
    return hr;
}
Example #8
// Gets the sub-region of the video frame that should be displayed.
// See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb530115(v=vs.85).aspx
HRESULT
GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion)
{
  // Determine if "pan and scan" is enabled for this media. If it is, we
  // only display a region of the video frame, not the entire frame.
  BOOL panScan = MFGetAttributeUINT32(aMediaType, MF_MT_PAN_SCAN_ENABLED, FALSE);

  // If pan and scan mode is enabled. Try to get the display region.
  HRESULT hr = E_FAIL;
  MFVideoArea videoArea;
  memset(&videoArea, 0, sizeof(MFVideoArea));
  if (panScan) {
    hr = aMediaType->GetBlob(MF_MT_PAN_SCAN_APERTURE,
                             (UINT8*)&videoArea,
                             sizeof(MFVideoArea),
                             nullptr);
  }

  // If we're not in pan-and-scan mode, or the pan-and-scan region is not set,
  // check for a minimum display aperture.
  if (!panScan || hr == MF_E_ATTRIBUTENOTFOUND) {
    hr = aMediaType->GetBlob(MF_MT_MINIMUM_DISPLAY_APERTURE,
                             (UINT8*)&videoArea,
                             sizeof(MFVideoArea),
                             nullptr);
  }

  if (hr == MF_E_ATTRIBUTENOTFOUND) {
    // Minimum display aperture is not set, for "backward compatibility with
    // some components", check for a geometric aperture.
    hr = aMediaType->GetBlob(MF_MT_GEOMETRIC_APERTURE,
                             (UINT8*)&videoArea,
                             sizeof(MFVideoArea),
                             nullptr);
  }

  if (SUCCEEDED(hr)) {
    // The media specified a picture region, return it.
    aOutPictureRegion = nsIntRect(MFOffsetToInt32(videoArea.OffsetX),
                                  MFOffsetToInt32(videoArea.OffsetY),
                                  videoArea.Area.cx,
                                  videoArea.Area.cy);
    return S_OK;
  }

  // No picture region defined, fall back to using the entire video area.
  UINT32 width = 0, height = 0;
  hr = MFGetAttributeSize(aMediaType, MF_MT_FRAME_SIZE, &width, &height);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  aOutPictureRegion = nsIntRect(0, 0, width, height);
  return S_OK;
}
Example #9
HRESULT MediaInfo::InternalInitVideo(IMFMediaType* mediaType, StreamInfo& info)
{
	MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &info.video.width, &info.video.height);
	MFGetAttributeRatio(mediaType, MF_MT_PIXEL_ASPECT_RATIO, &info.video.pixelAR0, &info.video.pixelAR1);

	UINT32 fps_den = 0, fps_num = 0;
	MFGetAttributeRatio(mediaType, MF_MT_FRAME_RATE, &fps_num, &fps_den);
	info.video.frameRate = (fps_den != 0) ? float(fps_num) / float(fps_den) : 0.0f;
	info.video.fps_den = fps_den;
	info.video.fps_num = fps_num;

	info.video.profile = MFGetAttributeUINT32(mediaType, MF_MT_MPEG2_PROFILE, 0);
	info.video.profileLevel = MFGetAttributeUINT32(mediaType, MF_MT_MPEG2_LEVEL, 0);
	return S_OK;
}
// Called by the DecoderMF class when the media type
// changes.
//
// Thread context: decoder thread
bool PreviewWindow::SetMediaType(IMFMediaType* mediaType)
{
	HRESULT			hr;
	bool			ret = false;
	GUID			subtype;
	UINT			width, height;
	LONG			defaultStride;
	MFRatio			PAR = { 0 };

	EnterCriticalSection(&m_criticalSection);

		hr = mediaType->GetGUID(MF_MT_SUBTYPE, &subtype);
		if (FAILED(hr))
			goto bail;

		hr = MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &width, &height);
		if (FAILED(hr))
			goto bail;

		// TODO: get if it's interlaced / progressive (MF_MT_INTERLACE_MODE)

		hr = GetDefaultStride(mediaType, &defaultStride);
		if (FAILED(hr))
			goto bail;

		// Get the pixel aspect ratio. Default: Assume square pixels (1:1)
		hr = MFGetAttributeRatio(mediaType, MF_MT_PIXEL_ASPECT_RATIO,
			(UINT32*)&PAR.Numerator,
			(UINT32*)&PAR.Denominator);

		if (FAILED(hr))
		{
			PAR.Numerator = PAR.Denominator = 1;
		}

		// Creates a new RGBA (32bpp) buffer for the converted frame
		m_width = width;
		m_height = height;
		m_defaultStride = defaultStride;
		m_newTextureInBuffer = false;

		ret = true;

bail:
	LeaveCriticalSection(&m_criticalSection);

	return ret;
}
Example #11
HRESULT GetDefaultStride(IMFMediaType *pType, LONG *plStride)
{
	LONG lStride = 0;

	// Try to get the default stride from the media type.
	HRESULT hr = pType->GetUINT32(MF_MT_DEFAULT_STRIDE, (UINT32*)&lStride);
	if (FAILED(hr))
	{
		// Attribute not set. Try to calculate the default stride.

		GUID subtype = GUID_NULL;

		UINT32 width = 0;
		UINT32 height = 0;

		// Get the subtype and the image size.
		hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);
		if (FAILED(hr))
		{
			goto done;
		}

		hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &width, &height);
		if (FAILED(hr))
		{
			goto done;
		}

		hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, width, &lStride);
		if (FAILED(hr))
		{
			goto done;
		}

		// Set the attribute for later reference.
		(void)pType->SetUINT32(MF_MT_DEFAULT_STRIDE, UINT32(lStride));
	}

	if (SUCCEEDED(hr))
	{
		*plStride = lStride;
	}

done:
	return hr;
}
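
A hedged usage sketch (assumed, not from the original source): Media Foundation strides can be negative for bottom-up RGB formats, so a frame copy typically starts at the last row and walks upward when the stride is below zero.

// Sketch only: copy a frame row by row, honoring a possibly negative default stride.
void CopyFrameRows(IMFMediaType *pType, const BYTE *pSrc, BYTE *pDst,
                   DWORD cbRow, UINT32 height)
{
    LONG stride = 0;
    if (FAILED(GetDefaultStride(pType, &stride)))
        return;

    if (stride < 0)
        pSrc += (size_t)(-stride) * (height - 1); // bottom-up: begin at the last row

    for (UINT32 y = 0; y < height; ++y)
    {
        memcpy(pDst, pSrc, cbRow);
        pDst += cbRow;
        pSrc += stride; // moves upward in memory when stride is negative
    }
}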
Example #12
HRESULT CaptureClass::setVideoType(IMFMediaType *aType)
{
	HRESULT hr = S_OK;
	GUID subtype = { 0 };

	// Find the video subtype.
	hr = aType->GetGUID(MF_MT_SUBTYPE, &subtype);

	DO_OR_DIE;

	// Choose a conversion function.
	// (This also validates the format type.)

	hr = setConversionFunction(subtype);

	DO_OR_DIE;

	//
	// Get some video attributes.
	//

	subtype = GUID_NULL;

	UINT32 width = 0;
	UINT32 height = 0;

	// Get the subtype and the image size.
	hr = aType->GetGUID(MF_MT_SUBTYPE, &subtype);

	DO_OR_DIE;

	hr = MFGetAttributeSize(aType, MF_MT_FRAME_SIZE, &width, &height);

	DO_OR_DIE;

	hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, width, &mDefaultStride);

	DO_OR_DIE;

	mCaptureBuffer = new unsigned int[width * height];
	mCaptureBufferWidth = width;
	mCaptureBufferHeight = height;

	return hr;
}
Example #13
/*
Find the video mode on the device that matches the requested subtype and frame size.
*/
void FindVideoMode(IMFSourceReader *pReader, const GUID mediaSubType, int width, int height, /* out */ IMFMediaType *&foundpType)
{
	HRESULT hr = S_OK;
	DWORD dwMediaTypeIndex = 0;

	while (SUCCEEDED(hr))
	{
		IMFMediaType *pType = NULL;
		hr = pReader->GetNativeMediaType(0, dwMediaTypeIndex, &pType);
		if (hr == MF_E_NO_MORE_TYPES)
		{
			hr = S_OK;
			break;
		}
		else if (SUCCEEDED(hr))
		{
			// Examine the media type. (Not shown.)
			/*CMediaTypeTrace *nativeTypeMediaTrace = new CMediaTypeTrace(pType);
			printf("Native media type: %s.\n", nativeTypeMediaTrace->GetString());*/

			GUID videoSubType;
			UINT32 pWidth = 0, pHeight = 0;

			hr = pType->GetGUID(MF_MT_SUBTYPE, &videoSubType);
			MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &pWidth, &pHeight);

			if (SUCCEEDED(hr))
			{
				//printf("Video subtype %s, width=%i, height=%i.\n", STRING_FROM_GUID(videoSubType), pWidth, pHeight);

				if (videoSubType == mediaSubType && pWidth == width && pHeight == height)
				{
					foundpType = pType;
					printf("Media type successfully located.\n");
					break;
				}
			}

			pType->Release();
		}
		++dwMediaTypeIndex;
	}
}
HRESULT CMediaController::CreateBitmapForKeyFrame(BYTE* pPixelData, IMFMediaType* pMediaType)
{
    if(!pPixelData || !pMediaType)
    {
        return E_INVALIDARG;
    }

    HRESULT hr = S_OK;

    INT32 stride = 0;

    //Get the Frame size and stride through Media Type attributes

    CHECK_HR (hr = MFGetAttributeSize(pMediaType, MF_MT_FRAME_SIZE, &m_Width, &m_Height));

    CHECK_HR (hr = pMediaType->GetUINT32(MF_MT_DEFAULT_STRIDE, (UINT32*)&stride));

    SAFE_DELETE(m_pBitmap);

    //Create the bitmap with the given size
    m_pBitmap = new Bitmap(m_Width, m_Height, (INT32)stride, PixelFormat32bppRGB, pPixelData);

    if(!m_pBitmap)
    {
        hr = E_OUTOFMEMORY;
        goto done;
    }
    else
    {
        //Bitmap was created, set the flag
        m_fHasTestMedia = TRUE;
        TRACE((L"Bitmap for the key frame created.\n"));
    }

done:
    LOG_MSG_IF_FAILED(L"Bitmap could not be created.\n", hr);

    return hr;
}
HRESULT D3DPresentEngine::getSwapChainPresentParameters(IMFMediaType *type, D3DPRESENT_PARAMETERS* pp)
{
    ZeroMemory(pp, sizeof(D3DPRESENT_PARAMETERS));

    // Get some information about the video format.

    UINT32 width = 0, height = 0;

    HRESULT hr = MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width, &height);
    if (FAILED(hr))
        return hr;

    DWORD d3dFormat = 0;

    hr = qt_wmf_getFourCC(type, &d3dFormat);
    if (FAILED(hr))
        return hr;

    pp->BackBufferWidth = width;
    pp->BackBufferHeight = height;
    pp->Windowed = TRUE;
    pp->SwapEffect = D3DSWAPEFFECT_DISCARD;
    pp->BackBufferFormat = (D3DFORMAT)d3dFormat;
    pp->hDeviceWindow = ::GetShellWindow();
    pp->Flags = D3DPRESENTFLAG_VIDEO;
    pp->PresentationInterval = D3DPRESENT_INTERVAL_DEFAULT;

    D3DDEVICE_CREATION_PARAMETERS params;
    hr = m_device->GetCreationParameters(&params);
    if (FAILED(hr))
        return hr;

    if (params.DeviceType != D3DDEVTYPE_HAL)
        pp->Flags |= D3DPRESENTFLAG_LOCKABLE_BACKBUFFER;

    return S_OK;
}
/** Add stream to topology */
FIntPoint FImfVideoPlayer::AddStreamToTopology( IMFTopology* Topology, IMFPresentationDescriptor* PresentationDesc, IMFStreamDescriptor* StreamDesc, FImfSampleGrabberCallback* SampleGrabberCallback )
{
	FIntPoint OutDimensions = FIntPoint( ForceInit );
	HRESULT HResult = S_OK;

	IMFActivate* SinkActivate = NULL;
	{
		IMFMediaTypeHandler* Handler = NULL;
		HResult = StreamDesc->GetMediaTypeHandler( &Handler );
		check( SUCCEEDED( HResult ) );

		GUID MajorType;
		HResult = Handler->GetMajorType( &MajorType );
		check( SUCCEEDED( HResult ) );

		/* Audio stream */
		if( MajorType == MFMediaType_Audio )
		{
			/* No audio required */

			Handler->Release( );
			return FIntPoint( ForceInit );
		}

		/* Video stream */
		else if( MajorType == MFMediaType_Video )
		{
			IMFMediaType* OutputType = NULL;
			HResult = Handler->GetCurrentMediaType( &OutputType );
			check( SUCCEEDED( HResult ) );

			IMFMediaType* InputType = NULL;
			HResult = MFCreateMediaType( &InputType );

			UINT32 Width = 0, Height = 0;
			HResult = MFGetAttributeSize( OutputType, MF_MT_FRAME_SIZE, &Width, &Height );
			check( SUCCEEDED( HResult ) );

			HResult = InputType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Video );
			check( SUCCEEDED( HResult ) );
			HResult = InputType->SetGUID( MF_MT_SUBTYPE, MFVideoFormat_RGB32 );
			check( SUCCEEDED( HResult ) );
			HResult = InputType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
			check( SUCCEEDED( HResult ) );
			HResult = MFCreateSampleGrabberSinkActivate( InputType, SampleGrabberCallback, &SinkActivate );

			check( SUCCEEDED( HResult ) );
			InputType->Release( );
			OutputType->Release( );

			OutDimensions = FIntPoint( Width, Height );
		}

		Handler->Release( );
	}

	IMFTopologyNode* SourceNode = NULL;
	{
		HResult = MFCreateTopologyNode( MF_TOPOLOGY_SOURCESTREAM_NODE, &SourceNode );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_SOURCE, MediaSource );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_PRESENTATION_DESCRIPTOR, PresentationDesc );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_STREAM_DESCRIPTOR, StreamDesc );
		check( SUCCEEDED( HResult ) );
		HResult = Topology->AddNode( SourceNode );
		check( SUCCEEDED( HResult ) );
	}

	IMFTopologyNode* OutputNode = NULL;
	{
		HResult = MFCreateTopologyNode( MF_TOPOLOGY_OUTPUT_NODE, &OutputNode );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetObject( SinkActivate );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetUINT32( MF_TOPONODE_STREAMID, 0 );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetUINT32( MF_TOPONODE_NOSHUTDOWN_ON_REMOVE, 0 );
		check( SUCCEEDED( HResult ) );
		HResult = Topology->AddNode( OutputNode );
		check( SUCCEEDED( HResult ) );
	}

	HResult = SourceNode->ConnectOutput( 0, OutputNode, 0 );
	check( SUCCEEDED( HResult ) );

	SourceNode->Release( );
	OutputNode->Release( );
	SinkActivate->Release( );

	return OutDimensions;
}
HRESULT HDMediaSource::FindBestVideoStreamIndex(IMFPresentationDescriptor* ppd,PDWORD pdwStreamId,UINT* width,UINT* height,float* fps)
{
	if (ppd == nullptr)
		return E_INVALIDARG;

	DWORD dwCount = 0;
	HRESULT hr = ppd->GetStreamDescriptorCount(&dwCount);
	if (FAILED(hr))
		return hr;

	int vid_count = 0;

	auto pw = std::unique_ptr<unsigned[]>(new unsigned[dwCount]);
	auto ph = std::unique_ptr<unsigned[]>(new unsigned[dwCount]);
	auto psid = std::unique_ptr<DWORD[]>(new DWORD[dwCount]);

	for (unsigned i = 0;i < dwCount;i++)
	{
		BOOL fSelected;
		ComPtr<IMFStreamDescriptor> psd;

		hr = ppd->GetStreamDescriptorByIndex(i,&fSelected,psd.GetAddressOf());
		if (FAILED(hr))
			break;

		DWORD dwStreamId = 0;
		hr = psd->GetStreamIdentifier(&dwStreamId);
		if (FAILED(hr))
			break;

		ComPtr<IMFMediaTypeHandler> pHandler;
		hr = psd->GetMediaTypeHandler(pHandler.GetAddressOf());
		if (FAILED(hr))
			return hr;

		ComPtr<IMFMediaType> pMediaType;
		hr = pHandler->GetCurrentMediaType(pMediaType.GetAddressOf());
		if (FAILED(hr))
			break;

		if (FAILED(WMF::Misc::IsVideoMediaType(pMediaType.Get())))
			continue;
		
		UINT nWidth = 0,nHeight = 0;
		hr = MFGetAttributeSize(pMediaType.Get(),MF_MT_FRAME_SIZE,&nWidth,&nHeight);
		if (FAILED(hr))
			continue;

		MFRatio fps_ratio = {0,0};
		MFGetAttributeRatio(pMediaType.Get(),MF_MT_FRAME_RATE,
			(PUINT32)&fps_ratio.Numerator,(PUINT32)&fps_ratio.Denominator);

		if (fps && fps_ratio.Denominator != 0 && fps_ratio.Numerator != 0)
			*fps = (float)fps_ratio.Numerator / (float)fps_ratio.Denominator;

		pw[vid_count] = nWidth;
		ph[vid_count] = nHeight;
		psid[vid_count] = dwStreamId;

		vid_count++;
	}

	if (FAILED(hr))
		return hr;

	if (vid_count == 0)
		return MF_E_NOT_FOUND;

	unsigned cur_wh = pw[0] + ph[0];
	int max_index = 0;

	for (int i = 0;i < vid_count;i++)
	{
		if ((pw[i] + ph[i]) > cur_wh)
		{
			cur_wh = pw[i] + ph[i];
			max_index = i;
		}
	}

	if (pdwStreamId)
		*pdwStreamId = psid[max_index];

	if (width)
		*width = pw[max_index];
	if (height)
		*height = ph[max_index];

	return S_OK;
}
/*
Description:

    This is used whenever there is a media type change on an output pin and the 
    Output queue is being reconfigured.
    The possible return values for the function are as follows
    
    DeviceMftTransformXVPIllegal        -> If either or both of the media types are NULL
    DeviceMftTransformXVPDisruptiveIn   -> If the media type at the output pin is larger than the input pin's. This will result in a media type change on the input
    DeviceMftTransformXVPDisruptiveOut  -> This is a reconfiguration or addition of the XVP in the output pin queue
    DeviceMftTransformXVPCurrent        -> No XVP needed at all
    Note: This iteration doesn't support a decoder. The next one will, and this function will change accordingly
*/
STDMETHODIMP CompareMediaTypesForXVP(
    _In_opt_ IMFMediaType *inMediaType,
    _In_    IMFMediaType                *newMediaType,
    _Inout_ MF_TRANSFORM_XVP_OPERATION  *operation 
    )
{
    UINT32  unWidthin = 0, unHeightin = 0, unWidthNew = 0, unHeightNew = 0;
    HRESULT hr          = S_OK;
    GUID    guidTypeA   = GUID_NULL;
    GUID    guidTypeB   = GUID_NULL;
    
    *operation = DeviceMftTransformXVPIllegal;
    if ((!inMediaType) || (!newMediaType))
    {
       goto done;
    }

    DMFTCHECKHR_GOTO( MFGetAttributeSize( inMediaType, MF_MT_FRAME_SIZE, &unWidthin, &unHeightin ), done );
    DMFTCHECKHR_GOTO( MFGetAttributeSize( newMediaType, MF_MT_FRAME_SIZE, &unWidthNew, &unHeightNew ), done );


    if ( SUCCEEDED( inMediaType->GetGUID(  MF_MT_MAJOR_TYPE, &guidTypeA ) ) &&
         SUCCEEDED( newMediaType->GetGUID( MF_MT_MAJOR_TYPE, &guidTypeB ) ) &&
        IsEqualGUID( guidTypeA, guidTypeB ) )
    {
        if ( SUCCEEDED( inMediaType->GetGUID ( MF_MT_SUBTYPE, &guidTypeA ) ) &&
             SUCCEEDED( newMediaType->GetGUID( MF_MT_SUBTYPE, &guidTypeB ) ) &&
            IsEqualGUID( guidTypeA, guidTypeB ) )
        {
            //Comparing the MF_MT_AM_FORMAT_TYPE for the directshow format guid
#if 0
            if (SUCCEEDED(inMediaType->GetGUID(MF_MT_AM_FORMAT_TYPE, &guidTypeA)) &&
                SUCCEEDED(newMediaType->GetGUID(MF_MT_AM_FORMAT_TYPE, &guidTypeB)) &&
                IsEqualGUID(guidTypeA, guidTypeB))
#endif
            {

                if (!(( unWidthin == unWidthNew ) &&
                    ( unHeightin == unHeightNew ) ) )
                {
                    if ( ( unWidthNew > unWidthin ) || ( unHeightNew > unHeightin ) )
                    {
                      *operation = DeviceMftTransformXVPDisruptiveIn; //Media type needs to change at input
                    }
                    else
                    {
                        *operation = DeviceMftTransformXVPDisruptiveOut; //Media type needs to change at output
                    }
                    goto done;
                }

                if ( MFGetAttributeUINT32( inMediaType,  MF_MT_SAMPLE_SIZE, 0 ) !=
                     MFGetAttributeUINT32( newMediaType, MF_MT_SAMPLE_SIZE, 0 ) )
                {
                    hr = S_FALSE; //Sample sizes differ. 
                    goto done;
                }
                else
                {
                    //Same media type.. No XVP needed or the current XVP is fine!
                    *operation = DeviceMftTransformXVPCurrent;
                }
            }
        }
        else
        {
            //This is a disruptive operation. Actually a decoder operation!
            *operation = DeviceMftTransformXVPDisruptiveIn;
        }
    }
 done:
    return hr;
}
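
A hedged caller sketch (hypothetical, derived only from the comment block above): the returned MF_TRANSFORM_XVP_OPERATION tells the caller which side of the transform must be reconfigured.

// Sketch only: branch on the comparison result.
HRESULT OnOutputMediaTypeChange(IMFMediaType *inType, IMFMediaType *newType)
{
    MF_TRANSFORM_XVP_OPERATION op = DeviceMftTransformXVPIllegal;
    HRESULT hr = CompareMediaTypesForXVP(inType, newType, &op);
    if (FAILED(hr))
        return hr;

    switch (op)
    {
    case DeviceMftTransformXVPIllegal:       return E_INVALIDARG;
    case DeviceMftTransformXVPDisruptiveIn:  /* renegotiate the input pin's media type */ break;
    case DeviceMftTransformXVPDisruptiveOut: /* rebuild the XVP in the output queue */    break;
    case DeviceMftTransformXVPCurrent:       /* current configuration still valid */      break;
    }
    return hr;
}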
Example #19
HRESULT DrawDevice::SetVideoType(IMFMediaType *pType)
{
    HRESULT hr = S_OK;
    GUID subtype = { 0 };
    MFRatio PAR = { 0 };

    // Find the video subtype.
    hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);

    if (FAILED(hr)) { goto done; }

    // Choose a conversion function.
    // (This also validates the format type.)

    hr = SetConversionFunction(subtype); 
    
    if (FAILED(hr)) { goto done; }

    //
    // Get some video attributes.
    //

    // Get the frame size.
    hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &m_width, &m_height);
    
    if (FAILED(hr)) { goto done; }

    
    // Get the interlace mode. Default: assume progressive.
    m_interlace = (MFVideoInterlaceMode)MFGetAttributeUINT32(
        pType,
        MF_MT_INTERLACE_MODE, 
        MFVideoInterlace_Progressive
        );

    // Get the image stride.
    hr = GetDefaultStride(pType, &m_lDefaultStride);

    if (FAILED(hr)) { goto done; }

    // Get the pixel aspect ratio. Default: Assume square pixels (1:1)
    hr = MFGetAttributeRatio(
        pType, 
        MF_MT_PIXEL_ASPECT_RATIO, 
        (UINT32*)&PAR.Numerator, 
        (UINT32*)&PAR.Denominator
        );

    if (SUCCEEDED(hr))
    {
        m_PixelAR = PAR;
    }
    else
    {
        m_PixelAR.Numerator = m_PixelAR.Denominator = 1;
    }

    m_format = (D3DFORMAT)subtype.Data1;

    // Create Direct3D swap chains.

    hr = CreateSwapChains();

    if (FAILED(hr)) { goto done; }


    // Update the destination rectangle for the correct
    // aspect ratio.

    UpdateDestinationRect();

	if (m_pBuf) delete [] m_pBuf;
	m_pBuf = new BYTE[m_height * m_width * 3];

done:
    if (FAILED(hr))
    {
        m_format = D3DFMT_UNKNOWN;
        m_convertFn = NULL;
    }
    return hr;
}
Example #20
STDMETHODIMP CDecWMV9MFT::ProcessOutput()
{
  HRESULT hr = S_OK;
  DWORD dwStatus = 0;

  MFT_OUTPUT_STREAM_INFO outputInfo = {0};
  m_pMFT->GetOutputStreamInfo(0, &outputInfo);

  IMFMediaBuffer *pMFBuffer = nullptr;
  ASSERT(!(outputInfo.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES));

  MFT_OUTPUT_DATA_BUFFER OutputBuffer = {0};
  if (!(outputInfo.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES)) {
    pMFBuffer = GetBuffer(outputInfo.cbSize);
    if (!pMFBuffer) { DbgLog((LOG_TRACE, 10, L"Unable to allocate media buffer")); return E_FAIL; }
  
    IMFSample *pSampleOut = nullptr;
    hr = MF.CreateSample(&pSampleOut);
    if (FAILED(hr)) { DbgLog((LOG_TRACE, 10, L"Unable to allocate MF sample, hr: 0x%x", hr)); ReleaseBuffer(pMFBuffer); return E_FAIL; }
    
    pSampleOut->AddBuffer(pMFBuffer);
    OutputBuffer.pSample = pSampleOut;
  }
  hr = m_pMFT->ProcessOutput(0, 1, &OutputBuffer, &dwStatus);

  // We don't process events, just release them
  SafeRelease(&OutputBuffer.pEvents);

  // handle stream format changes
  if (hr == MF_E_TRANSFORM_STREAM_CHANGE || OutputBuffer.dwStatus == MFT_OUTPUT_DATA_BUFFER_FORMAT_CHANGE ) {
    SafeRelease(&OutputBuffer.pSample);
    ReleaseBuffer(pMFBuffer);
    hr = SelectOutputType();
    if (FAILED(hr)) {
      DbgLog((LOG_TRACE, 10, L"-> Failed to handle stream change, hr: %x", hr));
      return E_FAIL;
    }
    // try again with the new type, it should work now!
    return ProcessOutput();
  }
  
  // the MFT generated no output, discard the sample and return
  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT || OutputBuffer.dwStatus == MFT_OUTPUT_DATA_BUFFER_NO_SAMPLE) {
    SafeRelease(&OutputBuffer.pSample);
    ReleaseBuffer(pMFBuffer);
    return S_FALSE;
  }
  
  // unknown error condition
  if (FAILED(hr)) {
    DbgLog((LOG_TRACE, 10, L"-> ProcessOutput failed with hr: %x", hr));
    SafeRelease(&OutputBuffer.pSample);
    ReleaseBuffer(pMFBuffer);
    return E_FAIL;
  }

  LAVFrame *pFrame = nullptr;
  AllocateFrame(&pFrame);

  IMFMediaType *pMTOut = nullptr;
  m_pMFT->GetOutputCurrentType(0, &pMTOut);

  MFGetAttributeSize(pMTOut, MF_MT_FRAME_SIZE, (UINT32 *)&pFrame->width, (UINT32 *)&pFrame->height);
  pFrame->format = m_OutPixFmt;

  AVRational pixel_aspect_ratio = {1, 1};
  MFGetAttributeRatio(pMTOut, MF_MT_PIXEL_ASPECT_RATIO, (UINT32*)&pixel_aspect_ratio.num, (UINT32*)&pixel_aspect_ratio.den);

  AVRational display_aspect_ratio = {0, 0};
  av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, (int64_t)pixel_aspect_ratio.num * pFrame->width, (int64_t)pixel_aspect_ratio.den * pFrame->height, INT_MAX);
  pFrame->aspect_ratio = display_aspect_ratio;

  pFrame->interlaced = MFGetAttributeUINT32(OutputBuffer.pSample, MFSampleExtension_Interlaced,       FALSE);
  pFrame->repeat     = MFGetAttributeUINT32(OutputBuffer.pSample, MFSampleExtension_RepeatFirstField, FALSE);

  LAVDeintFieldOrder fo = m_pSettings->GetDeintFieldOrder();
  pFrame->tff = (fo == DeintFieldOrder_Auto) ? !MFGetAttributeUINT32(OutputBuffer.pSample, MFSampleExtension_BottomFieldFirst, FALSE) : (fo == DeintFieldOrder_TopFieldFirst);

  if (pFrame->interlaced && !m_bInterlaced)
    m_bInterlaced = TRUE;

  pFrame->interlaced = (pFrame->interlaced || (m_bInterlaced && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);

  pFrame->ext_format.VideoPrimaries         = MFGetAttributeUINT32(pMTOut, MF_MT_VIDEO_PRIMARIES,     MFVideoPrimaries_Unknown);
  pFrame->ext_format.VideoTransferFunction  = MFGetAttributeUINT32(pMTOut, MF_MT_TRANSFER_FUNCTION,   MFVideoTransFunc_Unknown);
  pFrame->ext_format.VideoTransferMatrix    = MFGetAttributeUINT32(pMTOut, MF_MT_YUV_MATRIX,          MFVideoTransferMatrix_Unknown);
  pFrame->ext_format.VideoChromaSubsampling = MFGetAttributeUINT32(pMTOut, MF_MT_VIDEO_CHROMA_SITING, MFVideoChromaSubsampling_Unknown);
  pFrame->ext_format.NominalRange           = MFGetAttributeUINT32(pMTOut, MF_MT_VIDEO_NOMINAL_RANGE, MFNominalRange_Unknown);

  // HACK: don't flag range=limited if it's the only value set, since it's also the implied default; this helps avoid a reconnect.
  // The MFT always sets this value, even if the bitstream says nothing about it, causing a reconnect on every vc1/wmv3 file.
  if (pFrame->ext_format.value == 0x2000)
    pFrame->ext_format.value = 0;

  // Timestamps
  if (m_bManualReorder) {
    if (!m_timestampQueue.empty()) {
      pFrame->rtStart = m_timestampQueue.front();
      m_timestampQueue.pop();
      
      LONGLONG llDuration = 0;
      hr = OutputBuffer.pSample->GetSampleDuration(&llDuration);
      if (SUCCEEDED(hr) && llDuration > 0) {
        pFrame->rtStop = pFrame->rtStart + llDuration;
      }
    }
  } else {
    LONGLONG llTimestamp = 0;
    hr = OutputBuffer.pSample->GetSampleTime(&llTimestamp);
    if (SUCCEEDED(hr)) {
      pFrame->rtStart = llTimestamp;
      
      LONGLONG llDuration = 0;
      hr = OutputBuffer.pSample->GetSampleDuration(&llDuration);
      if (SUCCEEDED(hr) && llDuration > 0) {
        pFrame->rtStop = pFrame->rtStart + llDuration;
      }
    }
  }

  SafeRelease(&pMTOut);

  // Lock memory in the buffer
  BYTE *pBuffer = nullptr;
  pMFBuffer->Lock(&pBuffer, NULL, NULL);

  // Check alignment
  // If not properly aligned, we need to make the data aligned.
  int alignment = (m_OutPixFmt == LAVPixFmt_NV12) ? 16 : 32;
  if ((pFrame->width % alignment) != 0) {
    hr = AllocLAVFrameBuffers(pFrame);
    if (FAILED(hr)) {
      pMFBuffer->Unlock();
      ReleaseBuffer(pMFBuffer);
      SafeRelease(&OutputBuffer.pSample);
      return hr;
    }
    size_t ySize = pFrame->width * pFrame->height;
    
    memcpy_plane(pFrame->data[0], pBuffer, pFrame->width, pFrame->stride[0], pFrame->height);
    if (m_OutPixFmt == LAVPixFmt_NV12) {
      memcpy_plane(pFrame->data[1], pBuffer + ySize, pFrame->width, pFrame->stride[1], pFrame->height / 2);
    } else if (m_OutPixFmt == LAVPixFmt_YUV420) {
      size_t uvSize = ySize / 4;
      memcpy_plane(pFrame->data[2], pBuffer + ySize, pFrame->width / 2, pFrame->stride[2], pFrame->height / 2);
      memcpy_plane(pFrame->data[1], pBuffer + ySize + uvSize, pFrame->width / 2, pFrame->stride[1], pFrame->height / 2);
    }
    pMFBuffer->Unlock();
    ReleaseBuffer(pMFBuffer);
  } else {
    if (m_OutPixFmt == LAVPixFmt_NV12) {
      pFrame->data[0] = pBuffer;
      pFrame->data[1] = pBuffer + pFrame->width * pFrame->height;
      pFrame->stride[0] = pFrame->stride[1] = pFrame->width;
    } else if (m_OutPixFmt == LAVPixFmt_YUV420) {
      pFrame->data[0] = pBuffer;
      pFrame->data[2] = pBuffer + pFrame->width * pFrame->height;
      pFrame->data[1] = pFrame->data[2] + (pFrame->width / 2) * (pFrame->height / 2);
      pFrame->stride[0] = pFrame->width;
      pFrame->stride[1] = pFrame->stride[2] = pFrame->width / 2;
    }
    pFrame->data[3] = (BYTE *)pMFBuffer;
    pFrame->destruct = wmv9_buffer_destruct;
    pFrame->priv_data = this;
  }
  pFrame->flags |= LAV_FRAME_FLAG_BUFFER_MODIFY;
  Deliver(pFrame);

  SafeRelease(&OutputBuffer.pSample);

  if (OutputBuffer.dwStatus == MFT_OUTPUT_DATA_BUFFER_INCOMPLETE)
    return ProcessOutput();
  return hr;
}
// Helper function to get the frame size from a video media type.
inline HRESULT GetFrameSize(IMFMediaType *pType, UINT32 *pWidth, UINT32 *pHeight)
{
    return MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, pWidth, pHeight);
}
Example #22
bool WinCaptureDevice::InitializeFirst(std::string& error)
{
	HRESULT hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
	if (!SUCCEEDED(hr))
	{
		error = "CoInitializeEx failed";
		return false;
	}

	hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
	if (!SUCCEEDED(hr))
	{
		error = "MFStartup failed";
		return false;
	}

	Close();

	memset(&InputType, 0, sizeof(InputType));

	IMFActivate* activate = WinCaptureDevice::ChooseFirst(error);
	if (!activate)
		return false;

	IMFMediaSource  *pSource = NULL;
	IMFAttributes   *pAttributes = NULL;
	IMFMediaType    *pType = NULL;

	UINT32 m_cchSymbolicLink = 0;

	// Create the media source for the device.
	if (SUCCEEDED(hr))
		hr = activate->ActivateObject(__uuidof(IMFMediaSource), (void**) &pSource);

	// Get the symbolic link.
	if (SUCCEEDED(hr))
		hr = activate->GetAllocatedString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &SymbolicLink, &m_cchSymbolicLink);

	//
	// Create the source reader.
	//

	// Create an attribute store to hold initialization settings.

	if (SUCCEEDED(hr))
		hr = MFCreateAttributes(&pAttributes, 2);

	if (SUCCEEDED(hr))
		hr = pAttributes->SetUINT32(MF_READWRITE_DISABLE_CONVERTERS, TRUE);

	// Set the callback pointer.
	if (SUCCEEDED(hr))
		hr = pAttributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, this);

	if (SUCCEEDED(hr))
		hr = MFCreateSourceReaderFromMediaSource(pSource, pAttributes, &Reader);

	// Try to find a suitable input type.
	if (SUCCEEDED(hr))
	{
		for (uint i = 0; ; i++)
		{
			hr = Reader->GetNativeMediaType((DWORD) MF_SOURCE_READER_FIRST_VIDEO_STREAM, i, &pType);
			if (FAILED(hr))
			{
				error = "Failed to find a supported output format (ie RGB24)";
				break;
			}
			memset(&InputType, 0, sizeof(InputType));
			bool isTypeOK = IsMediaTypeSupported(pType, InputType);
			if (isTypeOK)
			{
				// Get the frame size.
				hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &InputWidth, &InputHeight);
				// Get the image stride.
				hr = GetDefaultStride(pType, &InputDefaultStride);
				// Get the interlace mode. Default: assume progressive.
				InputInterlaceMode = (MFVideoInterlaceMode) MFGetAttributeUINT32(pType, MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
			}
			SafeRelease(&pType);
			if (isTypeOK)
				break;
		}
	}

	if (SUCCEEDED(hr))
	{
		// Ask for the first sample.
		EnableCapture = 1;
		hr = Reader->ReadSample((DWORD) MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0, NULL, NULL, NULL, NULL);
	}

	if (FAILED(hr))
	{
		if (pSource)
		{
			pSource->Shutdown();
			// NOTE: The source reader shuts down the media source by default, but we might not have gotten that far.
		}
		Close();
	}

	SafeRelease(&pSource);
	SafeRelease(&pAttributes);
	SafeRelease(&pType);
	SafeRelease(&activate);

	if (FAILED(hr) && error.length() == 0)
		error = ErrorMessage(L"Failed to initialize video capture device", hr);

	return SUCCEEDED(hr);
}