Example #1
MediaSink* MediaSink::Create(PCWSTR url, IMFASFContentInfo* afsContentInfo)
{
  IMFActivate* mfActivate = nullptr;
  IMFMediaSink* mfMediaSink = nullptr;

  // Create the activation object for the file sink
  HRESULT hr = MFCreateASFMediaSinkActivate(url, afsContentInfo, &mfActivate);
  if (!SUCCEEDED(hr))
    throw std::exception("Could not create the MediaSink activation object");

  // Immediately activate the media sink as there's no real reason not to

  if (!SUCCEEDED(hr = mfActivate->ActivateObject(__uuidof(IMFMediaSink), (void**)&mfMediaSink)))
  {
    mfActivate->Release();
    throw std::exception("Could not activate MediaSink");
  }

  mfActivate->Release();

  // afsContentInfo is intentionally not released here; the new MediaSink keeps using it

  return new MediaSink(mfMediaSink, afsContentInfo);

}
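A minimal call-site sketch (not from the original project) may help show the assumed ownership: the caller starts Media Foundation, the IMFASFContentInfo comes from MFCreateASFContentInfo and would still need an ASF profile configured on it, and MediaSink::Create is assumed to take ownership of the COM objects it wraps.

// Hypothetical call site (sketch only): assumes Media Foundation is started here and
// that the content info would be configured with an ASF profile before real use.
#include <mfapi.h>
#include <wmcontainer.h>
#include <exception>

void CreateSinkExample()
{
  HRESULT hr = MFStartup(MF_VERSION);
  if (FAILED(hr))
    throw std::exception("MFStartup failed");

  IMFASFContentInfo* contentInfo = nullptr;
  hr = MFCreateASFContentInfo(&contentInfo);   // configure with an ASF profile before use
  if (FAILED(hr))
    throw std::exception("Could not create ASF content info");

  MediaSink* sink = MediaSink::Create(L"output.wma", contentInfo);   // "output.wma" is a placeholder path
  // ... write samples through the sink ...
  delete sink;   // assumed to release the wrapped COM objects

  MFShutdown();
}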
MediaFoundationTransform *MediaFoundationTransform::LoadWmaEncoderTransform(WmaEncodingFormat encodingFormat)
{
	MediaFoundationTransform *result = nullptr;
	UINT32 transformCount = 0;
	IMFActivate **transformActivationObjs = nullptr;
	MFT_REGISTER_TYPE_INFO typeInfo;

	typeInfo.guidMajorType = MFMediaType_Audio;
	typeInfo.guidSubtype = (encodingFormat == WmaEncodingFormat::Lossless) ? MFAudioFormat_WMAudio_Lossless : MFAudioFormat_WMAudioV8;

	HRESULT hr = MFTEnumEx(MFT_CATEGORY_AUDIO_ENCODER, MFT_ENUM_FLAG_TRANSCODE_ONLY, nullptr,  &typeInfo, &transformActivationObjs, &transformCount);

	// early out if the enumeration failed or no transforms were found

	if (FAILED(hr) || (transformCount < 1))
	{
		CoTaskMemFree(transformActivationObjs);
		return nullptr;
	}

	// Regardless of how many activation objects were returned, just instantiate the first one
	// (it is not obvious which of the remaining candidates would ever be preferable)

	result = new MediaFoundationTransform(*transformActivationObjs, encodingFormat);

	// release every activation object in the returned array

	for (UINT32 i = 0; i < transformCount; i++)
	{
		transformActivationObjs[i]->Release();
	}

	// free the activation array itself

	CoTaskMemFree(transformActivationObjs);

	return result;
}
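The Release loop plus CoTaskMemFree above is the ownership contract MFTEnumEx imposes on its caller. As a sketch under that assumption (the helper type is hypothetical, not part of the original code), the whole contract can be wrapped in a small scope guard so neither step can be forgotten:

// Hypothetical scope guard for an MFTEnumEx result: releases every IMFActivate and
// frees the array when it goes out of scope. CoTaskMemFree(nullptr) is a no-op, so the
// guard is safe even when enumeration failed and left the fields empty.
#include <mfobjects.h>
#include <combaseapi.h>

struct MftActivationList
{
	IMFActivate **items = nullptr;
	UINT32 count = 0;

	~MftActivationList()
	{
		for (UINT32 i = 0; i < count; i++)
			items[i]->Release();
		CoTaskMemFree(items);
	}
};

With such a guard, LoadWmaEncoderTransform would pass &list.items and &list.count to MFTEnumEx and simply return; assuming the MediaFoundationTransform constructor takes its own reference to whatever it needs from the first activation object, releasing the activation objects at scope exit remains safe.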
std::unique_ptr<MediaFoundation_DecompresserTransform> MediaFoundation_DecompresserTransform::getInstance(int width, int height, PixelFormat inputPixelFormat, PixelFormat outputPixelFormat, RESULT &result)
{
    CComPtr<IMFTransform> transform;

    GUID inputSubtype;
    bool ok = MediaFoundation_Utils::pixelFormatToVideoFormat(inputPixelFormat, inputSubtype);
    if (!ok) {
        result = RESULT::UNSUPPORTED_INPUT;
        return nullptr;
    }

    GUID outputSubtype;
    ok = MediaFoundation_Utils::pixelFormatToVideoFormat(outputPixelFormat, outputSubtype);
    if (!ok) {
        result = RESULT::UNSUPPORTED_OUTPUT_FOR_INPUT;
        return nullptr;
    }

    MFT_REGISTER_TYPE_INFO inputFilter = {MFMediaType_Video, inputSubtype};
    MFT_REGISTER_TYPE_INFO outputFilter = {MFMediaType_Video, outputSubtype};

    IMFActivate **activateArr = nullptr;
    UINT32 activateCount = 0;

    // TODO(nurupo): maybe prioritize hardware decoders first?
    HRESULT hr = MFTEnumEx(MFT_CATEGORY_VIDEO_DECODER, MFT_ENUM_FLAG_ALL, &inputFilter, &outputFilter, &activateArr, &activateCount);
    if (FAILED(hr) || activateCount < 1) {
        DEBUG_PRINT_HR_ERROR("Couldn't find an appropriate transform.", hr);
        CoTaskMemFree(activateArr);
        activateArr = nullptr;

        // check whether it's RESULT::UNSUPPORTED_INPUT or RESULT::UNSUPPORTED_OUTPUT_FOR_INPUT
        hr = MFTEnumEx(MFT_CATEGORY_VIDEO_DECODER, MFT_ENUM_FLAG_ALL, &inputFilter, nullptr, &activateArr, &activateCount);
        if (FAILED(hr) || activateCount < 1) {
            // there is no transform for such input
            result = RESULT::UNSUPPORTED_INPUT;
        } else {
            // there is some transform for this input, but not this output
            for (UINT32 i = 0; i < activateCount; i++) {
                activateArr[i]->Release();
            }
            result = RESULT::UNSUPPORTED_OUTPUT_FOR_INPUT;
        }
        CoTaskMemFree(activateArr);

        return nullptr;
    }

    // release all but 1st transform
    for (UINT32 i = 1; i < activateCount; i++) {
        activateArr[i]->Release();
    }

    // Activate 1st transform
    IMFActivate *activate = activateArr[0];
    CoTaskMemFree(activateArr);
    hr = activate->ActivateObject(IID_PPV_ARGS(&transform));
    if (FAILED(hr)) {
        DEBUG_PRINT_HR_ERROR("Couldn't activate a transform.", hr);
        activate->Release();
        result = RESULT::FAILURE;
        return nullptr;
    }

    std::unique_ptr<MediaFoundation_PixelFormatTransform> pixelFormatTransform = MediaFoundation_PixelFormatTransform::getInstance(transform, width, height, inputPixelFormat, outputPixelFormat, result);

    if (result != RESULT::OK) {
        activate->ShutdownObject();
        activate->Release();
        return nullptr;
    }

    result = RESULT::OK;

    return std::unique_ptr<MediaFoundation_DecompresserTransform>(new MediaFoundation_DecompresserTransform(pixelFormatTransform, activate));
}
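On the TODO about preferring hardware decoders: hardware MFT activation objects carry the MFT_ENUM_HARDWARE_URL_Attribute attribute, so one way to address it (a sketch only, with a hypothetical helper name) is to probe each candidate for that attribute and fall back to the first entry when none is found.

// Sketch only: choose a hardware MFT from an MFTEnumEx result when one is available.
// A GetItem() probe (the value pointer may be null) is enough to detect the attribute.
#include <mfapi.h>
#include <mftransform.h>

static IMFActivate *pickPreferredActivate(IMFActivate **activateArr, UINT32 activateCount)
{
    for (UINT32 i = 0; i < activateCount; i++) {
        if (SUCCEEDED(activateArr[i]->GetItem(MFT_ENUM_HARDWARE_URL_Attribute, nullptr))) {
            return activateArr[i];   // hardware decoder found
        }
    }
    return activateCount > 0 ? activateArr[0] : nullptr;   // fall back to the first (software) entry
}

The caller would still release the unused activation objects and free the array exactly as getInstance already does.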
/** Add stream to topology */
FIntPoint FImfVideoPlayer::AddStreamToTopology( IMFTopology* Topology, IMFPresentationDescriptor* PresentationDesc, IMFStreamDescriptor* StreamDesc, FImfSampleGrabberCallback* SampleGrabberCallback )
{
	FIntPoint OutDimensions = FIntPoint( ForceInit );
	HRESULT HResult = S_OK;

	IMFActivate* SinkActivate = NULL;
	{
		IMFMediaTypeHandler* Handler = NULL;
		HResult = StreamDesc->GetMediaTypeHandler( &Handler );
		check( SUCCEEDED( HResult ) );

		GUID MajorType;
		HResult = Handler->GetMajorType( &MajorType );
		check( SUCCEEDED( HResult ) );

		/* Audio stream */
		if( MajorType == MFMediaType_Audio )
		{
			/* No audio required */

			Handler->Release( );
			return FIntPoint( ForceInit );
		}

		/* Video stream */
		else if( MajorType == MFMediaType_Video )
		{
			IMFMediaType* OutputType = NULL;
			HResult = Handler->GetCurrentMediaType( &OutputType );
			check( SUCCEEDED( HResult ) );

			IMFMediaType* InputType = NULL;
			HResult = MFCreateMediaType( &InputType );
			check( SUCCEEDED( HResult ) );

			UINT32 Width = 0, Height = 0;
			HResult = MFGetAttributeSize( OutputType, MF_MT_FRAME_SIZE, &Width, &Height );
			check( SUCCEEDED( HResult ) );

			HResult = InputType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Video );
			check( SUCCEEDED( HResult ) );
			HResult = InputType->SetGUID( MF_MT_SUBTYPE, MFVideoFormat_RGB32 );
			check( SUCCEEDED( HResult ) );
			HResult = InputType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
			check( SUCCEEDED( HResult ) );
			HResult = MFCreateSampleGrabberSinkActivate( InputType, SampleGrabberCallback, &SinkActivate );

			check( SUCCEEDED( HResult ) );
			InputType->Release( );
			OutputType->Release( );

			OutDimensions = FIntPoint( Width, Height );
		}

		Handler->Release( );
	}

	/* Bail out if no sample grabber sink was created for this stream (e.g. an unsupported major type) */
	if( SinkActivate == NULL )
	{
		return OutDimensions;
	}

	IMFTopologyNode* SourceNode = NULL;
	{
		HResult = MFCreateTopologyNode( MF_TOPOLOGY_SOURCESTREAM_NODE, &SourceNode );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_SOURCE, MediaSource );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_PRESENTATION_DESCRIPTOR, PresentationDesc );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_STREAM_DESCRIPTOR, StreamDesc );
		check( SUCCEEDED( HResult ) );
		HResult = Topology->AddNode( SourceNode );
		check( SUCCEEDED( HResult ) );
	}

	IMFTopologyNode* OutputNode = NULL;
	{
		HResult = MFCreateTopologyNode( MF_TOPOLOGY_OUTPUT_NODE, &OutputNode );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetObject( SinkActivate );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetUINT32( MF_TOPONODE_STREAMID, 0 );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetUINT32( MF_TOPONODE_NOSHUTDOWN_ON_REMOVE, 0 );
		check( SUCCEEDED( HResult ) );
		HResult = Topology->AddNode( OutputNode );
		check( SUCCEEDED( HResult ) );
	}

	HResult = SourceNode->ConnectOutput( 0, OutputNode, 0 );
	check( SUCCEEDED( HResult ) );

	SourceNode->Release( );
	OutputNode->Release( );
	SinkActivate->Release( );

	return OutDimensions;
}
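AddStreamToTopology handles a single stream descriptor; the surrounding loop is not shown. A sketch of what that caller might look like (AddAllStreamsToTopology is a hypothetical name, and Topology, PresentationDesc, and SampleGrabberCallback are assumed to be set up elsewhere) walks the presentation descriptor and forwards each selected stream:

/* Sketch of a hypothetical caller (not from the original source): enumerate every stream
   in the presentation descriptor and hand each selected one to AddStreamToTopology. */
FIntPoint FImfVideoPlayer::AddAllStreamsToTopology( IMFTopology* Topology, IMFPresentationDescriptor* PresentationDesc, FImfSampleGrabberCallback* SampleGrabberCallback )
{
	FIntPoint VideoDimensions = FIntPoint( ForceInit );

	DWORD StreamCount = 0;
	HRESULT HResult = PresentationDesc->GetStreamDescriptorCount( &StreamCount );
	check( SUCCEEDED( HResult ) );

	for( DWORD Index = 0; Index < StreamCount; ++Index )
	{
		BOOL bSelected = FALSE;
		IMFStreamDescriptor* StreamDesc = NULL;
		HResult = PresentationDesc->GetStreamDescriptorByIndex( Index, &bSelected, &StreamDesc );
		check( SUCCEEDED( HResult ) );

		if( bSelected )
		{
			/* Only the video stream yields non-zero dimensions */
			FIntPoint Dimensions = AddStreamToTopology( Topology, PresentationDesc, StreamDesc, SampleGrabberCallback );
			if( Dimensions != FIntPoint( ForceInit ) )
			{
				VideoDimensions = Dimensions;
			}
		}

		StreamDesc->Release( );
	}

	return VideoDimensions;
}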