Example #1
IMFTransform* MediaSink::GetAudioEncoderForStream(WORD streamNumber)
{
  IMFMediaType* streamMediaType = nullptr;
  IPropertyStore* encodingConfigurationProperties = nullptr;
  IMFActivate* encoderActivationObj = nullptr;
  IMFTransform* mfTransform = nullptr;
  HRESULT hr = S_OK;

  do
  {
    // Need to get the media type for the output stream in order to instantiate the 
    // encoder.  My first inclination was to get the stream sink like we did in 
    // CreateTopologyOutputNode, and that could work, but... we also need to
    // get the encoder configuration parameters, which are tacked on to the 
    // IMFASFContentInfo object.  Ugh.  The Microsoft example does a 
    // little bit of both in order to maximize confusion...

    streamMediaType = GetMediaTypeForStream(streamNumber);

    if (FAILED(hr = _mfAsfContentInfo->GetEncodingConfigurationPropertyStore(streamNumber, &encodingConfigurationProperties)))
      break;

    if (FAILED(hr = MFCreateWMAEncoderActivate(streamMediaType, encodingConfigurationProperties, &encoderActivationObj)))
      break;

    if (FAILED(hr = encoderActivationObj->ActivateObject(IID_PPV_ARGS(&mfTransform))))
      break;
  } while (0);

  // Release the intermediate COM objects; only the transform itself is handed back.
  if (encoderActivationObj)
    encoderActivationObj->Release();
  if (encodingConfigurationProperties)
    encodingConfigurationProperties->Release();
  if (streamMediaType)
    streamMediaType->Release();

  if (FAILED(hr))
    throw std::exception("Unable to retrieve transform for StreamSink");

  return mfTransform;
}
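A minimal sketch of how this accessor might be called, assuming a constructed MediaSink instance and stream number 1 (both hypothetical here); since the function throws on failure, the caller only ever sees a valid transform:

try
{
  IMFTransform* encoder = sink->GetAudioEncoderForStream(1);  // 'sink' is a hypothetical MediaSink*
  // ... query or configure the encoder here ...
  encoder->Release();
}
catch (const std::exception& e)
{
  OutputDebugStringA(e.what());  // activation failed somewhere in the chain
}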
Example #2
MediaSink* MediaSink::Create(PCWSTR url, IMFASFContentInfo* afsContentInfo)
{
  IMFActivate* mfActivate = nullptr;
  IMFMediaSink* mfMediaSink = nullptr;

  // Create the activation object for the file sink
  HRESULT hr = MFCreateASFMediaSinkActivate(url, afsContentInfo, &mfActivate);
  if (FAILED(hr))
    throw std::exception("Could not create MediaSink activation object");

  // Immediately activate the media sink as there's no real reason not to
  if (FAILED(hr = mfActivate->ActivateObject(__uuidof(IMFMediaSink), (void**)&mfMediaSink)))
    throw std::exception("Could not activate MediaSink");

  mfActivate->Release();
  // Not releasing afsContentInfo here; the MediaSink constructed below still needs it.

  return new MediaSink(mfMediaSink, afsContentInfo);
}
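A sketch of the intended call pattern, assuming contentInfo was created earlier with MFCreateASFContentInfo and populated with an ASF profile (the url and the helper name are illustrative):

IMFASFContentInfo* contentInfo = BuildAsfContentInfo();  // hypothetical helper
MediaSink* sink = MediaSink::Create(L"C:\\temp\\output.wma", contentInfo);
// Create() throws on failure, so 'sink' is always valid here.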
Example #3
void MFAudioEndpointControl::setActiveOutput(const QString &name)
{
    if (m_activeEndpoint == name)
        return;
    QMap<QString, LPWSTR>::iterator it = m_devices.find(name);
    if (it == m_devices.end())
        return;

    LPWSTR wstrID = *it;
    IMFActivate *activate = NULL;
    HRESULT hr = MFCreateAudioRendererActivate(&activate);
    if (FAILED(hr)) {
        qWarning() << "Failed to create audio renderer activate";
        return;
    }

    if (wstrID) {
        hr = activate->SetString(MF_AUDIO_RENDERER_ATTRIBUTE_ENDPOINT_ID, wstrID);
    } else {
        //This is the default device that was inserted in updateEndpoints(),
        //so give the activate object a hint that we want to use the device for
        //multimedia playback; Media Foundation will then choose an appropriate one.

        //from MSDN:
        //The ERole enumeration defines constants that indicate the role that the system has assigned to an audio endpoint device.
        //eMultimedia: Music, movies, narration, and live music recording.
        hr = activate->SetUINT32(MF_AUDIO_RENDERER_ATTRIBUTE_ENDPOINT_ROLE, eMultimedia);
    }

    if (FAILED(hr)) {
        qWarning() << "Failed to set attribute for audio device" << name;
        activate->Release();
        return;
    }

    if (m_currentActivate)
        m_currentActivate->Release();
    m_currentActivate = activate;
    m_activeEndpoint = name;
}
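A minimal usage sketch, assuming 'control' points to an MFAudioEndpointControl whose m_devices map was already filled by updateEndpoints() (the device name shown is illustrative):

control->setActiveOutput(QStringLiteral("Speakers (High Definition Audio)"));
// Unknown names are ignored; re-selecting the current endpoint returns early.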
Example #4
MediaFoundationTransform *MediaFoundationTransform::LoadWmaEncoderTransform(WmaEncodingFormat encodingFormat)
{
	MediaFoundationTransform *result = nullptr;
	UINT32 transformCount = 0;
	IMFActivate **transformActivationObjs = nullptr;
	MFT_REGISTER_TYPE_INFO typeInfo;

	typeInfo.guidMajorType = MFMediaType_Audio;
	typeInfo.guidSubtype = (encodingFormat == WmaEncodingFormat::Lossless) ? MFAudioFormat_WMAudio_Lossless : MFAudioFormat_WMAudioV8;

	HRESULT hr = MFTEnumEx(MFT_CATEGORY_AUDIO_ENCODER, MFT_ENUM_FLAG_TRANSCODE_ONLY, nullptr,  &typeInfo, &transformActivationObjs, &transformCount);

	// early out if the call failed or no transforms were found

	if (FAILED(hr) || (transformCount < 1))
	{
		CoTaskMemFree(transformActivationObjs);	// no-op when the array is null
		return nullptr;
	}

	// Regardless of how many activation objects were returned, just instantiate the
	// first one (would I want to instantiate another?  Why?  Which one?)

	result = new MediaFoundationTransform(*transformActivationObjs, encodingFormat);

	// release all the stupid activation pointers (because COM was such a GREAT idea)

	for (UINT32 i = 0; i < transformCount; i++)
	{
		IMFActivate *temp = *(transformActivationObjs + i);
		temp->Release();
	}

	// free the stupid activation array object (because COM was such an f'ing great idea)
	// (did I ever mention I think COM was just... stupid?)

	CoTaskMemFree(transformActivationObjs);

	return result;
}
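A short usage sketch; WmaEncodingFormat and MediaFoundationTransform come from the example above, and the null check covers both enumeration failure and an empty result set:

MediaFoundationTransform *encoder =
	MediaFoundationTransform::LoadWmaEncoderTransform(WmaEncodingFormat::Lossless);
if (encoder == nullptr)
	printf("no matching WMA encoder MFT is registered\n");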
Example #5
camera_t * camera_open(const char *portname, int highres)
{
	camera_internal_t *camera = (camera_internal_t*)malloc(sizeof(camera_internal_t));
	if (!camera)
		return NULL;
	camera->reader = NULL;
	camera->size.width = 0;
	camera->size.height = 0;

	if (highres)
	{
		console_printf("camera: highres is not supported on windows (yet).\n");
		highres = 0;
	}

	// Initialize Media Foundation
	HRESULT hr = MFStartup(MF_VERSION);

	///////////////////////////////////////////
	IMFAttributes *pAttributes = NULL;
	UINT32 m_cDevices = 0;
	IMFActivate **m_ppDevices = NULL;

	// Initialize an attribute store. We will use this to
	// specify the enumeration parameters.
	if (SUCCEEDED(hr))
		hr = MFCreateAttributes(&pAttributes, 1);

	// Ask for source type = video capture devices
	if (SUCCEEDED(hr))
	{
		hr = pAttributes->SetGUID(
			MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
			MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID
			);
	}

	// Enumerate devices.
	if (SUCCEEDED(hr))
	{
		hr = MFEnumDeviceSources(pAttributes, &m_ppDevices, &m_cDevices);
	}

	SafeRelease(&pAttributes);

	/////////////////////////////////////////////////
	IMFActivate *pActivate = NULL;
	if (m_cDevices)
	{
		console_printf("camera: there are %d camera devices connected (0..%d).\n", m_cDevices, m_cDevices - 1);
		int device = strtol(portname, 0, 10);
		if (device < 0 || device >= (int)m_cDevices)
			console_printf("camera: device %d does not exist.\n", device);
		else
			pActivate = m_ppDevices[device];
	}
	else
	{
		console_printf("camera: could not find a device\n");
	}

	if (!pActivate)
	{
		// No usable device: release the enumeration results and bail out.
		for (UINT32 i = 0; i < m_cDevices; i++)
			m_ppDevices[i]->Release();
		CoTaskMemFree(m_ppDevices);
		free(camera);
		return NULL;
	}
	/////////////////////////////////////////////////

	IMFMediaSource *pSource = NULL;

	// Create the media source for the device.
	hr = pActivate->ActivateObject(
		__uuidof(IMFMediaSource),
		(void**)&pSource
		);

	// The enumeration list is no longer needed; pSource holds its own reference.
	for (UINT32 i = 0; i < m_cDevices; i++)
		m_ppDevices[i]->Release();
	CoTaskMemFree(m_ppDevices);
	///////////////////////////////////////////


	if (SUCCEEDED(hr))
	{
		hr = MFCreateSourceReaderFromMediaSource(
			pSource,
			NULL,	// no attribute store needed for synchronous reads
			&camera->reader
			);
	}

	// The reader holds its own reference to the source now.
	SafeRelease(&pSource);

	////////////////////////////////////////////////////
	// The list of acceptable types.
	GUID subtypes[] = {
		MFVideoFormat_NV12, MFVideoFormat_YUY2, MFVideoFormat_UYVY,
		MFVideoFormat_RGB32, MFVideoFormat_RGB24, MFVideoFormat_IYUV
	};

	BOOL bUseNativeType = FALSE;

	GUID subtype = { 0 };

	IMFMediaType *pType = NULL;

	UINT32 width = 0, height = 0;
	int selectedSubtype = -1;

	// If the source's native format matches any of the formats in
	// the list, prefer the native format.

	// Note: The camera might support multiple output formats,
	// including a range of frame dimensions. The application could
	// provide a list to the user and have the user select the
	// camera's output format. That is outside the scope of this
	// sample, however.

	DWORD selectedStreamIndex = MF_SOURCE_READER_FIRST_VIDEO_STREAM;

	hr = camera->reader->GetNativeMediaType(
		selectedStreamIndex,
		0,  // Type index
		&pType
		);

	if (FAILED(hr)) { console_printf("camera: could not get media type\n"); goto done; }

	hr = ::MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &width, &height);

	if (FAILED(hr)) { console_printf("camera: could not get resolution\n"); goto done; }

	console_printf("camera: found resolution %dx%d\n", width, height);

	camera->size.width = width;
	camera->size.height = height;



	hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);

	if (FAILED(hr)) { console_printf("camera: could not get stream type(1)\n"); goto done; }

	for (UINT32 i = 0; i < ARRAYSIZE(subtypes); i++)
	{
		if (subtype == subtypes[i])
		{
			hr = camera->reader->SetCurrentMediaType(
				(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
				NULL,
				pType
				);

			bUseNativeType = TRUE;
			selectedSubtype = i;
			break;
		}
	}

	if (!bUseNativeType)
	{
		// None of the native types worked. The camera might offer
		// output a compressed type such as MJPEG or DV.

		// Try adding a decoder.

		for (UINT32 i = 0; i < ARRAYSIZE(subtypes); i++)
		{
			hr = pType->SetGUID(MF_MT_SUBTYPE, subtypes[i]);

			if (FAILED(hr)) { console_printf("camera: could not get stream type(2)\n"); goto done; }

			hr = camera->reader->SetCurrentMediaType(
				(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
				NULL,
				pType
				);

			if (SUCCEEDED(hr))
			{
				selectedSubtype = i;
				break;
			}
		}
	}


	if (FAILED(hr)) { console_printf("camera: could not find stream type\n"); goto done; }

done:
	SafeRelease(&pType);

	console_printf("camera: selected type: %d, native: %s, resolution: %dx%d\n",
		selectedSubtype, bUseNativeType ? "yes" : "no", camera->size.width, camera->size.height);



	if (selectedSubtype != 4)	// index 4 == MFVideoFormat_RGB24 in the subtypes list above
	{
		console_printf("camera: unexpected stream type.\n");
		SafeRelease(&camera->reader);
		free(camera);
		return NULL;
	}
	return (camera_t*)camera;
}
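A sketch of the expected call pattern, assuming the camera_t definition and console_printf from this codebase, plus a hypothetical matching camera_close() that releases the reader:

camera_t *cam = camera_open("0", 0);	/* device index "0", low-res */
if (cam)
{
	/* ... pull frames through the IMFSourceReader wrapped inside ... */
	/* camera_close(cam); */
}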
Example #6
std::unique_ptr<MediaFoundation_DecompresserTransform> MediaFoundation_DecompresserTransform::getInstance(int width, int height, PixelFormat inputPixelFormat, PixelFormat outputPixelFormat, RESULT &result)
{
    CComPtr<IMFTransform> transform;

    GUID inputSubtype;
    bool ok = MediaFoundation_Utils::pixelFormatToVideoFormat(inputPixelFormat, inputSubtype);
    if (!ok) {
        result = RESULT::UNSUPPORTED_INPUT;
        return nullptr;
    }

    GUID outputSubtype;
    ok = MediaFoundation_Utils::pixelFormatToVideoFormat(outputPixelFormat, outputSubtype);
    if (!ok) {
        result = RESULT::UNSUPPORTED_OUTPUT_FOR_INPUT;
        return nullptr;
    }

    MFT_REGISTER_TYPE_INFO inputFilter = {MFMediaType_Video, inputSubtype};
    MFT_REGISTER_TYPE_INFO outputFilter = {MFMediaType_Video, outputSubtype};

    IMFActivate **activateArr = nullptr;
    UINT32 activateCount = 0;

    // TODO(nurupo): maybe prioritize hardware decoders first?
    HRESULT hr = MFTEnumEx(MFT_CATEGORY_VIDEO_DECODER, MFT_ENUM_FLAG_ALL, &inputFilter, &outputFilter, &activateArr, &activateCount);
    if (FAILED(hr) || activateCount < 1) {
        DEBUG_PRINT_HR_ERROR("Couldn't find an appropriate transform.", hr);
        CoTaskMemFree(activateArr);

        // check whether it's RESULT::UNSUPPORTED_INPUT or RESULT::UNSUPPORTED_OUTPUT_FOR_INPUT
        hr = MFTEnumEx(MFT_CATEGORY_VIDEO_DECODER, MFT_ENUM_FLAG_ALL, &inputFilter, nullptr, &activateArr, &activateCount);
        if (FAILED(hr) || activateCount < 1) {
            // there is no transform for such input
            result = RESULT::UNSUPPORTED_INPUT;
        } else {
            // there is some transform for this input, but not this output
            for (UINT32 i = 0; i < activateCount; i++) {
                activateArr[i]->Release();
            }
            result = RESULT::UNSUPPORTED_OUTPUT_FOR_INPUT;
        }
        CoTaskMemFree(activateArr);

        return nullptr;
    }

    // release all but 1st transform
    for (UINT32 i = 1; i < activateCount; i++) {
        activateArr[i]->Release();
    }

    // Activate 1st transform
    IMFActivate *activate = activateArr[0];
    CoTaskMemFree(activateArr);
    hr = activate->ActivateObject(IID_PPV_ARGS(&transform));
    if (FAILED(hr)) {
        DEBUG_PRINT_HR_ERROR("Couldn't activate a transform.", hr);
        activate->Release();
        result = RESULT::FAILURE;
        return nullptr;
    }

    std::unique_ptr<MediaFoundation_PixelFormatTransform> pixelFormatTransform = MediaFoundation_PixelFormatTransform::getInstance(transform, width, height, inputPixelFormat, outputPixelFormat, result);

    if (result != RESULT::OK) {
        activate->ShutdownObject();
        activate->Release();
        return nullptr;
    }

    result = RESULT::OK;

    return std::unique_ptr<MediaFoundation_DecompresserTransform>(new MediaFoundation_DecompresserTransform(pixelFormatTransform, activate));
}
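A minimal usage sketch; the PixelFormat values shown are assumptions standing in for whatever MediaFoundation_Utils::pixelFormatToVideoFormat actually maps:

RESULT res;
auto decoder = MediaFoundation_DecompresserTransform::getInstance(
    640, 480, PixelFormat::MJPEG, PixelFormat::RGB24, res);	// hypothetical enum values
if (!decoder) {
    // res tells UNSUPPORTED_INPUT apart from UNSUPPORTED_OUTPUT_FOR_INPUT and FAILURE.
}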
Example #7
bool WinCaptureDevice::InitializeFirst(std::string& error)
{
	HRESULT hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
	if (FAILED(hr))
	{
		error = "CoInitializeEx failed";
		return false;
	}

	hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
	if (FAILED(hr))
	{
		error = "MFStartup failed";
		return false;
	}

	Close();

	memset(&InputType, 0, sizeof(InputType));

	IMFActivate* activate = WinCaptureDevice::ChooseFirst(error);
	if (!activate)
		return false;

	IMFMediaSource  *pSource = NULL;
	IMFAttributes   *pAttributes = NULL;
	IMFMediaType    *pType = NULL;

	UINT32 m_cchSymbolicLink = 0;

	// Create the media source for the device.
	if (SUCCEEDED(hr))
		hr = activate->ActivateObject(__uuidof(IMFMediaSource), (void**) &pSource);

	// Get the symbolic link.
	if (SUCCEEDED(hr))
		hr = activate->GetAllocatedString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &SymbolicLink, &m_cchSymbolicLink);

	//
	// Create the source reader.
	//

	// Create an attribute store to hold initialization settings.

	if (SUCCEEDED(hr))
		hr = MFCreateAttributes(&pAttributes, 2);

	if (SUCCEEDED(hr))
		hr = pAttributes->SetUINT32(MF_READWRITE_DISABLE_CONVERTERS, TRUE);

	// Set the callback pointer.
	if (SUCCEEDED(hr))
		hr = pAttributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, this);

	if (SUCCEEDED(hr))
		hr = MFCreateSourceReaderFromMediaSource(pSource, pAttributes, &Reader);

	// Try to find a suitable input type.
	if (SUCCEEDED(hr))
	{
		for (uint i = 0; ; i++)
		{
			hr = Reader->GetNativeMediaType((DWORD) MF_SOURCE_READER_FIRST_VIDEO_STREAM, i, &pType);
			if (FAILED(hr))
			{
				error = "Failed to find a supported output format (ie RGB24)";
				break;
			}
			memset(&InputType, 0, sizeof(InputType));
			bool isTypeOK = IsMediaTypeSupported(pType, InputType);
			if (isTypeOK)
			{
				// Get the frame size.
				hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &InputWidth, &InputHeight);
				// Get the image stride.
				hr = GetDefaultStride(pType, &InputDefaultStride);
				// Get the interlace mode. Default: assume progressive.
				InputInterlaceMode = (MFVideoInterlaceMode) MFGetAttributeUINT32(pType, MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
			}
			SafeRelease(&pType);
			if (isTypeOK)
				break;
		}
	}

	if (SUCCEEDED(hr))
	{
		// Ask for the first sample.
		EnableCapture = 1;
		hr = Reader->ReadSample((DWORD) MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0, NULL, NULL, NULL, NULL);
	}

	if (FAILED(hr))
	{
		if (pSource)
		{
			pSource->Shutdown();
			// NOTE: The source reader shuts down the media source by default, but we might not have gotten that far.
		}
		Close();
	}

	SafeRelease(&pSource);
	SafeRelease(&pAttributes);
	SafeRelease(&pType);
	SafeRelease(&activate);

	if (FAILED(hr) && error.length() == 0)
		error = ErrorMessage(L"Failed to initialize video capture device", hr);

	return SUCCEEDED(hr);
}
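A sketch of how initialization might be driven, assuming WinCaptureDevice implements IMFSourceReaderCallback (the reader above is created with MF_SOURCE_READER_ASYNC_CALLBACK, so samples arrive on a callback thread); the stack instance is illustrative:

WinCaptureDevice device;
std::string err;
if (!device.InitializeFirst(err))
	printf("capture init failed: %s\n", err.c_str());
// On success the first ReadSample is already in flight.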
Example #8
/** Add stream to topology */
FIntPoint FImfVideoPlayer::AddStreamToTopology( IMFTopology* Topology, IMFPresentationDescriptor* PresentationDesc, IMFStreamDescriptor* StreamDesc, FImfSampleGrabberCallback* SampleGrabberCallback )
{
	FIntPoint OutDimensions = FIntPoint( ForceInit );
	HRESULT HResult = S_OK;

	IMFActivate* SinkActivate = NULL;
	{
		IMFMediaTypeHandler* Handler = NULL;
		HResult = StreamDesc->GetMediaTypeHandler( &Handler );
		check( SUCCEEDED( HResult ) );

		GUID MajorType;
		HResult = Handler->GetMajorType( &MajorType );
		check( SUCCEEDED( HResult ) );

		/* Audio stream */
		if( MajorType == MFMediaType_Audio )
		{
			/* No audio required */

			Handler->Release( );
			return FIntPoint( ForceInit );
		}

		/* Video stream */
		else if( MajorType == MFMediaType_Video )
		{
			IMFMediaType* OutputType = NULL;
			HResult = Handler->GetCurrentMediaType( &OutputType );
			check( SUCCEEDED( HResult ) );

			IMFMediaType* InputType = NULL;
			HResult = MFCreateMediaType( &InputType );
			check( SUCCEEDED( HResult ) );

			UINT32 Width = 0, Height = 0;
			HResult = MFGetAttributeSize( OutputType, MF_MT_FRAME_SIZE, &Width, &Height );
			check( SUCCEEDED( HResult ) );

			HResult = InputType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Video );
			check( SUCCEEDED( HResult ) );
			HResult = InputType->SetGUID( MF_MT_SUBTYPE, MFVideoFormat_RGB32 );
			check( SUCCEEDED( HResult ) );
			HResult = InputType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
			check( SUCCEEDED( HResult ) );
			HResult = MFCreateSampleGrabberSinkActivate( InputType, SampleGrabberCallback, &SinkActivate );

			check( SUCCEEDED( HResult ) );
			InputType->Release( );
			OutputType->Release( );

			OutDimensions = FIntPoint( Width, Height );
		}

		Handler->Release( );
	}

	IMFTopologyNode* SourceNode = NULL;
	{
		HResult = MFCreateTopologyNode( MF_TOPOLOGY_SOURCESTREAM_NODE, &SourceNode );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_SOURCE, MediaSource );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_PRESENTATION_DESCRIPTOR, PresentationDesc );
		check( SUCCEEDED( HResult ) );
		HResult = SourceNode->SetUnknown( MF_TOPONODE_STREAM_DESCRIPTOR, StreamDesc );
		check( SUCCEEDED( HResult ) );
		HResult = Topology->AddNode( SourceNode );
		check( SUCCEEDED( HResult ) );
	}

	IMFTopologyNode* OutputNode = NULL;
	{
		HResult = MFCreateTopologyNode( MF_TOPOLOGY_OUTPUT_NODE, &OutputNode );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetObject( SinkActivate );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetUINT32( MF_TOPONODE_STREAMID, 0 );
		check( SUCCEEDED( HResult ) );
		HResult = OutputNode->SetUINT32( MF_TOPONODE_NOSHUTDOWN_ON_REMOVE, 0 );
		check( SUCCEEDED( HResult ) );
		HResult = Topology->AddNode( OutputNode );
		check( SUCCEEDED( HResult ) );
	}

	HResult = SourceNode->ConnectOutput( 0, OutputNode, 0 );
	check( SUCCEEDED( HResult ) );

	SourceNode->Release( );
	OutputNode->Release( );
	SinkActivate->Release( );

	return OutDimensions;
}
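A sketch of a plausible call site inside FImfVideoPlayer's topology setup; MediaSource is the member referenced in the example, while the topology, descriptor, and callback variables are illustrative:

IMFTopology* Topology = NULL;
HRESULT HResult = MFCreateTopology( &Topology );
check( SUCCEEDED( HResult ) );

FIntPoint Dim = AddStreamToTopology( Topology, PresentationDesc, StreamDesc, SampleGrabberCallback );
// A zero-initialized FIntPoint signals an audio stream that was skipped.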