Code Example #1
File: WinQLAudioSession.cpp Project: chenbk85/QOR
	//-------------------------------------------------------------------
	//  Initializes the CAudioSessionEventHandler object.
	long CAudioSessionEventHandler::Initialize()
	{
		_WINQ_FCONTEXT( "CAudioSessionEventHandler::Initialize" );
		long hr = 0;

		IMMDeviceEnumerator* pDeviceEnumerator = 0;
		IMMDevice* pDevice = 0;
		IAudioSessionManager* pAudioSessionManager = 0;

		// Get the enumerator for the audio endpoint devices.
		hr = nsWinQAPI::COLE32::Instance().CoCreateInstance( reinterpret_cast< const ::IID& >( CLASS_MMDEVICEENUMERATOR ), 0, CLSCTX_INPROC_SERVER, reinterpret_cast< const ::IID& >( IMMDeviceEnumerator::_IID ), reinterpret_cast< void** >( &pDeviceEnumerator ) );

		if( hr < 0 ) { goto done; }

		// Get the default audio endpoint that the SAR will use.
		hr = pDeviceEnumerator->GetDefaultAudioEndpoint( eRender, eConsole,   // The SAR uses 'eConsole' by default.
			&pDevice
			);

		if( hr < 0 ) { goto done; }

		// Get the session manager for this device.
		hr = pDevice->Activate(  IAudioSessionManager::_IID,  CLSCTX_INPROC_SERVER, 0, (void**) &pAudioSessionManager );
		if( hr < 0 ) { goto done; }

		// Get the audio session. 
		hr = pAudioSessionManager->GetAudioSessionControl(reinterpret_cast< const nsWin32::GUID* >(&NULL_GUID),     // Get the default audio session. 
			0,          // The session is not cross-process.
			&m_pAudioSession 
			);

		if( hr < 0 ) { goto done; }

		hr = pAudioSessionManager->GetSimpleAudioVolume(reinterpret_cast< const nsWin32::GUID* >(&NULL_GUID), 0, &m_pSimpleAudioVolume);

done:

		// Release anything we acquired; the pointers may still be null if an earlier step failed.
		if( pDeviceEnumerator ) { pDeviceEnumerator->Release(); }
		if( pDevice ) { pDevice->Release(); }
		if( pAudioSessionManager ) { pAudioSessionManager->Release(); }
		return hr;
	}
Code Example #2
/**
* Initialize the EndpointVolume (volume controller).
*/
void master_volume::loadVolumeController()
{
    m_endpointVolume = nullptr;

    HRESULT hr;
    IMMDeviceEnumerator *deviceEnumerator = nullptr;
    IMMDevice *defaultDevice = nullptr;

    // Get the list of audio devices
    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), (LPVOID *)&deviceEnumerator);
    if (hr != S_OK) {
        logger.Error("master_volume::loadVolumeController(), CoCreateInstance failed with error: " + std::to_string(hr));
        return;
    }

    // Get the default audio device
    hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &defaultDevice);
    if (hr != S_OK) {
        logger.Error("GetDefaultAudioEndpoint failed with error: " + std::to_string(hr));
        deviceEnumerator->Release();
        return;
    }

    // Free the device list
    if (deviceEnumerator) {
        deviceEnumerator->Release();
        deviceEnumerator = nullptr;
    }

    // Load EndpointVolume (volume controller)
    hr = defaultDevice->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, nullptr, (LPVOID *)&m_endpointVolume);
    if (hr != S_OK) {
        logger.Error("master_volume::loadVolumeController(), defaultDevice->Activate failed with error: " + std::to_string(hr));
        defaultDevice->Release();
        return;
    }

    // Release the default device
    if (defaultDevice) {
        defaultDevice->Release();
        defaultDevice = nullptr;
    }
}
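The two examples above manage interface lifetimes by hand, which is easy to get wrong on early-return paths. As a point of comparison, here is a minimal sketch of the same default-endpoint lookup using Microsoft::WRL::ComPtr so every interface is released automatically; the GetMasterVolumeScalar helper name and the error handling are illustrative assumptions, not code from either project.

// Sketch only: fetch the default render endpoint's master volume with automatic
// reference counting. Assumes the Windows SDK headers below and that COM has
// already been initialized (CoInitializeEx) on this thread.
#include <windows.h>
#include <mmdeviceapi.h>
#include <endpointvolume.h>
#include <wrl/client.h>

using Microsoft::WRL::ComPtr;

HRESULT GetMasterVolumeScalar(float *outLevel)   // hypothetical helper
{
    ComPtr<IMMDeviceEnumerator> enumerator;
    HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
                                  CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&enumerator));
    if (FAILED(hr)) return hr;

    ComPtr<IMMDevice> device;
    hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
    if (FAILED(hr)) return hr;

    ComPtr<IAudioEndpointVolume> volume;
    hr = device->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER,
                          nullptr, reinterpret_cast<void**>(volume.GetAddressOf()));
    if (FAILED(hr)) return hr;

    return volume->GetMasterVolumeLevelScalar(outLevel);
    // The ComPtr destructors release all three interfaces on every return path.
}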
Code Example #3
File: MediaPlayer.cpp Project: quartorz/Hockey
//
//  Utility function to retrieve the session manager for the default audio endpoint.
//
HRESULT CMediaPlayer::GetSessionManager2()
{
    HRESULT hr = S_OK;
    if (_SessionManager2 == NULL)
    {
        IMMDeviceEnumerator *deviceEnumerator;
        IMMDevice *endpoint = NULL;

        //
        //  Start with the default endpoint.
        //
        hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&deviceEnumerator));
        if (SUCCEEDED(hr))
        {
            hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &endpoint);

            deviceEnumerator->Release();
            deviceEnumerator = NULL;
        }
        else
        {
            MessageBox(_AppWindow, L"Unable to instantiate MMDeviceEnumerator", L"Get SessionManager Error", MB_OK);
        }

        if (SUCCEEDED(hr))
        {
            hr = endpoint->Activate(__uuidof(IAudioSessionManager2), CLSCTX_INPROC_SERVER,
                    NULL, reinterpret_cast<void **>(&_SessionManager2));
            endpoint->Release();
            if (FAILED(hr))
            {
                MessageBox(_AppWindow, L"Unable to Activate session manager", L"Get SessionManager Error", MB_OK);
            }
        }
        else
        {
            MessageBox(_AppWindow, L"Unable to get default endpoint", L"Get SessionManager Error", MB_OK);
        }
    }
    return hr;
}
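The IAudioSessionManager2 obtained above is usually used to enumerate the per-application audio sessions on the endpoint. That step is not part of MediaPlayer.cpp; a minimal sketch of how it typically looks follows.

    // Sketch: walk the audio sessions via the session manager obtained above
    // (assumes _SessionManager2 is valid, as after a successful GetSessionManager2()).
    IAudioSessionEnumerator *sessionEnum = NULL;
    HRESULT hr = _SessionManager2->GetSessionEnumerator(&sessionEnum);
    if (SUCCEEDED(hr))
    {
        int sessionCount = 0;
        sessionEnum->GetCount(&sessionCount);
        for (int i = 0; i < sessionCount; i++)
        {
            IAudioSessionControl *session = NULL;
            if (SUCCEEDED(sessionEnum->GetSession(i, &session)))
            {
                // ... inspect the session or register an IAudioSessionEvents handler ...
                session->Release();
            }
        }
        sessionEnum->Release();
    }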
Code Example #4
File: dllmain.cpp Project: ukanth/monitores-old
void SetMuteStatus()
{
    HRESULT hr;

    CoInitialize(NULL);
    IMMDeviceEnumerator *deviceEnumerator = NULL;
    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), (LPVOID *)&deviceEnumerator);
    if (!SUCCEEDED(hr))
    {
        CoUninitialize();
        return;
    }

    IMMDevice *defaultDevice = NULL;

    hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &defaultDevice);
    deviceEnumerator->Release();
    deviceEnumerator = NULL;
    if (!SUCCEEDED(hr))
    {
        CoUninitialize();
        return;
    }

    IAudioEndpointVolume *endpointVolume = NULL;
    hr = defaultDevice->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, NULL, (LPVOID *)&endpointVolume);
    defaultDevice->Release();
    defaultDevice = NULL; 

    if (!SUCCEEDED(hr))
    {
        CoUninitialize();
        return;
    }

    // -------------------------
    BOOL b;

    endpointVolume->GetMute(&b);
    b = !b;
    endpointVolume->SetMute(b, NULL);
    endpointVolume->Release();
    CoUninitialize();
}
Code Example #5
IMMDevice* GetDefaultAudioDevice()
{
    IMMDevice* mmDevice = nullptr;
    IMMDeviceEnumerator* mmDeviceEnumerator;

    // activate a device enumerator
    HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&mmDeviceEnumerator);
    if (FAILED(hr))
    {
        fprintf(stderr, "CoCreateInstance(IMMDeviceEnumerator) failed: hr = 0x%08x\n", hr);
        return mmDevice;
    }

    // get the default render endpoint
    hr = mmDeviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &mmDevice);
    mmDeviceEnumerator->Release();
    if (FAILED(hr))
        fprintf(stderr, "IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: hr = 0x%08x\n", hr);

    return mmDevice;
}
Code Example #6
File: AudioDriver.cpp Project: kylegordon/ohPlayer
// Obtain the use of the native multimedia device.
TBool AudioDriver::GetMultimediaDevice(IMMDevice **DeviceToUse)
{
    HRESULT hr;
    TBool   retValue = true;

    IMMDeviceEnumerator *deviceEnumerator = NULL;
    // Declared up front so the error paths below can 'goto Exit' without
    // jumping over an initialization (ill-formed in standard C++).
    IMMDevice *device      = NULL;
    ERole      deviceRole  = eMultimedia;

    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                          NULL,
                          CLSCTX_INPROC_SERVER,
                          IID_PPV_ARGS(&deviceEnumerator));
    if (hr != S_OK)
    {
        Log::Print("Unable to instantiate device enumerator: %x\n", hr);
        retValue = false;
        goto Exit;
    }


    hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender,
                                                   deviceRole,
                                                  &device);
    if (hr != S_OK)
    {
        Log::Print("Unable to get default multimedia device: %x\n", hr);
        retValue = false;
        goto Exit;
    }

    *DeviceToUse = device;
    retValue     = true;
Exit:
    SafeRelease(&deviceEnumerator);

    return retValue;
}
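GetMultimediaDevice relies on a SafeRelease helper that is not included in this excerpt (it also appears in several later examples). Its usual definition, as in the Windows SDK samples, is roughly:

// Typical SafeRelease helper assumed by this and later examples:
// releases the interface (if any) and nulls the caller's pointer.
template <class T>
void SafeRelease(T **ppT)
{
    if (*ppT)
    {
        (*ppT)->Release();
        *ppT = NULL;
    }
}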
Code Example #7
File: SetVolume.cpp Project: davidcie/SetVolume
void InitializeAudioEndpoint(IAudioEndpointVolume **audioEndpoint) 
{
	HRESULT hr;

	// Initialize the COM library
	CoInitialize(nullptr);

	// Get audio device enumerator
	IMMDeviceEnumerator *deviceEnumerator = nullptr;
	hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), (LPVOID *)&deviceEnumerator);
	if (hr != S_OK) {
		printf("Unable to create instance of MMDeviceEnumerator (error code: 0x%08lx)\n", hr);
		CoUninitialize();
		exit(-1);
	}

	// Ask device enumerator for the default audio renderer
	IMMDevice *defaultDevice = nullptr;
	hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &defaultDevice);
	deviceEnumerator->Release();
	deviceEnumerator = nullptr;
	if (hr != S_OK) {
		printf("Unable to get default audio endpoint (error code: 0x%08lx)\n", hr);
		CoUninitialize();
		exit(-1);
	}
	
	// Ask default audio renderer for volume controller
	//IAudioEndpointVolume *endpointVolume = nullptr;
	hr = defaultDevice->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, nullptr, (LPVOID *)audioEndpoint);
	defaultDevice->Release();
	defaultDevice = nullptr;
	if (hr != S_OK) {
		printf("Unable to get default audio volume controller (error code: 0x%08lx)\n", hr);
		CoUninitialize();
		exit(-1);
	}
}
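A short usage sketch for InitializeAudioEndpoint (not part of SetVolume.cpp; the 0.5f level is just an illustration): the caller owns the returned interface and is responsible for releasing it and for the matching CoUninitialize.

	// Sketch: set the master volume to 50% using the helper above.
	IAudioEndpointVolume *audioEndpoint = nullptr;
	InitializeAudioEndpoint(&audioEndpoint);
	audioEndpoint->SetMasterVolumeLevelScalar(0.5f, nullptr);
	audioEndpoint->Release();
	CoUninitialize();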
Code Example #8
File: GetAudioDevices.cpp Project: AaronMike/OBS
bool GetDefaultDevice(String &strVal, EDataFlow df )
{
    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
    const IID IID_IMMDeviceEnumerator    = __uuidof(IMMDeviceEnumerator);
    IMMDeviceEnumerator *mmEnumerator;
    HRESULT err;

    err = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&mmEnumerator);
    if(FAILED(err))
        return false;

    //-------------------------------------------------------

    IMMDevice *defDevice;
    if(FAILED(mmEnumerator->GetDefaultAudioEndpoint(df, eCommunications, &defDevice)))
    {
        SafeRelease(mmEnumerator);
        return false;
    }

    CWSTR wstrDefaultID;
    if(FAILED(defDevice->GetId((LPWSTR*)&wstrDefaultID)))
    {
        SafeRelease(defDevice);
        SafeRelease(mmEnumerator);
        return false;
    }

    strVal = wstrDefaultID;

    CoTaskMemFree((LPVOID)wstrDefaultID);
    SafeRelease(defDevice);
    SafeRelease(mmEnumerator);

    return true;
}
Code Example #9
HRESULT get_default_device(IMMDevice **ppMMDevice) {
    HRESULT hr = S_OK;
    IMMDeviceEnumerator *pMMDeviceEnumerator;
    // activate a device enumerator
    hr = CoCreateInstance(
        __uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, 
        __uuidof(IMMDeviceEnumerator),
        (void**)&pMMDeviceEnumerator
    );
    if (FAILED(hr)) {
        printf("CoCreateInstance(IMMDeviceEnumerator) failed: hr = 0x%08x\n", hr);
        return hr;
    }

    // get the default render endpoint
    hr = pMMDeviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, ppMMDevice);
    pMMDeviceEnumerator->Release();
    if (FAILED(hr)) {
        printf("IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: hr = 0x%08x\n", hr);
        return hr;
    }

    return S_OK;
}
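The IMMDevice returned by get_default_device carries a reference that the caller owns; a brief usage sketch (illustrative only):

    // Sketch: callers of get_default_device must Release the endpoint when done.
    IMMDevice *pMMDevice = NULL;
    if (SUCCEEDED(get_default_device(&pMMDevice))) {
        // ... e.g. Activate an IAudioClient or IAudioEndpointVolume on it ...
        pMMDevice->Release();
    }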
Code Example #10
void VolumeControl::init()
{
	//initialize audio mixer interface
#if defined (__APPLE__)
	#warning TODO: Not implemented for MacOS yet!!!
#elif defined(__linux__)
	//try to open mixer device
	if (mixerHandle == nullptr)
	{
		snd_mixer_selem_id_alloca(&mixerSelemId);
		//sets simple-mixer index and name
		snd_mixer_selem_id_set_index(mixerSelemId, mixerIndex);
		snd_mixer_selem_id_set_name(mixerSelemId, mixerName);
		//open mixer
		if (snd_mixer_open(&mixerHandle, 0) >= 0)
		{
			LOG(LogDebug) << "VolumeControl::init() - Opened ALSA mixer";
			//ok. attach to default card
			if (snd_mixer_attach(mixerHandle, mixerCard) >= 0)
			{
				LOG(LogDebug) << "VolumeControl::init() - Attached to default card";
				//ok. register simple element class
				if (snd_mixer_selem_register(mixerHandle, NULL, NULL) >= 0)
				{
					LOG(LogDebug) << "VolumeControl::init() - Registered simple element class";
					//ok. load registered elements
					if (snd_mixer_load(mixerHandle) >= 0)
					{
						LOG(LogDebug) << "VolumeControl::init() - Loaded mixer elements";
						//ok. find elements now
						mixerElem = snd_mixer_find_selem(mixerHandle, mixerSelemId);
						if (mixerElem != nullptr)
						{
							//wohoo. good to go...
							LOG(LogDebug) << "VolumeControl::init() - Mixer initialized";
						}
						else
						{
							LOG(LogError) << "VolumeControl::init() - Failed to find mixer elements!";
							snd_mixer_close(mixerHandle);
							mixerHandle = nullptr;
						}
					}
					else
					{
						LOG(LogError) << "VolumeControl::init() - Failed to load mixer elements!";
						snd_mixer_close(mixerHandle);
						mixerHandle = nullptr;
					}
				}
				else
				{
					LOG(LogError) << "VolumeControl::init() - Failed to register simple element class!";
					snd_mixer_close(mixerHandle);
					mixerHandle = nullptr;
				}
			}
			else
			{
				LOG(LogError) << "VolumeControl::init() - Failed to attach to default card!";
				snd_mixer_close(mixerHandle);
				mixerHandle = nullptr;
			}
		}
		else
		{
			LOG(LogError) << "VolumeControl::init() - Failed to open ALSA mixer!";
		}
	}
#elif defined(WIN32) || defined(_WIN32)
	//get windows version information
	OSVERSIONINFOEXA osVer = {sizeof(OSVERSIONINFO)};
	::GetVersionExA(reinterpret_cast<LPOSVERSIONINFOA>(&osVer));
	//check windows version
	if(osVer.dwMajorVersion < 6)
	{
		//Windows older than Vista. use mixer API. open default mixer
		if (mixerHandle == nullptr)
		{
			if (mixerOpen(&mixerHandle, 0, NULL, 0, 0) == MMSYSERR_NOERROR)
			{
				//retrieve info on the volume slider control for the "Speaker Out" line
				MIXERLINECONTROLS mixerLineControls;
				mixerLineControls.cbStruct = sizeof(MIXERLINECONTROLS);
				mixerLineControls.dwLineID = 0xFFFF0000; //Id of "Speaker Out" line
				mixerLineControls.cControls = 1;
				//mixerLineControls.dwControlID = 0x00000000; //Id of "Speaker Out" line's volume slider
				mixerLineControls.dwControlType = MIXERCONTROL_CONTROLTYPE_VOLUME; //Get volume control
				mixerLineControls.pamxctrl = &mixerControl;
				mixerLineControls.cbmxctrl = sizeof(MIXERCONTROL);
				if (mixerGetLineControls((HMIXEROBJ)mixerHandle, &mixerLineControls, MIXER_GETLINECONTROLSF_ONEBYTYPE) != MMSYSERR_NOERROR)
				{
					LOG(LogError) << "VolumeControl::getVolume() - Failed to get mixer volume control!";
					mixerClose(mixerHandle);
					mixerHandle = nullptr;
				}
			}
			else
			{
				LOG(LogError) << "VolumeControl::init() - Failed to open mixer!";
			}
		}
	}
	else
	{
		//Windows Vista or above. use EndpointVolume API. get device enumerator
		if (endpointVolume == nullptr)
		{
			CoInitialize(nullptr);
			IMMDeviceEnumerator * deviceEnumerator = nullptr;
			CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), (LPVOID *)&deviceEnumerator);
			if (deviceEnumerator != nullptr)
			{
				//get default endpoint
				IMMDevice * defaultDevice = nullptr;
				deviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &defaultDevice);
				if (defaultDevice != nullptr)
				{
					//retrieve endpoint volume
					defaultDevice->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, nullptr, (LPVOID *)&endpointVolume);
					if (endpointVolume == nullptr)
					{
						LOG(LogError) << "VolumeControl::init() - Failed to get default audio endpoint volume!";
					}
					//release default device. we don't need it anymore
					defaultDevice->Release();
				}
				else
				{
					LOG(LogError) << "VolumeControl::init() - Failed to get default audio endpoint!";
				}
				//release device enumerator. we don't need it anymore
				deviceEnumerator->Release();
			}
			else
			{
				LOG(LogError) << "VolumeControl::init() - Failed to get audio endpoint enumerator!";
				CoUninitialize();
			}
		}
	}
#endif
}
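VolumeControl::init has a matching teardown that is not shown here. A minimal sketch of what such a deinit could look like, assuming the same member names (this is a guess at the counterpart, not the project's actual code):

void VolumeControl::deinit()
{
	//deinitialize audio mixer interface
#if defined (__APPLE__)
	//TODO: Not implemented for MacOS yet!
#elif defined(__linux__)
	if (mixerHandle != nullptr)
	{
		snd_mixer_detach(mixerHandle, mixerCard);
		snd_mixer_close(mixerHandle);
		mixerHandle = nullptr;
		mixerElem = nullptr;
	}
#elif defined(WIN32) || defined(_WIN32)
	if (mixerHandle != nullptr)
	{
		//mixer API path (pre-Vista)
		mixerClose(mixerHandle);
		mixerHandle = nullptr;
	}
	else if (endpointVolume != nullptr)
	{
		//EndpointVolume API path (Vista and later)
		endpointVolume->Release();
		endpointVolume = nullptr;
		CoUninitialize();
	}
#endif
}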
Code Example #11
void modifyVolume(char ch)
{
	HRESULT hr;
	bool decibels = false;
	bool scalar = true;
	double newVolume = 10;
	// -------------------------
	CoInitialize(NULL);
	IMMDeviceEnumerator *deviceEnumerator = NULL;
	hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), (LPVOID *)&deviceEnumerator);
	IMMDevice *defaultDevice = NULL;

	hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, eMultimedia, &defaultDevice);
	deviceEnumerator->Release();
	deviceEnumerator = NULL;

	IAudioEndpointVolume *endpointVolume = NULL;
	hr = defaultDevice->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, NULL, (LPVOID *)&endpointVolume);
	defaultDevice->Release();
	defaultDevice = NULL;

	// -------------------------
	float currentVolume = 0;
	endpointVolume->GetMasterVolumeLevel(&currentVolume);
	//printf("Current volume in dB is: %f\n", currentVolume);

	hr = endpointVolume->GetMasterVolumeLevelScalar(&currentVolume);

	float min, max, inc;
	endpointVolume->GetVolumeRange(&min, &max, &inc);
	//printf("Current volume as a scalar is: %f\n", currentVolume);

	if (ch == 'u')
		newVolume = currentVolume + 0.1;
	else if (ch == 'd')
		newVolume = currentVolume - 0.1;

	// Keep the scalar volume within the valid [0.0, 1.0] range
	if (newVolume > 1.0)
		newVolume = 1.0;
	else if (newVolume < 0.0)
		newVolume = 0.0;

	if (ch == 'm')
	{
		endpointVolume->SetMute(TRUE, NULL);
	}
	else if (ch == 'n')
	{
		endpointVolume->SetMute(FALSE, NULL);
	}
	else
	{
		if (decibels)
		{
			hr = endpointVolume->SetMasterVolumeLevel((float)newVolume, NULL);
			//endpointVolume->VolumeStepUp(NULL);
		}
		else if (scalar)
		{
			hr = endpointVolume->SetMasterVolumeLevelScalar((float)newVolume, NULL);
			//endpointVolume->VolumeStepUp(NULL);
		}
	}


	endpointVolume->Release();

	CoUninitialize();
}
Code Example #12
void PlayAudio()
{
	REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;	// 100-nanosecond units, so this is 1 second
	REFERENCE_TIME hnsActualDuration;

	HRESULT hr;

	IMMDeviceEnumerator *pEnumerator = NULL;
	IMMDevice *pDevice = NULL;
	IAudioClient *pAudioClient = NULL;
	IAudioRenderClient *pRenderClient = NULL;
	WAVEFORMATEX *pwfx = NULL;
	UINT32 bufferFrameCount;
	UINT32 numFramesAvailable;
	UINT32 numFramesPadding;
	BYTE *pData;
	DWORD flags = 0;


	hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
	EXIT_ON_ERROR(hr);

	hr = pEnumerator->GetDefaultAudioEndpoint(
		eRender, eConsole, &pDevice);
	EXIT_ON_ERROR(hr);

	hr = pDevice->Activate(
			IID_IAudioClient, CLSCTX_ALL,
			NULL, (void**)&pAudioClient);
	EXIT_ON_ERROR(hr);

	hr = pAudioClient->GetMixFormat(&pwfx);
	EXIT_ON_ERROR(hr);

	hr = pAudioClient->Initialize(
			AUDCLNT_SHAREMODE_SHARED,
			0,
			hnsRequestedDuration,
			0,
			pwfx,
			NULL);
	EXIT_ON_ERROR(hr);

	// Get the actual size of the allocated buffer.
    hr = pAudioClient->GetBufferSize(&bufferFrameCount);
	EXIT_ON_ERROR(hr);

    hr = pAudioClient->GetService(
                         IID_IAudioRenderClient,
                         (void**)&pRenderClient);
	EXIT_ON_ERROR(hr);

    // Grab the entire buffer for the initial fill operation.
    hr = pRenderClient->GetBuffer(bufferFrameCount, &pData);
	EXIT_ON_ERROR(hr);

	// load initial data
	hr = LoadAudioBuffer(bufferFrameCount, pData, pwfx, &flags);
	EXIT_ON_ERROR(hr);

	hr = pRenderClient->ReleaseBuffer(bufferFrameCount, flags);
	EXIT_ON_ERROR(hr);

	// Calculate the actual duration of the allocated buffer.
    hnsActualDuration = (REFERENCE_TIME)((double)REFTIMES_PER_SEC * bufferFrameCount / pwfx->nSamplesPerSec);

    hr = pAudioClient->Start();  // Start playing.
	EXIT_ON_ERROR(hr);

    // Each loop fills about half of the shared buffer.
	while (flags != AUDCLNT_BUFFERFLAGS_SILENT)
	{
        // Sleep for half the buffer duration.
        Sleep((DWORD)(hnsActualDuration/REFTIMES_PER_MILLISEC/2));

        // See how much buffer space is available.
        hr = pAudioClient->GetCurrentPadding(&numFramesPadding);
        EXIT_ON_ERROR(hr)

        numFramesAvailable = bufferFrameCount - numFramesPadding;

        // Grab all the available space in the shared buffer.
        hr = pRenderClient->GetBuffer(numFramesAvailable, &pData);
        EXIT_ON_ERROR(hr)

        // Get next 1/2-second of data from the audio source.
		hr = LoadAudioBuffer(numFramesAvailable, pData, pwfx, &flags);
        EXIT_ON_ERROR(hr)

        hr = pRenderClient->ReleaseBuffer(numFramesAvailable, flags);
        EXIT_ON_ERROR(hr)
    }

    // Wait for last data in buffer to play before stopping.
    Sleep((DWORD)(hnsActualDuration/REFTIMES_PER_MILLISEC/2));

    hr = pAudioClient->Stop();  // Stop playing.
	EXIT_ON_ERROR(hr);


Exit:
	CoTaskMemFree(pwfx);
	SAFE_RELEASE(pEnumerator);
	SAFE_RELEASE(pDevice);
	SAFE_RELEASE(pAudioClient);
	SAFE_RELEASE(pRenderClient);
}
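PlayAudio (and LoopbackCaptureSetup below) assumes the constants and helper macros from the WASAPI rendering-stream sample, which this excerpt does not include; they are conventionally defined along these lines (shown here as an assumption). LoadAudioBuffer stands in for the application's own audio source.

// Conventional helper definitions assumed by the example above.
// REFERENCE_TIME is expressed in 100-nanosecond units.
#define REFTIMES_PER_SEC       10000000
#define REFTIMES_PER_MILLISEC  10000

#define EXIT_ON_ERROR(hres) \
    if (FAILED(hres)) { goto Exit; }

#define SAFE_RELEASE(punk) \
    if ((punk) != NULL) { (punk)->Release(); (punk) = NULL; }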
Code Example #13
// we only call this once...per hit of the play button :)
HRESULT LoopbackCaptureSetup()
{
	assert(shouldStop); // duplicate starts would be odd...
	shouldStop = false; // allow graphs to restart, if they so desire...
	pnFrames = 0;
	bool bInt16 = true; // makes it actually work, for some reason...LODO
	
    HRESULT hr;
    hr = get_default_device(&m_pMMDevice); // so it can re-place our pointer...
    if (FAILED(hr)) {
        return hr;
    }

	// tell it to not overflow one buffer's worth <sigh> not sure if this is right or not, and thus we don't "cache" or "buffer" more than that much currently...
	// but a buffer size is a buffer size...hmm...as long as we keep it small though...
	assert(expectedMaxBufferSize <= pBufOriginalSize);
    // activate an (the default, for us, since we want loopback) IAudioClient
    hr = m_pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        ShowOutput("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }
    
    // get the default device periodicity, why? I don't know...
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // get the default device format (incoming...)
    WAVEFORMATEX *pwfx; // incoming wave...
	// apparently propagated by GetMixFormat...
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    if (true /*bInt16*/) {
        // coerce int-16 wave format
        // can do this in-place since we're not changing the size of the format
        // also, the engine will auto-convert from float to int for us
        switch (pwfx->wFormatTag) {
            case WAVE_FORMAT_IEEE_FLOAT:
				assert(false);// we never get here...I hope...
                pwfx->wFormatTag = WAVE_FORMAT_PCM;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                break;

            case WAVE_FORMAT_EXTENSIBLE:
                {
                    // naked scope for case-local variable
                    PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
                    if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
						// WE GET HERE!
                        pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                        pEx->Samples.wValidBitsPerSample = 16;
                        pwfx->wBitsPerSample = 16;
                        pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                        pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
						/* scawah lodo...
						if(ifNotNullThenJustSetTypeOnly) {
							PWAVEFORMATEXTENSIBLE pEx2 = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(ifNotNullThenJustSetTypeOnly);
							pEx2->SubFormat = pEx->SubFormat;
							pEx2->Samples.wValidBitsPerSample = pEx->Samples.wValidBitsPerSample;
						} */
                    } else {
                        ShowOutput("Don't know how to coerce mix format to int-16\n");
                        CoTaskMemFree(pwfx);
                        pAudioClient->Release();
                        return E_UNEXPECTED;
                    }
                }
                break;

            default:
                ShowOutput("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
                CoTaskMemFree(pwfx);
                pAudioClient->Release();
                return E_UNEXPECTED;
        }
    }
	/* scawah setting stream types up to match...didn't seem to work well...

	if(ifNotNullThenJustSetTypeOnly) {
		// pwfx is set at this point...
		WAVEFORMATEX* pwfex = ifNotNullThenJustSetTypeOnly;
		// copy them all out as the possible format...hmm...


                pwfx->wFormatTag = WAVE_FORMAT_PCM;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;


		pwfex->wFormatTag = pwfx->wFormatTag;
		pwfex->nChannels = pwfx->nChannels;
        pwfex->nSamplesPerSec = pwfx->nSamplesPerSec;
        pwfex->wBitsPerSample = pwfx->wBitsPerSample;
        pwfex->nBlockAlign = pwfx->nBlockAlign;
        pwfex->nAvgBytesPerSec = pwfx->nAvgBytesPerSec;
        pwfex->cbSize = pwfx->cbSize;
		//FILE *fp = fopen("/normal2", "w"); // fails on me? maybe juts a VLC thing...
		//fShowOutput(fp, "hello world %d %d %d %d %d %d %d", pwfex->wFormatTag, pwfex->nChannels, 
		//	pwfex->nSamplesPerSec, pwfex->wBitsPerSample, pwfex->nBlockAlign, pwfex->nAvgBytesPerSec, pwfex->cbSize );
		//fclose(fp);
		// cleanup
		// I might be leaking here...
		CoTaskMemFree(pwfx);
        pAudioClient->Release();
        //m_pMMDevice->Release();
		return hr;
	}*/

    MMCKINFO ckRIFF = {0};
    MMCKINFO ckData = {0};

    nBlockAlign = pwfx->nBlockAlign;
    

// avoid stuttering on close
// http://social.msdn.microsoft.com/forums/en-US/windowspro-audiodevelopment/thread/c7ba0a04-46ce-43ff-ad15-ce8932c00171/ 
	
//IAudioClient *pAudioClient = NULL;
//IAudioCaptureClient *pCaptureClient = NULL;
	
IMMDeviceEnumerator *pEnumerator = NULL;
IMMDevice *pDevice = NULL;

IAudioRenderClient *pRenderClient = NULL;
WAVEFORMATEXTENSIBLE *captureDataFormat = NULL;
BYTE *captureData;

    REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;

    hr = CoCreateInstance(
           CLSID_MMDeviceEnumerator, NULL,
           CLSCTX_ALL, IID_IMMDeviceEnumerator,
           (void**)&pEnumerator);
    EXIT_ON_ERROR(hr)

    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);
    EXIT_ON_ERROR(hr)

    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
    EXIT_ON_ERROR(hr)

    hr = pAudioClient->GetMixFormat((WAVEFORMATEX **)&captureDataFormat);
    EXIT_ON_ERROR(hr)

	
    // Silence: initialise in sharedmode [this is the "silence" bug overwriter, so buffer doesn't matter as much...]
    hr = pAudioClient->Initialize(
                         AUDCLNT_SHAREMODE_SHARED,
                         0,
					     REFTIMES_PER_SEC, // buffer size a full 1.0s, though prolly doesn't matter here.
                         0,
                         pwfx,
                         NULL);
    EXIT_ON_ERROR(hr)

    // get the frame count
    UINT32  bufferFrameCount;
    hr = pAudioClient->GetBufferSize(&bufferFrameCount);
    EXIT_ON_ERROR(hr)

    // create a render client
    hr = pAudioClient->GetService(IID_IAudioRenderClient, (void**)&pRenderClient);
    EXIT_ON_ERROR(hr)

    // get the buffer
    hr = pRenderClient->GetBuffer(bufferFrameCount, &captureData);
    EXIT_ON_ERROR(hr)

    // release it
    hr = pRenderClient->ReleaseBuffer(bufferFrameCount, AUDCLNT_BUFFERFLAGS_SILENT);
    EXIT_ON_ERROR(hr)

    // release the audio client
    pAudioClient->Release();
    EXIT_ON_ERROR(hr)


    // create a new IAudioClient
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
    EXIT_ON_ERROR(hr)

    // -============================ now the sniffing code initialization stuff, direct from mauritius... ===================================

	// call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to have to do this in a timer-driven loop...

    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_LOOPBACK,
        REFTIMES_PER_SEC, // buffer size a full 1.0s, seems ok VLC
		0, pwfx, 0
    );
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }
    CoTaskMemFree(pwfx);

    // activate an IAudioCaptureClient
    hr = pAudioClient->GetService(
        __uuidof(IAudioCaptureClient),
        (void**)&pAudioCaptureClient // CARE INSTANTIATION
    );
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }
    
    // register with MMCSS
    DWORD nTaskIndex = 0;

    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (NULL == hTask) {
        DWORD dwErr = GetLastError();
        ShowOutput("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }    

    // call IAudioClient::Start
    hr = pAudioClient->Start();
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::Start failed: hr = 0x%08x\n", hr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return hr;
    }
    
    bFirstPacket = true;

	// start the forever grabbing thread...
	DWORD dwThreadID;
    m_hThread = CreateThread(NULL,
                            0,
                            propagateBufferForever,
                            0,
                            0,
                            &dwThreadID);
    if(!m_hThread)
    {
        DWORD dwErr = GetLastError();
        return HRESULT_FROM_WIN32(dwErr);
    } else {
		// we...shouldn't need this...maybe?
		// seems to make no difference...
		// SetThreadPriority returns a BOOL, not an HRESULT, so check it directly.
		if (!SetThreadPriority(m_hThread, THREAD_PRIORITY_TIME_CRITICAL)) {
			// of course we always want to be a high prio thread, right? [we don't use much cpu...]
			return HRESULT_FROM_WIN32(GetLastError());
		}
	}

	return hr;
} // end LoopbackCaptureSetup
Code Example #14
//
//  Based on the input switches, pick the specified device to use.
//
bool PickDevice(IMMDevice **DeviceToUse, bool *IsDefaultDevice, ERole *DefaultDeviceRole)
{
    HRESULT hr;
    bool retValue = true;
    IMMDeviceEnumerator *deviceEnumerator = NULL;
    IMMDeviceCollection *deviceCollection = NULL;

    *IsDefaultDevice = false;   // Assume we're not using the default device.

    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&deviceEnumerator));
    if (FAILED(hr))
    {
        printf("Unable to instantiate device enumerator: %x\n", hr);
        retValue = false;
        goto Exit;
    }

    IMMDevice *device = NULL;

    //
    //  First off, if none of the console switches was specified, use the console device.
    //
    if (!UseConsoleDevice && !UseCommunicationsDevice && !UseMultimediaDevice && OutputEndpoint == NULL)
    {
        //
        //  The user didn't specify an output device, prompt the user for a device and use that.
        //
        hr = deviceEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &deviceCollection);
        if (FAILED(hr))
        {
            printf("Unable to retrieve device collection: %x\n", hr);
            retValue = false;
            goto Exit;
        }

        printf("Select an output device:\n");
        printf("    0:  Default Console Device\n");
        printf("    1:  Default Communications Device\n");
        printf("    2:  Default Multimedia Device\n");
        UINT deviceCount;
        hr = deviceCollection->GetCount(&deviceCount);
        if (FAILED(hr))
        {
            printf("Unable to get device collection length: %x\n", hr);
            retValue = false;
            goto Exit;
        }
        for (UINT i = 0 ; i < deviceCount ; i += 1)
        {
            LPWSTR deviceName;

            deviceName = GetDeviceName(deviceCollection, i);
            if (deviceName == NULL)
            {
                retValue = false;
                goto Exit;
            }
            printf("    %d:  %S\n", i + 3, deviceName);
            free(deviceName);
        }
        wchar_t choice[10];
        _getws_s(choice);   // Note: Using the safe CRT version of _getws.

        long deviceIndex;
        wchar_t *endPointer;

        deviceIndex = wcstoul(choice, &endPointer, 0);
        if (deviceIndex == 0 && endPointer == choice)
        {
            printf("unrecognized device index: %S\n", choice);
            retValue = false;
            goto Exit;
        }
        switch (deviceIndex)
        {
        case 0:
            UseConsoleDevice = 1;
            break;
        case 1:
            UseCommunicationsDevice = 1;
            break;
        case 2:
            UseMultimediaDevice = 1;
            break;
        default:
            hr = deviceCollection->Item(deviceIndex - 3, &device);
            if (FAILED(hr))
            {
                printf("Unable to retrieve device %d: %x\n", deviceIndex - 3, hr);
                retValue = false;
                goto Exit;
            }
            break;
        }
    } 
    else if (OutputEndpoint != NULL)
    {
        hr = deviceEnumerator->GetDevice(OutputEndpoint, &device);
        if (FAILED(hr))
        {
            printf("Unable to get endpoint for endpoint %S: %x\n", OutputEndpoint, hr);
            retValue = false;
            goto Exit;
        }
    }

    if (device == NULL)
    {
        ERole deviceRole = eConsole;    // Assume we're using the console role.
        if (UseConsoleDevice)
        {
            deviceRole = eConsole;
        }
        else if (UseCommunicationsDevice)
        {
            deviceRole = eCommunications;
        }
        else if (UseMultimediaDevice)
        {
            deviceRole = eMultimedia;
        }
        hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, deviceRole, &device);
        if (FAILED(hr))
        {
            printf("Unable to get default device for role %d: %x\n", deviceRole, hr);
            retValue = false;
            goto Exit;
        }
        *IsDefaultDevice = true;
        *DefaultDeviceRole = deviceRole;
    }

    *DeviceToUse = device;
    retValue = true;
Exit:
    SafeRelease(&deviceCollection);
    SafeRelease(&deviceEnumerator);

    return retValue;
}
Code Example #15
File: AESinkWASAPI.cpp Project: CaptainRewind/xbmc
bool CAESinkWASAPI::Initialize(AEAudioFormat &format, std::string &device)
{
  if (m_initialized)
    return false;

  m_device = device;
  bool bdefault = false;

  /* Save requested format */
  /* Clear returned format */
  sinkReqFormat = format.m_dataFormat;
  sinkRetFormat = AE_FMT_INVALID;

  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDeviceCollection* pEnumDevices = NULL;

  HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

  /* Get our device. First try to find the named device. */
  UINT uiCount = 0;

  hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

  hr = pEnumDevices->GetCount(&uiCount);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

  if(StringUtils::EndsWithNoCase(device, std::string("default")))
    bdefault = true;

  if(!bdefault)
  {
    for (UINT i = 0; i < uiCount; i++)
    {
      IPropertyStore *pProperty = NULL;
      PROPVARIANT varName;

      hr = pEnumDevices->Item(i, &m_pDevice);
      EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint failed.")

      hr = m_pDevice->OpenPropertyStore(STGM_READ, &pProperty);
      EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.")

      hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
      if (FAILED(hr))
      {
        CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint GUID failed.");
        SAFE_RELEASE(pProperty);
        goto failed;
      }

      std::string strDevName = localWideToUtf(varName.pwszVal);

      if (device == strDevName)
        i = uiCount;
      else
        SAFE_RELEASE(m_pDevice);

      PropVariantClear(&varName);
      SAFE_RELEASE(pProperty);
    }
  }
  SAFE_RELEASE(pEnumDevices);

  if (!m_pDevice)
  {
    if(!bdefault)
      CLog::Log(LOGINFO, __FUNCTION__": Could not locate the device named \"%s\" in the list of WASAPI endpoint devices.  Trying the default device...", device.c_str());
    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &m_pDevice);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Could not retrieve the default WASAPI audio endpoint.")

    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;

    hr = m_pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.")

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);

    device = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);
    SAFE_RELEASE(pProperty);
  }

  SAFE_RELEASE(pEnumerator);

  hr = m_pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&m_pAudioClient);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Activating the WASAPI endpoint device failed.")

  if (!InitializeExclusive(format))
  {
    CLog::Log(LOGINFO, __FUNCTION__": Could not Initialize Exclusive with that format");
    goto failed;
  }

  /* get the buffer size and calculate the frames for AE */
  m_pAudioClient->GetBufferSize(&m_uiBufferLen);

  format.m_frames       = m_uiBufferLen;
  format.m_frameSamples = format.m_frames * format.m_channelLayout.Count();
  m_format              = format;
  sinkRetFormat         = format.m_dataFormat;

  hr = m_pAudioClient->GetService(IID_IAudioRenderClient, (void**)&m_pRenderClient);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not initialize the WASAPI render client interface.")

  m_needDataEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
  hr = m_pAudioClient->SetEventHandle(m_needDataEvent);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not set the WASAPI event handler.");

  m_initialized = true;
  m_isDirty     = false;

  // allow feeding less samples than buffer size
  // if the device is opened exclusive and event driven, provided samples must match buffersize
  // ActiveAE tries to align provided samples with buffer size but cannot guarantee (e.g. transcoding)
  // this can be avoided by dropping the event mode which has not much benefit; SoftAE polls anyway
  delete [] m_pBuffer;
  m_pBuffer = new uint8_t[format.m_frames * format.m_frameSize];
  m_bufferPtr = 0;

  return true;

failed:
  CLog::Log(LOGERROR, __FUNCTION__": WASAPI initialization failed.");
  SAFE_RELEASE(pEnumDevices);
  SAFE_RELEASE(pEnumerator);
  SAFE_RELEASE(m_pRenderClient);
  SAFE_RELEASE(m_pAudioClient);
  SAFE_RELEASE(m_pDevice);
  if(m_needDataEvent)
  {
    CloseHandle(m_needDataEvent);
    m_needDataEvent = 0;
  }

  return false;
}
Code Example #16
std::vector<AudioDevice> EnumerateAudioDevices(_Out_opt_ AudioDevice* defaultDevice = nullptr)
{
    UINT32                   deviceCount      = 0;
    IMMDeviceEnumerator     *immDevEnum       = nullptr;
    IMMDeviceCollection     *immDevCollection = nullptr;
    IMMDevice               *immDev           = nullptr;
    std::vector<AudioDevice> vAudioDevices;

    HRESULT hResult = CoCreateInstance(
    __uuidof(MMDeviceEnumerator), NULL,
    CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**) &immDevEnum);

    if (FAILED(hResult)) {
        idLib::Warning( "Failed to get audio enumerator" );
        return std::move(vAudioDevices);
    }

    if (defaultDevice != nullptr)
    {
        ZeroMemory(defaultDevice, sizeof(AudioDevice));

        IMMDevice *defaultImmDev = nullptr;

        // @pjb: get the default audio endpoint and make it the first one in the list
        if (SUCCEEDED(immDevEnum->GetDefaultAudioEndpoint(eRender, eConsole, &defaultImmDev)))
        {
            GetAudioDeviceDetails(defaultImmDev, defaultDevice);
            defaultImmDev->Release();
        }
    }

    hResult = immDevEnum->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &immDevCollection);
    if (FAILED(hResult)) {
        idLib::Warning( "Failed to get audio endpoints" );
        immDevEnum->Release();
        return std::move(vAudioDevices);
    }

    hResult = immDevCollection->GetCount(&deviceCount);
    if (FAILED(hResult)) {
        idLib::Warning( "No audio devices found" );
        immDevCollection->Release();
        immDevEnum->Release();
        return std::move(vAudioDevices);
    }

    for (UINT i = 0; i < deviceCount; i++) {
        AudioDevice ad;

        hResult = immDevCollection->Item(i, &immDev);
        if (SUCCEEDED(hResult)) {
            hResult = GetAudioDeviceDetails(immDev, &ad);
        }

        if (SUCCEEDED(hResult)) {
            vAudioDevices.push_back(ad);
        }

        if (immDev != nullptr) {
            immDev->Release();
        }
    }

    immDevCollection->Release();
    immDevEnum->Release();

    return std::move(vAudioDevices);
}
Code Example #17
File: wasapiengine.cpp Project: CUE0/gambatte
long WasapiEngine::doInit(long rate, int const latency) {
	{
		IMMDevice *pDevice = 0;
		IMMDeviceEnumerator *pEnumerator = 0;
		HRESULT hr;

		if (FAILED(CoCreateInstance(CLSID_MMDeviceEnumerator, 0, CLSCTX_ALL,
		                            IID_IMMDeviceEnumerator,
		                            reinterpret_cast<void **>(&pEnumerator)))) {
			std::cerr << "CoCreateInstance failed" << std::endl;
			return -1;
		}

		if (deviceSelector->count() > 1) {
			hr = pEnumerator->GetDevice(
				deviceSelector->itemData(deviceIndex).value<std::wstring>().c_str(),
				&pDevice);
		} else
			hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);

		pEnumerator->Release();
		if (FAILED(hr)) {
			std::cerr << "pEnumerator->GetDefaultAudioEndpoint failed" << std::endl;
			return -1;
		}

		hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, 0,
		                       reinterpret_cast<void **>(&pAudioClient));
		pDevice->Release();
		if (FAILED(hr)) {
			std::cerr << "pDevice->Activate failed" << std::endl;
			return -1;
		}
	}

	{
		static GUID const ksdataformat_subtype_pcm = {
			WAVE_FORMAT_PCM, 0x0000, 0x0010,
			{ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
		};

		WAVEFORMATEXTENSIBLE wfext;
		std::memset(&wfext, 0, sizeof wfext);
		wfext.Format.cbSize = sizeof wfext - sizeof wfext.Format;
		wfext.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
		wfext.Format.nChannels = 2;
		wfext.Format.nSamplesPerSec = rate;
		wfext.Format.wBitsPerSample = 16;
		wfext.Samples.wValidBitsPerSample = 16;
		wfext.SubFormat = ksdataformat_subtype_pcm;
		wfext.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;

		if (!exclusive_.value()) {
			WAVEFORMATEX *mwfe = 0;
			if (SUCCEEDED(pAudioClient->GetMixFormat(&mwfe)) && mwfe) {
				wfext.Format.nChannels = std::max<int>(mwfe->nChannels, 2);
				wfext.Format.nSamplesPerSec = mwfe->nSamplesPerSec;
				if (mwfe->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
					wfext.dwChannelMask =
						reinterpret_cast<WAVEFORMATEXTENSIBLE *>(mwfe)->dwChannelMask;
				}

				CoTaskMemFree(mwfe);
			} else
				std::cerr << "pAudioClient->GetMixFormat failed\r\n";

			if (!(eventHandle_ = CreateEventA(0, false, false, 0)))
				std::cerr << "CreateEvent failed\r\n";
		}

		wfext.Format.nBlockAlign = wfext.Format.nChannels * wfext.Format.wBitsPerSample >> 3;
		wfext.Format.nAvgBytesPerSec = wfext.Format.nSamplesPerSec * wfext.Format.nBlockAlign;
		nchannels_ = wfext.Format.nChannels;
		rate = wfext.Format.nSamplesPerSec;

		HRESULT hr = pAudioClient->Initialize(
			exclusive_.value() ? AUDCLNT_SHAREMODE_EXCLUSIVE : AUDCLNT_SHAREMODE_SHARED,
			eventHandle_ ? AUDCLNT_STREAMFLAGS_EVENTCALLBACK : 0,
			latency * 10000,
			0,
			&wfext.Format,
			0);
		if (FAILED(hr)) {
			std::cerr << "pAudioClient->Initialize failed: " << hr << "\r\n";
			return -1;
		}
	}

	if (eventHandle_ && FAILED(pAudioClient->SetEventHandle(eventHandle_))) {
		std::cerr << "pAudioClient->SetEventHandle failed\r\n";
		return -1;
	}
	if (FAILED(pAudioClient->GetBufferSize(&bufferFrameCount))) {
		std::cerr << "pAudioClient->GetBufferSize failed" << std::endl;
		return -1;
	}
	if (FAILED(pAudioClient->GetService(IID_IAudioRenderClient,
	                                    reinterpret_cast<void **>(&pRenderClient)))) {
		std::cerr << "pAudioClient->GetService failed" << std::endl;
		return -1;
	}
	if (FAILED(pAudioClient->GetService(IID_IAudioClock,
	                                    reinterpret_cast<void **>(&pAudioClock)))) {
		std::cerr << "pAudioClient->GetService failed" << std::endl;
		return -1;
	}

	{
		UINT64 freq = 0;
		pAudioClock->GetFrequency(&freq);
		posFrames = freq / (rate ? rate : 1);
	}

	pos_ = 0;
	est = RateEst(rate, bufferFrameCount);

	return rate;
}
Code Example #18
	static bool UpdateAudioDucking(bool bDuckingOptOutChecked)
	{
		HRESULT hr = S_OK;

		//  Start with the default endpoint.
		IMMDeviceEnumerator* pDeviceEnumerator = NULL;
		hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
			NULL,
			CLSCTX_INPROC_SERVER,
			IID_PPV_ARGS(&pDeviceEnumerator));

		if (SUCCEEDED(hr))
		{
			{
				IMMDevice* pEndpoint = NULL;
				hr = pDeviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pEndpoint);
				if (SUCCEEDED(hr))
				{
					LPWSTR Desc;
					pEndpoint->GetId(&Desc);
					UE_LOG(LogVoiceCapture, Display, TEXT("%s ducking on audio device. Desc: %s"), bDuckingOptOutChecked ? TEXT("Disabling") : TEXT("Enabling"), Desc);
					CoTaskMemFree(Desc);

					FAudioDuckingWindows::EnableDuckingOptOut(pEndpoint, bDuckingOptOutChecked);
					pEndpoint->Release();
					pEndpoint = NULL;
				}
			}

			if (0) // reference for enumerating all endpoints in case its necessary
			{
				IMMDeviceCollection* pDeviceCollection = NULL;
				IMMDevice* pCollEndpoint = NULL;
				hr = pDeviceEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pDeviceCollection);
				if (SUCCEEDED(hr))
				{
					IPropertyStore *pProps = NULL;

					::UINT DeviceCount = 0;
					pDeviceCollection->GetCount(&DeviceCount);
					for (::UINT i = 0; i < DeviceCount; ++i)
					{
						hr = pDeviceCollection->Item(i, &pCollEndpoint);
						if (SUCCEEDED(hr) && pCollEndpoint)
						{
							LPWSTR Desc;
							pCollEndpoint->GetId(&Desc);

							hr = pCollEndpoint->OpenPropertyStore(STGM_READ, &pProps);
							if (SUCCEEDED(hr))
							{
								PROPVARIANT varName;
								// Initialize container for property value.
								PropVariantInit(&varName);

								// Get the endpoint's friendly-name property.
								hr = pProps->GetValue(
									PKEY_Device_FriendlyName, &varName);
								if (SUCCEEDED(hr))
								{
									// Print endpoint friendly name and endpoint ID.
									UE_LOG(LogVoiceCapture, Display, TEXT("%s ducking on audio device [%d]: \"%s\" (%s)"),
										bDuckingOptOutChecked ? TEXT("Disabling") : TEXT("Enabling"),
										i, varName.pwszVal, Desc);

									CoTaskMemFree(Desc);

									Desc = NULL;
									PropVariantClear(&varName);

									pProps->Release();
									pProps = NULL;
								}
							}

							FAudioDuckingWindows::EnableDuckingOptOut(pCollEndpoint, bDuckingOptOutChecked);

							pCollEndpoint->Release();
							pCollEndpoint = NULL;
						}
					}

					pDeviceCollection->Release();
					pDeviceCollection = NULL;
				}
			}

			pDeviceEnumerator->Release();
			pDeviceEnumerator = NULL;
		}

		if (FAILED(hr))
		{
			UE_LOG(LogVoiceCapture, Warning, TEXT("Failed to duck audio endpoint. Error: 0x%08x"), hr);
		}

		return SUCCEEDED(hr);
	}
Code Example #19
File: main.cpp Project: korha/SoundCardSwitch
//-------------------------------------------------------------------------------------------------
void FMain()
{
    if (const HWND hWnd = FindWindowW(g_wGuidClass, nullptr))
        PostMessageW(hWnd, WM_CLOSE, 0, 0);

    if (SUCCEEDED(CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED)))
    {
        IMMDeviceEnumerator *immDeviceEnumerator;
        if (CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), reinterpret_cast<LPVOID*>(&immDeviceEnumerator)) == S_OK)
        {
            IMMDevice *immDeviceDefault;
            if (immDeviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &immDeviceDefault) == S_OK)
            {
                wchar_t *wIdDefaultOld;
                HRESULT hr = immDeviceDefault->GetId(&wIdDefaultOld);
                immDeviceDefault->Release();
                if (hr == S_OK)
                {
                    IMMDeviceCollection *immDeviceCollection;
                    hr = immDeviceEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &immDeviceCollection);
                    immDeviceEnumerator->Release();
                    if (hr == S_OK)
                    {
                        UINT iCount;
                        if (immDeviceCollection->GetCount(&iCount) == S_OK)
                        {
                            bool bFail = true;
                            for (UINT i = 0; i < iCount; ++i)
                            {
                                IMMDevice *immDevice;
                                if (immDeviceCollection->Item(i, &immDevice) == S_OK)
                                {
                                    wchar_t *wIdEnum;
                                    hr = immDevice->GetId(&wIdEnum);
                                    immDevice->Release();
                                    if (hr == S_OK)
                                    {
                                        if (FCompareMemoryW(wIdDefaultOld, wIdEnum))
                                        {
                                            bFail = false;
                                            if (++i >= iCount)
                                                i = 0;
                                            hr = immDeviceCollection->Item(i, &immDevice);
                                            immDeviceCollection->Release();
                                            if (hr == S_OK)
                                            {
                                                wchar_t *wIdDefaultNew;
                                                if (immDevice->GetId(&wIdDefaultNew) == S_OK)
                                                {
                                                    IPropertyStore *ipStore;
                                                    hr = immDevice->OpenPropertyStore(STGM_READ, &ipStore);
                                                    immDevice->Release();
                                                    if (hr == S_OK)
                                                    {
                                                        PROPVARIANT propFriendlyName;
                                                        PropVariantInitFix(&propFriendlyName);
                                                        PROPERTYKEY propKeyFriendlyName;
                                                        propKeyFriendlyName.fmtid.Data1 = 0xA45C254E;
                                                        propKeyFriendlyName.fmtid.Data2 = 0xDF1C;
                                                        propKeyFriendlyName.fmtid.Data3 = 0x4EFD;
                                                        FCopyMemory(propKeyFriendlyName.fmtid.Data4);
                                                        propKeyFriendlyName.pid = 14;
                                                        hr = ipStore->GetValue(propKeyFriendlyName, &propFriendlyName);
                                                        ipStore->Release();
                                                        if (SUCCEEDED(hr))
                                                        {
                                                            IPolicyConfig *pPolicyConfig;
                                                            if (CoCreateInstance(__uuidof(CPolicyConfigClient), nullptr, CLSCTX_ALL, (GetVersion() & 0xFF) >= 10 ? __uuidof(IPolicyConfigWin10) : __uuidof(IPolicyConfig), reinterpret_cast<LPVOID*>(&pPolicyConfig)) == S_OK)
                                                            {
                                                                hr = pPolicyConfig->SetDefaultEndpoint(wIdDefaultNew, eConsole);
                                                                if (hr == S_OK)
                                                                {
                                                                    pPolicyConfig->SetDefaultEndpoint(wIdDefaultNew, eMultimedia);
                                                                    pPolicyConfig->SetDefaultEndpoint(wIdDefaultNew, eCommunications);
                                                                }
                                                                pPolicyConfig->Release();
                                                                if (hr == S_OK)
                                                                {
                                                                    WNDCLASSEX wndCl;
                                                                    wndCl.cbSize = sizeof(WNDCLASSEX);
                                                                    wndCl.style = 0;
                                                                    wndCl.lpfnWndProc = WindowProc;
                                                                    wndCl.cbClsExtra = 0;
                                                                    wndCl.cbWndExtra = 0;
                                                                    wndCl.hInstance = GetModuleHandleW(nullptr);
                                                                    wndCl.hIcon = nullptr;
                                                                    wndCl.hCursor = nullptr;
                                                                    wndCl.hbrBackground = nullptr;
                                                                    wndCl.lpszMenuName = nullptr;
                                                                    wndCl.lpszClassName = g_wGuidClass;
                                                                    wndCl.hIconSm = nullptr;

                                                                    if (RegisterClassExW(&wndCl))
                                                                    {
                                                                        if (CreateWindowExW(WS_EX_NOACTIVATE | WS_EX_LAYERED | WS_EX_TOOLWINDOW | WS_EX_TRANSPARENT | WS_EX_TOPMOST, g_wGuidClass, nullptr, WS_POPUP | WS_VISIBLE | WS_MAXIMIZE, 0, 0, 0, 0, nullptr, nullptr, wndCl.hInstance, propFriendlyName.pwszVal))
                                                                        {
                                                                            MSG msg;
                                                                            while (GetMessageW(&msg, nullptr, 0, 0) > 0)
                                                                                DispatchMessageW(&msg);
                                                                        }
                                                                        UnregisterClassW(g_wGuidClass, wndCl.hInstance);
                                                                    }
                                                                }
                                                            }
                                                        }
                                                        PropVariantClear(&propFriendlyName);
                                                    }
                                                    CoTaskMemFree(wIdDefaultNew);
                                                }
                                                else
                                                    immDevice->Release();
                                            }
                                            break;
                                        }
                                        CoTaskMemFree(wIdEnum);
                                    }
                                }
                            }
                            if (bFail)
                                immDeviceCollection->Release();
                        }
                        else
                            immDeviceCollection->Release();
                    }
                    CoTaskMemFree(wIdDefaultOld);
                }
                else
                    immDeviceEnumerator->Release();
            }
            else
                immDeviceEnumerator->Release();
        }
        CoUninitialize();
    }
}
Code example #20
bool MMDeviceAudioSource::Initialize(bool bMic, CTSTR lpID)
{
    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
    const IID IID_IMMDeviceEnumerator    = __uuidof(IMMDeviceEnumerator);
    const IID IID_IAudioClient           = __uuidof(IAudioClient);
    const IID IID_IAudioCaptureClient    = __uuidof(IAudioCaptureClient);

    HRESULT err;
    err = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&mmEnumerator);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDeviceEnumerator = %08lX"), (BOOL)bMic, err);
        return false;
    }

    if(bMic)
        err = mmEnumerator->GetDevice(lpID, &mmDevice);
    else
        err = mmEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &mmDevice);

    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDevice = %08lX"), (BOOL)bMic, err);
        return false;
    }

    err = mmDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&mmClient);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IAudioClient = %08lX"), (BOOL)bMic, err);
        return false;
    }

    WAVEFORMATEX *pwfx;
    err = mmClient->GetMixFormat(&pwfx);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get mix format from audio client = %08lX"), (BOOL)bMic, err);
        return false;
    }

    String strName = GetDeviceName();
    if(bMic)
    {
        Log(TEXT("------------------------------------------"));
        Log(TEXT("Using auxilary audio input: %s"), strName.Array());
    }

    //the internal audio engine should always use floats (or so I read), but I suppose just to be safe better check
    if(pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
    {
        WAVEFORMATEXTENSIBLE *wfext = (WAVEFORMATEXTENSIBLE*)pwfx;
        inputChannelMask = wfext->dwChannelMask;

        if(wfext->SubFormat != KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
        {
            AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Unsupported wave format"), (BOOL)bMic);
            return false;
        }
    }
    else if(pwfx->wFormatTag != WAVE_FORMAT_IEEE_FLOAT)
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Unsupported wave format"), (BOOL)bMic);
        return false;
    }

    inputChannels      = pwfx->nChannels;
    inputBitsPerSample = 32;
    inputBlockSize     = pwfx->nBlockAlign;
    inputSamplesPerSec = pwfx->nSamplesPerSec;

    DWORD flags = bMic ? 0 : AUDCLNT_STREAMFLAGS_LOOPBACK;
    err = mmClient->Initialize(AUDCLNT_SHAREMODE_SHARED, flags, ConvertMSTo100NanoSec(5000), 0, pwfx, NULL);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not initialize audio client, result = %08lX"), (BOOL)bMic, err);
        return false;
    }

    err = mmClient->GetService(IID_IAudioCaptureClient, (void**)&mmCapture);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get audio capture client, result = %08lX"), (BOOL)bMic, err);
        return false;
    }

    err = mmClient->GetService(__uuidof(IAudioClock), (void**)&mmClock);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get audio capture clock, result = %08lX"), (BOOL)bMic, err);
        return false;
    }

    CoTaskMemFree(pwfx);

    //-------------------------------------------------------------------------

    if(inputSamplesPerSec != 44100)
    {
        int errVal;

        int converterType = AppConfig->GetInt(TEXT("Audio"), TEXT("UseHighQualityResampling"), FALSE) ? SRC_SINC_FASTEST : SRC_LINEAR;
        resampler = src_new(converterType, 2, &errVal);//SRC_SINC_FASTEST//SRC_ZERO_ORDER_HOLD
        if(!resampler)
        {
            CrashError(TEXT("MMDeviceAudioSource::Initialize(%d): Could not initiate resampler"), (BOOL)bMic);
            return false;
        }

        resampleRatio = 44100.0 / double(inputSamplesPerSec);
        bResample = true;

        //----------------------------------------------------
        // hack to get rid of that weird first quirky resampled packet size
        // (always returns a non-441 sized packet on the first resample)

        SRC_DATA data;
        data.src_ratio = resampleRatio;

        List<float> blankBuffer;
        blankBuffer.SetSize(inputSamplesPerSec/100*2);

        data.data_in = blankBuffer.Array();
        data.input_frames = inputSamplesPerSec/100;

        UINT frameAdjust = UINT((double(data.input_frames) * resampleRatio) + 1.0);
        UINT newFrameSize = frameAdjust*2;

        tempResampleBuffer.SetSize(newFrameSize);

        data.data_out = tempResampleBuffer.Array();
        data.output_frames = frameAdjust;

        data.end_of_input = 0;

        int err = src_process(resampler, &data);

        nop();
    }

    //-------------------------------------------------------------------------

    if(inputChannels > 2)
    {
        if(inputChannelMask == 0)
        {
            switch(inputChannels)
            {
                case 3: inputChannelMask = KSAUDIO_SPEAKER_2POINT1; break;
                case 4: inputChannelMask = KSAUDIO_SPEAKER_QUAD;    break;
                case 5: inputChannelMask = KSAUDIO_SPEAKER_4POINT1; break;
                case 6: inputChannelMask = KSAUDIO_SPEAKER_5POINT1; break;
                case 8: inputChannelMask = KSAUDIO_SPEAKER_7POINT1; break;
            }
        }

        switch(inputChannelMask)
        {
            case KSAUDIO_SPEAKER_QUAD:              Log(TEXT("Using quad speaker setup"));                          break; //ocd anyone?
            case KSAUDIO_SPEAKER_2POINT1:           Log(TEXT("Using 2.1 speaker setup"));                           break;
            case KSAUDIO_SPEAKER_4POINT1:           Log(TEXT("Using 4.1 speaker setup"));                           break;
            case KSAUDIO_SPEAKER_SURROUND:          Log(TEXT("Using basic surround speaker setup"));                break;
            case KSAUDIO_SPEAKER_5POINT1:           Log(TEXT("Using 5.1 speaker setup"));                           break;
            case KSAUDIO_SPEAKER_5POINT1_SURROUND:  Log(TEXT("Using 5.1 surround speaker setup"));                  break;
            case KSAUDIO_SPEAKER_7POINT1:           Log(TEXT("Using 7.1 speaker setup (experimental)"));            break;
            case KSAUDIO_SPEAKER_7POINT1_SURROUND:  Log(TEXT("Using 7.1 surround speaker setup (experimental)"));   break;

            default:
                Log(TEXT("Using unknown speaker setup: 0x%lX"), inputChannelMask);
                CrashError(TEXT("Speaker setup not yet implemented -- dear god of all the audio APIs, the one I -have- to use doesn't support resampling or downmixing.  fabulous."));
                break;
        }
    }

    return true;
}
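Note: in the Initialize() above, the WAVEFORMATEX returned by GetMixFormat() is only released on the success path; every early return after that call leaks pwfx as well as the already-acquired COM interfaces. A minimal scope-guard sketch, not part of the OBS sources, that would make those early returns safe:

// Illustrative only: free a CoTaskMemAlloc'd block on every exit path.
struct CoTaskMemGuard
{
    void *p;
    explicit CoTaskMemGuard(void *ptr) : p(ptr) {}
    ~CoTaskMemGuard() { if (p) CoTaskMemFree(p); }   // runs on early returns too
};

// Hypothetical usage right after GetMixFormat() succeeds:
//     CoTaskMemGuard pwfxGuard(pwfx);   // the explicit CoTaskMemFree(pwfx) below becomes unnecessary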
Code example #21
bool CAESinkWASAPI::Initialize(AEAudioFormat &format, std::string &device)
{
  if (m_initialized)
    return false;

  CLog::Log(LOGDEBUG, __FUNCTION__": Initializing WASAPI Sink Rev. 1.0.5");

  m_device = device;

  /* Save requested format */
  /* Clear returned format */
  sinkReqFormat = format.m_dataFormat;
  sinkRetFormat = AE_FMT_INVALID;

  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDeviceCollection* pEnumDevices = NULL;

  HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

  //Get our device.
  //First try to find the named device.
  UINT uiCount = 0;

  hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

  hr = pEnumDevices->GetCount(&uiCount);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

  for (UINT i = 0; i < uiCount; i++)
  {
    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;

    hr = pEnumDevices->Item(i, &m_pDevice);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint failed.")

    hr = m_pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.")

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint GUID failed.");
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::wstring strRawDevName(varName.pwszVal);
    std::string strDevName = std::string(strRawDevName.begin(), strRawDevName.end());
    //g_charsetConverter.ucs2CharsetToStringCharset(strRawDevName, strDevName.c_str());

    if (device == strDevName)
      i = uiCount;
    else
      SAFE_RELEASE(m_pDevice);

    PropVariantClear(&varName);
    SAFE_RELEASE(pProperty);
  }

  SAFE_RELEASE(pEnumDevices);

  if (!m_pDevice)
  {
    CLog::Log(LOGINFO, __FUNCTION__": Could not locate the device named \"%s\" in the list of WASAPI endpoint devices.  Trying the default device...", device.c_str());
    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &m_pDevice);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Could not retrieve the default WASAPI audio endpoint.")

    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;

    hr = m_pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.")

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);

    std::wstring strRawDevName(varName.pwszVal);
    std::string strDevName = std::string(strRawDevName.begin(), strRawDevName.end());

    PropVariantClear(&varName);
    SAFE_RELEASE(pProperty);
  }

  //We are done with the enumerator.
  SAFE_RELEASE(pEnumerator);

  hr = m_pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&m_pAudioClient);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Activating the WASAPI endpoint device failed.")

  if (!InitializeExclusive(format))
  {
    CLog::Log(LOGINFO, __FUNCTION__": Could not Initialize Exclusive with that format");
    goto failed;
  }

  /* get the buffer size and calculate the frames for AE */
  m_pAudioClient->GetBufferSize(&m_uiBufferLen);

  format.m_frames       = m_uiBufferLen;
  format.m_frameSamples = format.m_frames * format.m_channelLayout.Count();
  m_format              = format;
  sinkRetFormat         = format.m_dataFormat;

  CLog::Log(LOGDEBUG, __FUNCTION__": Buffer Size     = %d Bytes", m_uiBufferLen * format.m_frameSize);

  hr = m_pAudioClient->GetService(IID_IAudioRenderClient, (void**)&m_pRenderClient);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not initialize the WASAPI render client interface.")

  m_needDataEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
  hr = m_pAudioClient->SetEventHandle(m_needDataEvent);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not set the WASAPI event handler.");

  m_initialized = true;
  m_isDirty     = false;

  return true;

failed:
  CLog::Log(LOGERROR, __FUNCTION__": WASAPI initialization failed.");
  SAFE_RELEASE(pEnumDevices);
  SAFE_RELEASE(pEnumerator);
  SAFE_RELEASE(m_pRenderClient);
  SAFE_RELEASE(m_pAudioClient);
  SAFE_RELEASE(m_pDevice);
  CloseHandle(m_needDataEvent);

  return false;
}
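Note: the sink registers m_needDataEvent through SetEventHandle() above, but the event-driven render loop itself is outside this excerpt. A hedged sketch of what such a loop commonly looks like for an exclusive-mode, event-driven stream (only the buffer length and interfaces above are taken from the code; the function name and frameSize parameter are illustrative):

// Illustrative only: write one full device buffer each time WASAPI signals the event.
static bool PumpOneBuffer(IAudioRenderClient *render, HANDLE needDataEvent,
                          UINT32 bufferFrames, unsigned int frameSize)
{
  if (WaitForSingleObject(needDataEvent, 1000) != WAIT_OBJECT_0)
    return false;                                      // timeout: device glitch or removal

  BYTE *pData = NULL;
  if (FAILED(render->GetBuffer(bufferFrames, &pData))) // exclusive event-driven: whole buffer per event
    return false;

  memset(pData, 0, (size_t)bufferFrames * frameSize);  // placeholder: real code copies audio here

  return SUCCEEDED(render->ReleaseBuffer(bufferFrames, 0));
}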
Code example #22
File: MMDeviceAudioSource.cpp Project: bradparks/OBS
bool MMDeviceAudioSource::Initialize(bool bMic, CTSTR lpID)
{
    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
    const IID IID_IMMDeviceEnumerator    = __uuidof(IMMDeviceEnumerator);
    const IID IID_IAudioClient           = __uuidof(IAudioClient);
    const IID IID_IAudioCaptureClient    = __uuidof(IAudioCaptureClient);

    HRESULT err;
    err = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&mmEnumerator);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDeviceEnumerator = %08lX"), (BOOL)bMic, err);
        return false;
    }

    bIsMic = bMic;

    if (bIsMic) {
        BOOL bMicSyncFixHack = GlobalConfig->GetInt(TEXT("Audio"), TEXT("UseMicSyncFixHack"));
        angerThreshold = bMicSyncFixHack ? 40 : 1000;
    }

    if (scmpi(lpID, TEXT("Default")) == 0)
        err = mmEnumerator->GetDefaultAudioEndpoint(bMic ? eCapture : eRender, bMic ? eCommunications : eConsole, &mmDevice);
    else
        err = mmEnumerator->GetDevice(lpID, &mmDevice);

    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IMMDevice = %08lX"), (BOOL)bMic, err);
        return false;
    }

    err = mmDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&mmClient);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not create IAudioClient = %08lX"), (BOOL)bMic, err);
        return false;
    }

    //-----------------------------------------------------------------
    // get name

    IPropertyStore *store;
    if(SUCCEEDED(mmDevice->OpenPropertyStore(STGM_READ, &store)))
    {
        PROPVARIANT varName;

        PropVariantInit(&varName);
        if(SUCCEEDED(store->GetValue(PKEY_Device_FriendlyName, &varName)))
        {
            CWSTR wstrName = varName.pwszVal;
            strDeviceName = wstrName;
        }

        store->Release();
    }

    if(bMic)
    {
        Log(TEXT("------------------------------------------"));
        Log(TEXT("Using auxilary audio input: %s"), GetDeviceName());

        bUseQPC = GlobalConfig->GetInt(TEXT("Audio"), TEXT("UseMicQPC")) != 0;
        if (bUseQPC)
            Log(TEXT("Using Mic QPC timestamps"));
    }
    else
    {
        Log(TEXT("------------------------------------------"));
        Log(TEXT("Using desktop audio input: %s"), GetDeviceName());

        bUseVideoTime = AppConfig->GetInt(TEXT("Audio"), TEXT("SyncToVideoTime")) != 0;
        SetTimeOffset(GlobalConfig->GetInt(TEXT("Audio"), TEXT("GlobalAudioTimeAdjust")));
    }

    //-----------------------------------------------------------------
    // get format

    WAVEFORMATEX *pwfx;
    err = mmClient->GetMixFormat(&pwfx);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get mix format from audio client = %08lX"), (BOOL)bMic, err);
        return false;
    }

    bool  bFloat;
    UINT  inputChannels;
    UINT  inputSamplesPerSec;
    UINT  inputBitsPerSample;
    UINT  inputBlockSize;
    DWORD inputChannelMask = 0;
    WAVEFORMATEXTENSIBLE *wfext = NULL;

    //the internal audio engine should always use floats (or so I read), but I suppose just to be safe better check
    if(pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
    {
        wfext = (WAVEFORMATEXTENSIBLE*)pwfx;
        inputChannelMask = wfext->dwChannelMask;

        if(wfext->SubFormat != KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
        {
            AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Unsupported wave format"), (BOOL)bMic);
            return false;
        }
    }
    else if(pwfx->wFormatTag != WAVE_FORMAT_IEEE_FLOAT)
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Unsupported wave format"), (BOOL)bMic);
        return false;
    }

    bFloat                = true;
    inputChannels         = pwfx->nChannels;
    inputBitsPerSample    = 32;
    inputBlockSize        = pwfx->nBlockAlign;
    inputSamplesPerSec    = pwfx->nSamplesPerSec;
    sampleWindowSize      = (inputSamplesPerSec/100);

    DWORD flags = bMic ? 0 : AUDCLNT_STREAMFLAGS_LOOPBACK;

    err = mmClient->Initialize(AUDCLNT_SHAREMODE_SHARED, flags, ConvertMSTo100NanoSec(5000), 0, pwfx, NULL);
    //err = AUDCLNT_E_UNSUPPORTED_FORMAT;

    if (err == AUDCLNT_E_UNSUPPORTED_FORMAT) { //workaround for razer kraken headset (bad drivers)
        pwfx->nBlockAlign     = 2*pwfx->nChannels;
        pwfx->nAvgBytesPerSec = inputSamplesPerSec*pwfx->nBlockAlign;
        pwfx->wBitsPerSample  = 16;

        wfext->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        wfext->Samples.wValidBitsPerSample = 16;

        bConvert = true;

        err = mmClient->Initialize(AUDCLNT_SHAREMODE_SHARED, flags, ConvertMSTo100NanoSec(5000), 0, pwfx, NULL);
    }

    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not initialize audio client, result = %08lX"), (BOOL)bMic, err);
        return false;
    }

    //-----------------------------------------------------------------
    // acquire services

    err = mmClient->GetService(IID_IAudioCaptureClient, (void**)&mmCapture);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get audio capture client, result = %08lX"), (BOOL)bMic, err);
        return false;
    }

    err = mmClient->GetService(__uuidof(IAudioClock), (void**)&mmClock);
    if(FAILED(err))
    {
        AppWarning(TEXT("MMDeviceAudioSource::Initialize(%d): Could not get audio capture clock, result = %08lX"), (BOOL)bMic, err);
        return false;
    }

    CoTaskMemFree(pwfx);

    //-----------------------------------------------------------------

    InitAudioData(bFloat, inputChannels, inputSamplesPerSec, inputBitsPerSample, inputBlockSize, inputChannelMask);

    return true;
}
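Note: when the Razer Kraken workaround above kicks in (bConvert == true), the capture client delivers 16-bit PCM rather than the float samples the rest of the pipeline expects. The actual conversion lives elsewhere in the file; in essence it is an int16-to-float widen, roughly like this sketch (function name is illustrative):

// Illustrative sketch: widen interleaved 16-bit PCM samples to normalized 32-bit floats.
static void ConvertS16ToFloat(const short *in, float *out, UINT numSamples)
{
    for (UINT i = 0; i < numSamples; i++)
        out[i] = float(in[i]) * (1.0f / 32768.0f);   // map [-32768, 32767] to roughly [-1.0, 1.0]
}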
Code example #23
File: Win32WASAPI.cpp Project: ndbroadbent/xbmc
bool CWin32WASAPI::Initialize(IAudioCallback* pCallback, const CStdString& device, int iChannels, enum PCMChannels *channelMap, unsigned int uiSamplesPerSec, unsigned int uiBitsPerSample, bool bResample, bool bIsMusic, bool bAudioPassthrough)
{
    CLog::Log(LOGDEBUG, __FUNCTION__": endpoint device %s", device.c_str());

    //First check if the version of Windows we are running on even supports WASAPI.
    if (!g_sysinfo.IsVistaOrHigher())
    {
        CLog::Log(LOGERROR, __FUNCTION__": WASAPI output requires Vista or higher.");
        return false;
    }

    //Only one exclusive stream may be initialized at one time.
    if(m_bIsAllocated)
    {
        CLog::Log(LOGERROR, __FUNCTION__": Cannot create more then one WASAPI stream at one time.");
        return false;
    }

    int layoutChannels = 0;

    if(!bAudioPassthrough)
    {
        //If no channel map is specified, use the default.
        if(!channelMap)
            channelMap = (PCMChannels *)wasapi_default_channel_layout[iChannels - 1];

        PCMChannels *outLayout = m_remap.SetInputFormat(iChannels, channelMap, uiBitsPerSample / 8);

        for(PCMChannels *channel = outLayout; *channel != PCM_INVALID; channel++)
            ++layoutChannels;

        //Expand monaural to stereo as most devices don't seem to like 1 channel PCM streams.
        //Stereo sources should be sent explicitly as two channels so that the external hardware
        //can apply ProLogic/5CH Stereo/etc processing on it.
        if(iChannels <= 2)
        {
            BuildChannelMapping(2, (PCMChannels *)wasapi_default_channel_layout[1]);

            layoutChannels = 2;
            m_remap.SetOutputFormat(2, m_SpeakerOrder, false);
        }
        else //Do the standard remapping.
        {
            BuildChannelMapping(layoutChannels, outLayout);
            m_remap.SetOutputFormat(layoutChannels, m_SpeakerOrder, false);
        }
    }

    m_bPlaying = false;
    m_bPause = false;
    m_bMuting = false;
    m_uiChannels = iChannels;
    m_uiBitsPerSample = uiBitsPerSample;
    m_bPassthrough = bAudioPassthrough;

    m_nCurrentVolume = g_settings.m_nVolumeLevel;
    m_pcmAmplifier.SetVolume(m_nCurrentVolume);

    WAVEFORMATEXTENSIBLE wfxex = {0};

    //fill waveformatex
    ZeroMemory(&wfxex, sizeof(WAVEFORMATEXTENSIBLE));
    wfxex.Format.cbSize          =  sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
    wfxex.Format.nChannels       = layoutChannels;
    wfxex.Format.nSamplesPerSec  = uiSamplesPerSec;
    if (bAudioPassthrough == true)
    {
        wfxex.dwChannelMask          = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
        wfxex.Format.wFormatTag      = WAVE_FORMAT_DOLBY_AC3_SPDIF;
        wfxex.SubFormat              = _KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF;
        wfxex.Format.wBitsPerSample  = 16;
        wfxex.Format.nChannels       = 2;
    }
    else
    {
        wfxex.dwChannelMask          = m_uiSpeakerMask;
        wfxex.Format.wFormatTag      = WAVE_FORMAT_EXTENSIBLE;
        wfxex.SubFormat              = KSDATAFORMAT_SUBTYPE_PCM;
        wfxex.Format.wBitsPerSample  = uiBitsPerSample;
    }

    wfxex.Samples.wValidBitsPerSample = uiBitsPerSample == 32 ? 24 : uiBitsPerSample;
    wfxex.Format.nBlockAlign       = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
    wfxex.Format.nAvgBytesPerSec   = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;

    m_uiAvgBytesPerSec = wfxex.Format.nAvgBytesPerSec;

    m_uiBytesPerFrame = wfxex.Format.nBlockAlign;
    m_uiBytesPerSrcFrame = bAudioPassthrough ? m_uiBytesPerFrame : iChannels * wfxex.Format.wBitsPerSample >> 3;

    IMMDeviceEnumerator* pEnumerator = NULL;
    IMMDeviceCollection* pEnumDevices = NULL;

    //Shut down Directsound.
    g_audioContext.SetActiveDevice(CAudioContext::NONE);

    HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %i", hr)

    //Get our device.
    //First try to find the named device.
    UINT uiCount = 0;

    hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

    hr = pEnumDevices->GetCount(&uiCount);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

    for(UINT i = 0; i < uiCount; i++)
    {
        IPropertyStore *pProperty = NULL;
        PROPVARIANT varName;

        hr = pEnumDevices->Item(i, &m_pDevice);
        EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint failed.")

        hr = m_pDevice->OpenPropertyStore(STGM_READ, &pProperty);
        EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.")

        hr = pProperty->GetValue(PKEY_Device_FriendlyName, &varName);
        if(FAILED(hr))
        {
            CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint device name failed.");
            SAFE_RELEASE(pProperty);
            goto failed;
        }

        CStdStringW strRawDevName(varName.pwszVal);
        CStdString strDevName;
        g_charsetConverter.wToUTF8(strRawDevName, strDevName);

        if(device == strDevName)
            i = uiCount;
        else
            SAFE_RELEASE(m_pDevice);

        PropVariantClear(&varName);
        SAFE_RELEASE(pProperty);
    }

    SAFE_RELEASE(pEnumDevices);

    if(!m_pDevice)
    {
        CLog::Log(LOGDEBUG, __FUNCTION__": Could not locate the device named \"%s\" in the list of WASAPI endpoint devices.  Trying the default device...", device.c_str());
        hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &m_pDevice);
        EXIT_ON_FAILURE(hr, __FUNCTION__": Could not retrieve the default WASAPI audio endpoint.")
    }
Code example #24
RVolumeControl::RVolumeControl(QWidget *parent)
	: QWidget(parent), _value(0), _min(0), _max(100), _isEnableSound(true), _isOn(true), _isDark(true), _isWinXP(false)
{
	endpointVolume = NULL;
	volumeNotification = NULL;

	// Hack from Valentin
	setProperty("ignoreFilter", true);
	setToolTip(tr("Volume control"));

	BOOL mute = false;
	float currVolume = 0.;

	// Detect the OS version (XP or older?)
	OSVERSIONINFO m_osinfo;
	ZeroMemory(&m_osinfo, sizeof(m_osinfo));
	m_osinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);

	if (GetVersionEx((LPOSVERSIONINFO) &m_osinfo))
	{
		if(m_osinfo.dwMajorVersion < 6)
		{
			_isWinXP = true;
		}
	}

	if(!_isWinXP)
	{
		CoInitialize(NULL);

		HRESULT hr;

		IMMDeviceEnumerator *deviceEnumerator = NULL;
		hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), (LPVOID *)&deviceEnumerator);

		if(hr == S_OK)
		{
			IMMDevice *defaultDevice = NULL;

			hr = deviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &defaultDevice);
			if(hr == S_OK)
			{
				deviceEnumerator->Release();
				deviceEnumerator = NULL;

				//endpointVolume = NULL;
				hr = defaultDevice->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, NULL, (LPVOID *)&endpointVolume);
				if(hr == S_OK)
				{
					defaultDevice->Release();
					defaultDevice = NULL;

					volumeNotification = new CVolumeNotification();
					//connect(volumeNotification, SIGNAL(volumeChanged(double)), this, SLOT(onVolumeChange(double)));
					connect(volumeNotification, SIGNAL(volumeChanged(int)), this, SLOT(onVolumeChange(int)));
					connect(volumeNotification, SIGNAL(volumeMuted(bool)), this, SLOT(onVolumeMuted(bool)));
					hr = endpointVolume->RegisterControlChangeNotify(volumeNotification);

					endpointVolume->GetMute(&mute);
					endpointVolume->GetMasterVolumeLevelScalar(&currVolume);
				}
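Note: the CVolumeNotification object registered with RegisterControlChangeNotify() above is not part of this excerpt. It has to implement IAudioEndpointVolumeCallback; a minimal sketch of that callback shape (illustrative only, and without the Qt signal plumbing the real class adds on top):

class VolumeCallbackSketch : public IAudioEndpointVolumeCallback
{
	LONG m_refCount = 1;
public:
	ULONG STDMETHODCALLTYPE AddRef() { return InterlockedIncrement(&m_refCount); }
	ULONG STDMETHODCALLTYPE Release()
	{
		ULONG ref = InterlockedDecrement(&m_refCount);
		if (ref == 0)
			delete this;
		return ref;
	}
	HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppv)
	{
		if (riid == __uuidof(IUnknown) || riid == __uuidof(IAudioEndpointVolumeCallback))
		{
			*ppv = this;
			AddRef();
			return S_OK;
		}
		*ppv = NULL;
		return E_NOINTERFACE;
	}
	HRESULT STDMETHODCALLTYPE OnNotify(PAUDIO_VOLUME_NOTIFICATION_DATA pNotify)
	{
		// pNotify->fMasterVolume is the new level in [0.0, 1.0]; pNotify->bMuted is the new mute state.
		// The real CVolumeNotification would forward these as the volumeChanged/volumeMuted signals.
		return S_OK;
	}
};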
Code example #25
File: AESinkWASAPI.cpp Project: CaptainRewind/xbmc
void CAESinkWASAPI::EnumerateDevicesEx(AEDeviceInfoList &deviceInfoList, bool force)
{
  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDeviceCollection* pEnumDevices = NULL;
  IMMDevice*           pDefaultDevice = NULL;
  CAEDeviceInfo        deviceInfo;
  CAEChannelInfo       deviceChannels;
  LPWSTR               pwszID = NULL;
  std::wstring         wstrDDID;

  WAVEFORMATEXTENSIBLE wfxex = {0};
  HRESULT              hr;

  hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

  UINT uiCount = 0;

  // get the default audio endpoint
  if(pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDefaultDevice) == S_OK)
  {
    if(pDefaultDevice->GetId(&pwszID) == S_OK)
    {
      wstrDDID = pwszID;
      CoTaskMemFree(pwszID);
    }
    SAFE_RELEASE(pDefaultDevice);
  }

  // enumerate over all audio endpoints
  hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

  hr = pEnumDevices->GetCount(&uiCount);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

  for (UINT i = 0; i < uiCount; i++)
  {
    IMMDevice *pDevice = NULL;
    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;
    PropVariantInit(&varName);

    deviceInfo.m_channels.Reset();
    deviceInfo.m_dataFormats.clear();
    deviceInfo.m_sampleRates.clear();

    hr = pEnumDevices->Item(i, &pDevice);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint failed.");
      goto failed;
    }

    hr = pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.");
      SAFE_RELEASE(pDevice);
      goto failed;
    }

    hr = pProperty->GetValue(PKEY_Device_FriendlyName, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint device name failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strFriendlyName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
    if(FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint GUID failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strDevName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_FormFactor, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint form factor failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    std::string strWinDevType = winEndpoints[(EndpointFormFactor)varName.uiVal].winEndpointType;
    AEDeviceType aeDeviceType = winEndpoints[(EndpointFormFactor)varName.uiVal].aeDeviceType;

    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_PhysicalSpeakers, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint speaker layout failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    unsigned int uiChannelMask = std::max(varName.uintVal, (unsigned int) KSAUDIO_SPEAKER_STEREO);

    deviceChannels.Reset();

    for (unsigned int c = 0; c < WASAPI_SPEAKER_COUNT; c++)
    {
      if (uiChannelMask & WASAPIChannelOrder[c])
        deviceChannels += AEChannelNames[c];
    }

    PropVariantClear(&varName);

    IAudioClient *pClient;
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pClient);
    if (SUCCEEDED(hr))
    {
      /* Test format DTS-HD */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.Format.nSamplesPerSec       = 192000;
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_7POINT1_SURROUND;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 8;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_DTSHD));

      /* Test format Dolby TrueHD */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_TRUEHD));

      /* Test format Dolby EAC3 */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_EAC3));

      /* Test format DTS */
      wfxex.Format.nSamplesPerSec       = 48000;
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_5POINT1;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DTS;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_DTS));

      /* Test format Dolby AC3 */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));

      /* Test format AAC */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_AAC;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AAC));

      /* Test format for PCM format iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;

      for (int p = AE_FMT_FLOAT; p > AE_FMT_INVALID; p--)
      {
        if (p < AE_FMT_FLOAT)
          wfxex.SubFormat               = KSDATAFORMAT_SUBTYPE_PCM;
        wfxex.Format.wBitsPerSample     = CAEUtil::DataFormatToBits((AEDataFormat) p);
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        if (p <= AE_FMT_S24NE4 && p >= AE_FMT_S24BE4)
        {
          wfxex.Samples.wValidBitsPerSample = 24;
        }
        else
        {
          wfxex.Samples.wValidBitsPerSample = wfxex.Format.wBitsPerSample;
        }

        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
          deviceInfo.m_dataFormats.push_back((AEDataFormat) p);
      }

      /* Test format for sample rate iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_PCM;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;

      for (int j = 0; j < WASAPISampleRateCount; j++)
      {
        wfxex.Format.nSamplesPerSec     = WASAPISampleRates[j];
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
          deviceInfo.m_sampleRates.push_back(WASAPISampleRates[j]);
      }

      /* Test format for channels iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_PCM;
      wfxex.Format.nSamplesPerSec       = 48000;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;

      bool hasLpcm = false;

      // Try with KSAUDIO_SPEAKER_DIRECTOUT
      for (unsigned int k = WASAPI_SPEAKER_COUNT; k > 0; k--)
      {
        wfxex.dwChannelMask             = KSAUDIO_SPEAKER_DIRECTOUT;
        wfxex.Format.nChannels          = k;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if (k > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
          break;
        }
      }

      /* Try with reported channel mask */
      for (unsigned int k = WASAPI_SPEAKER_COUNT; k > 0; k--)
      {
        wfxex.dwChannelMask             = uiChannelMask;
        wfxex.Format.nChannels          = k;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if ( !hasLpcm && k > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
          break;
        }
      }

      /* Try with specific speakers configurations */
      for (unsigned int i = 0; i < ARRAYSIZE(layoutsList); i++)
      {
        unsigned int nmbOfCh;
        wfxex.dwChannelMask             = ChLayoutToChMask(layoutsList[i], &nmbOfCh);
        wfxex.Format.nChannels          = nmbOfCh;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if ( deviceChannels.Count() < nmbOfCh)
            deviceChannels = layoutsList[i];
          if ( !hasLpcm && nmbOfCh > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
        }
      }
      pClient->Release();
    }
    else
    {
      CLog::Log(LOGDEBUG, __FUNCTION__": Failed to activate device for passthrough capability testing.");
    }

    deviceInfo.m_deviceName       = strDevName;
    deviceInfo.m_displayName      = strWinDevType.append(strFriendlyName);
    deviceInfo.m_displayNameExtra = std::string("WASAPI: ").append(strFriendlyName);
    deviceInfo.m_deviceType       = aeDeviceType;
    deviceInfo.m_channels         = deviceChannels;

    /* Store the device info */
    deviceInfoList.push_back(deviceInfo);

    if(pDevice->GetId(&pwszID) == S_OK)
    {
      if(wstrDDID.compare(pwszID) == 0)
      {
        deviceInfo.m_deviceName = std::string("default");
        deviceInfo.m_displayName = std::string("default");
        deviceInfo.m_displayNameExtra = std::string("");
        deviceInfoList.push_back(deviceInfo);
      }
      CoTaskMemFree(pwszID);
    }

    SAFE_RELEASE(pDevice);
    SAFE_RELEASE(pProperty);
  }
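Note: localWideToUtf() used above is a helper defined elsewhere in the same file and not shown in this excerpt; presumably it wraps WideCharToMultiByte. A hedged sketch of such a wide-to-UTF-8 conversion (the real helper may differ):

// Illustrative sketch of a wide string -> UTF-8 conversion helper.
static std::string WideToUtf8Sketch(LPCWSTR wide)
{
  if (!wide)
    return std::string();
  int bytes = WideCharToMultiByte(CP_UTF8, 0, wide, -1, NULL, 0, NULL, NULL);
  if (bytes <= 0)
    return std::string();
  std::string utf8(bytes, '\0');
  WideCharToMultiByte(CP_UTF8, 0, wide, -1, &utf8[0], bytes, NULL, NULL);
  utf8.resize(bytes - 1);   // drop the terminating NUL the API wrote into the buffer
  return utf8;
}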
Code example #26
File: audio_driver_wasapi.cpp Project: 93i/godot
Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit) {

	WAVEFORMATEX *pwfex;
	IMMDeviceEnumerator *enumerator = NULL;
	IMMDevice *device = NULL;

	CoInitialize(NULL);

	HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	if (p_device->device_name == "Default") {
		hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
	} else {
		IMMDeviceCollection *devices = NULL;

		hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices);
		ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

		LPWSTR strId = NULL;
		bool found = false;

		UINT count = 0;
		hr = devices->GetCount(&count);
		ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

		for (ULONG i = 0; i < count && !found; i++) {
			IMMDevice *device = NULL;

			hr = devices->Item(i, &device);
			ERR_BREAK(hr != S_OK);

			IPropertyStore *props = NULL;
			hr = device->OpenPropertyStore(STGM_READ, &props);
			ERR_BREAK(hr != S_OK);

			PROPVARIANT propvar;
			PropVariantInit(&propvar);

			hr = props->GetValue(PKEY_Device_FriendlyName, &propvar);
			ERR_BREAK(hr != S_OK);

			if (p_device->device_name == String(propvar.pwszVal)) {
				hr = device->GetId(&strId);
				ERR_BREAK(hr != S_OK);

				found = true;
			}

			PropVariantClear(&propvar);
			props->Release();
			device->Release();
		}

		if (found) {
			hr = enumerator->GetDevice(strId, &device);
		}

		if (strId) {
			CoTaskMemFree(strId);
		}

		if (device == NULL) {
			hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
		}
	}

	if (reinit) {
		// In case we're trying to re-initialize the device prevent throwing this error on the console,
		// otherwise if there is currently no device available this will spam the console.
		if (hr != S_OK) {
			return ERR_CANT_OPEN;
		}
	} else {
		ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
	}

	hr = enumerator->RegisterEndpointNotificationCallback(&notif_client);
	SAFE_RELEASE(enumerator)

	if (hr != S_OK) {
		ERR_PRINT("WASAPI: RegisterEndpointNotificationCallback error");
	}

	hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&p_device->audio_client);
	SAFE_RELEASE(device)

	if (reinit) {
		if (hr != S_OK) {
			return ERR_CANT_OPEN;
		}
	} else {
		ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
	}

	hr = p_device->audio_client->GetMixFormat(&pwfex);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	// Since we're using WASAPI Shared Mode we can't control any of these, we just tag along
	p_device->channels = pwfex->nChannels;
	p_device->format_tag = pwfex->wFormatTag;
	p_device->bits_per_sample = pwfex->wBitsPerSample;
	p_device->frame_size = (p_device->bits_per_sample / 8) * p_device->channels;

	if (p_device->format_tag == WAVE_FORMAT_EXTENSIBLE) {
		WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)pwfex;

		if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
			p_device->format_tag = WAVE_FORMAT_PCM;
		} else if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
			p_device->format_tag = WAVE_FORMAT_IEEE_FLOAT;
		} else {
			ERR_PRINT("WASAPI: Format not supported");
			ERR_FAIL_V(ERR_CANT_OPEN);
		}
	} else {
		if (p_device->format_tag != WAVE_FORMAT_PCM && p_device->format_tag != WAVE_FORMAT_IEEE_FLOAT) {
			ERR_PRINT("WASAPI: Format not supported");
			ERR_FAIL_V(ERR_CANT_OPEN);
		}
	}

	DWORD streamflags = 0;
	if (mix_rate != pwfex->nSamplesPerSec) {
		streamflags |= AUDCLNT_STREAMFLAGS_RATEADJUST;
		pwfex->nSamplesPerSec = mix_rate;
		pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nChannels * (pwfex->wBitsPerSample / 8);
	}

	hr = p_device->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, p_capture ? REFTIMES_PER_SEC : 0, 0, pwfex, NULL);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	if (p_capture) {
		hr = p_device->audio_client->GetService(IID_IAudioCaptureClient, (void **)&p_device->capture_client);
	} else {
		hr = p_device->audio_client->GetService(IID_IAudioRenderClient, (void **)&p_device->render_client);
	}
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	// Free memory
	CoTaskMemFree(pwfex);
	SAFE_RELEASE(device)

	return OK;
}
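Note: two things referenced above sit outside this excerpt: notif_client (the IMMNotificationClient that reacts to device changes) and what AUDCLNT_STREAMFLAGS_RATEADJUST actually buys. That flag enables the IAudioClockAdjustment service, which is how the requested mix_rate can be applied after Initialize(); a hedged sketch of that step (not necessarily how this driver does it):

// Illustrative sketch: with AUDCLNT_STREAMFLAGS_RATEADJUST, the shared-mode engine can be
// asked to resample to the rate we want instead of the device's mix rate.
static void apply_requested_rate(IAudioClient *audio_client, unsigned int mix_rate) {

	IAudioClockAdjustment *clock_adjust = NULL;
	HRESULT hr = audio_client->GetService(__uuidof(IAudioClockAdjustment), (void **)&clock_adjust);
	if (hr == S_OK) {
		clock_adjust->SetSampleRate((float)mix_rate);
		clock_adjust->Release();
	}
}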
Code example #27
int wasapi_init(cubeb ** context, char const * context_name)
{
  HRESULT hr;
  IMMDeviceEnumerator * enumerator = NULL;

  hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
  if (FAILED(hr)) {
    LOG("Could not init COM.");
    return CUBEB_ERROR;
  }

  cubeb * ctx = (cubeb *)calloc(1, sizeof(cubeb));

  ctx->ops = &wasapi_ops;

  hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                        NULL, CLSCTX_INPROC_SERVER,
                        IID_PPV_ARGS(&enumerator));
  if (FAILED(hr)) {
    LOG("Could not get device enumerator.");
    wasapi_destroy(ctx);
    return CUBEB_ERROR;
  }

  /* eMultimedia is okay for now ("Music, movies, narration, [...]").
   * We will need to change this when we distinguish streams by use-case, other
   * possible values being eConsole ("Games, system notification sounds [...]")
   * and eCommunication ("Voice communication"). */
  hr = enumerator->GetDefaultAudioEndpoint(eRender,
                                           eMultimedia,
                                           &ctx->device);
  if (FAILED(hr)) {
    LOG("Could not get default audio endpoint.");
    SafeRelease(enumerator);
    wasapi_destroy(ctx);
    return CUBEB_ERROR;
  }

  ctx->mmcss_module = LoadLibraryA("Avrt.dll");

  if (ctx->mmcss_module) {
    ctx->set_mm_thread_characteristics =
      (set_mm_thread_characteristics_function) GetProcAddress(
          ctx->mmcss_module, "AvSetMmThreadCharacteristicsA");
    ctx->revert_mm_thread_characteristics =
      (revert_mm_thread_characteristics_function) GetProcAddress(
          ctx->mmcss_module, "AvRevertMmThreadCharacteristics");
    if (!(ctx->set_mm_thread_characteristics && ctx->revert_mm_thread_characteristics)) {
      LOG("Could not load AvSetMmThreadCharacteristics or AvRevertMmThreadCharacteristics: %d", GetLastError());
      FreeLibrary(ctx->mmcss_module);
    }
  } else {
    // This is not a fatal error, but we might end up glitching when
    // the system is under high load.
    LOG("Could not load Avrt.dll");
    ctx->set_mm_thread_characteristics = &set_mm_thread_characteristics_noop;
    ctx->revert_mm_thread_characteristics = &revert_mm_thread_characteristics_noop;
  }
  
  SafeRelease(enumerator);

  *context = ctx;

  return CUBEB_OK;
}
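Note: the *_noop fallbacks installed above when Avrt.dll is unavailable are defined elsewhere in the cubeb source and not shown here; presumably they are trivial stand-ins with the same signatures, roughly:

/* Illustrative sketch of the fallbacks: report success without doing any MMCSS work. */
static HANDLE WINAPI set_mm_thread_characteristics_noop(const char * name, LPDWORD mmcss_task_index)
{
  return (HANDLE)1;   /* non-NULL so callers treat the call as having succeeded */
}

static BOOL WINAPI revert_mm_thread_characteristics_noop(HANDLE mmcss_handle)
{
  return TRUE;
}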
Code example #28
File: wasapish.cpp Project: gameblabla/mednafen-gcw
SexyAL_device *SexyALI_WASAPISH_Open(const char *id, SexyAL_format *format, SexyAL_buffering *buffering)
{
 SexyAL_device *dev;
 WASWrap *w;
 IMMDeviceEnumerator *immdeven = NULL;
 WAVEFORMATEXTENSIBLE wfe;
 HRESULT hr;

 if(!(dev = (SexyAL_device *)calloc(1, sizeof(SexyAL_device))))
 {
  printf("calloc failed.\n");
  return(NULL);
 }
 timeBeginPeriod(1);

 w = (WASWrap *)calloc(1, sizeof(WASWrap));
 dev->private_data = w;
 if(!w)
 {
  printf("calloc failed.\n");
  Cleanup(dev);
  return(NULL);
 }

 //
 //
 //
 hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
 if(hr != S_OK && hr != S_FALSE)
 {
  printf("CoInitializeEx() failed: 0x%08x\n", (unsigned)hr);
  Cleanup(dev);
  return(NULL);
 }

 //printf("NOODLES: 0x%08x 0x%08x\n", LV_CLSID_MMDeviceEnumerator.Data1, LV_IID_IMMDeviceEnumerator.Data1);

 TRYHR(CoCreateInstance(LV_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, LV_IID_IMMDeviceEnumerator, (void**)&immdeven));

 if(id == NULL)
 {
  TRYHR(immdeven->GetDefaultAudioEndpoint(eRender, eConsole, &w->immdev));
 }
 else
 {
  IMMDeviceCollection *devcoll = NULL;
  UINT numdevs = 0;
  wchar_t *id16 = (wchar_t *)calloc(strlen(id) + 1, sizeof(wchar_t));

  w->immdev = NULL;

  MultiByteToWideChar(CP_UTF8, 0, id, -1, id16, strlen(id) + 1);

  TRYHR(immdeven->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devcoll));
  TRYHR(devcoll->GetCount(&numdevs));

  for(UINT i = 0; i < numdevs && w->immdev == NULL; i++)
  {
   IMMDevice *tmpdev = NULL;
   IPropertyStore *props = NULL;
   PROPVARIANT prop_fname;

   PropVariantInit(&prop_fname);

   TRYHR(devcoll->Item(i, &tmpdev));
   TRYHR(tmpdev->OpenPropertyStore(STGM_READ, &props));
   TRYHR(props->GetValue(LV_PKEY_Device_FriendlyName, &prop_fname));

   printf("Device: %S\n", prop_fname.pwszVal);

   if(!wcscmp(id16, prop_fname.pwszVal))
    w->immdev = tmpdev;
   else
   {
    tmpdev->Release();
    tmpdev = NULL;
   }

   PropVariantClear(&prop_fname);

   if(props != NULL)
   {
    props->Release();
    props = NULL;
   }
  }

  if(id16 != NULL)
  {
   free(id16);
   id16 = NULL;
  }

  if(devcoll != NULL)
  {
   devcoll->Release();
   devcoll = NULL;
  }

  if(w->immdev == NULL)
  {
   puts("Device not found!");
   Cleanup(dev);
   return(NULL);
  }
 }

 TRYHR(w->immdev->Activate(LV_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&w->ac));

 {
  WAVEFORMATEX *mf = NULL;

  TRYHR(w->ac->GetMixFormat(&mf));

  memcpy(&wfe, mf, std::min<unsigned>(sizeof(WAVEFORMATEXTENSIBLE), sizeof(WAVEFORMATEX) + mf->cbSize));
  if(wfe.Format.cbSize > 22)
   wfe.Format.cbSize = 22;

  wfe.Format.wBitsPerSample = 16;
  wfe.Format.nBlockAlign = (wfe.Format.nChannels * wfe.Format.wBitsPerSample) / 8;
  wfe.Format.nAvgBytesPerSec = wfe.Format.nSamplesPerSec * wfe.Format.nBlockAlign;

  if(wfe.Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE)
  {
   wfe.Samples.wValidBitsPerSample = wfe.Format.wBitsPerSample;
   wfe.SubFormat = LV_KSDATAFORMAT_SUBTYPE_PCM;
  }
  else
  {
   wfe.Format.wFormatTag = WAVE_FORMAT_PCM;
  }

  CoTaskMemFree(mf);
 }

 format->rate = wfe.Format.nSamplesPerSec;
 format->sampformat = ((wfe.Format.wBitsPerSample >> 3) << 4) | 1;

 if(wfe.Format.wBitsPerSample == 32)
  format->sampformat |= 2;

 format->channels = wfe.Format.nChannels;
 format->revbyteorder = false;
 format->noninterleaved = false;

 //
 //
 //
 {
  REFERENCE_TIME raw_ac_latency;
  int32_t des_effbuftime = buffering->ms ? buffering->ms : 52;
  int32_t des_effbufsize = (int64_t)des_effbuftime * wfe.Format.nSamplesPerSec / 1000;
  int32_t des_realbuftime = des_effbuftime + 40;

  //printf("%u\n", wfe.Format.wFormatTag);
  //printf("%u\n", wfe.Format.nChannels);
  //printf("%u\n", wfe.Format.nSamplesPerSec);

  //printf("%u\n", wfe.Format.wBitsPerSample);
  //printf("%u\n", wfe.Format.nBlockAlign);
  //printf("%u\n", wfe.Format.nAvgBytesPerSec);

  TRYHR(w->ac->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, (REFERENCE_TIME)des_realbuftime * 10000, 0, (WAVEFORMATEX*)&wfe, NULL));

  TRYHR(w->ac->GetBufferSize(&w->bfc));

  TRYHR(w->ac->GetStreamLatency(&raw_ac_latency));

  w->BufferBPF = wfe.Format.wBitsPerSample / 8 * wfe.Format.nChannels;

  buffering->buffer_size = std::min<int32_t>(des_effbufsize, w->bfc);
  buffering->period_size = 0;
  buffering->latency = buffering->buffer_size + (((int64_t)raw_ac_latency * format->rate + 5000000) / 10000000);
  buffering->bt_gran = 0;
 }

 if(!(w->evt = CreateEvent(NULL, FALSE, FALSE, NULL)))
 {
  printf("Error creating event.\n");
  Cleanup(dev);
  return(NULL);
 }

 if((w->avrt_dll = LoadLibrary("avrt.dll")) != NULL)
 {
  if(!(w->p_AvSetMmThreadCharacteristicsA = (HANDLE WINAPI (*)(LPCSTR, LPDWORD))(GetProcAddress(w->avrt_dll, "AvSetMmThreadCharacteristicsA"))))
  {
   printf("Error getting pointer to AvSetMmThreadCharacteristicsA.\n");
   Cleanup(dev);
   return(NULL);
  }

  if(!(w->p_AvRevertMmThreadCharacteristics = (BOOL WINAPI (*)(HANDLE))(GetProcAddress(w->avrt_dll, "AvRevertMmThreadCharacteristics"))))
  {
   printf("Error getting pointer to AvRevertMmThreadCharacteristics.\n");
   Cleanup(dev);
   return(NULL);
  }
 }

 TRYHR(w->ac->SetEventHandle(w->evt));

 TRYHR(w->ac->GetService(LV_IID_IAudioRenderClient, (void**)&w->arc));

 memcpy(&dev->buffering, buffering, sizeof(SexyAL_buffering));
 memcpy(&dev->format, format, sizeof(SexyAL_format));
 dev->RawWrite = RawWrite;
 dev->RawCanWrite = RawCanWrite;
 dev->RawClose = Close;
 dev->Pause = Pause;

 //
 // Clear buffer.
 //
 {
  BYTE *bd;

  TRYHR(w->arc->GetBuffer(w->bfc, &bd));

  memset(bd, 0, w->bfc * w->BufferBPF);

  w->arc->ReleaseBuffer(w->bfc, 0);
 }

#if 0
 {
  UINT32 paddie = 0;

  printf("buffer_size=%u\n", buffering->buffer_size);
  printf("bfc=%u\n", w->bfc);
  w->ac->GetCurrentPadding(&paddie);
  printf("paddie=%u\n", paddie);
  printf("snoo=%d\n", (int)buffering->buffer_size - paddie);
 }
#endif

 TRYHR(w->ac->Start());

 if(QueryPerformanceFrequency(&w->qpc_freq) == 0)
 {
  printf("QueryPerformanceFrequency() failed.\n");
  Cleanup(dev);
  return(NULL);
 }

 //printf("qpc_freq=%f\n", (double)w->qpc_freq.QuadPart);

 w->AThreadRunning = true;
 if(!(w->AThread = CreateThread(NULL, 0, AThreadMain, dev, CREATE_SUSPENDED, NULL)))
 {
  printf("Error creating thread.\n");
  Cleanup(dev);
  return(NULL);
 }
 InitializeCriticalSection(&w->crit);
 ResumeThread(w->AThread);

 return(dev);
}
Code example #29
File: main.cpp Project: VLatunoV/AudioClient
int main(int argc, char** argv)
{
	if (!Initialize())
	{
		cout << "Failed to initialize socket layer\n";
		system("pause");
		return -1;
	}
	if (!Bind(PORT))
	{
		cout << "Failed to bind listening socket\n";
		system("pause");
		Cleanup();
		return -1;
	}
	Connection SendingSocket;

	char choice;
	char IP[16];
	cout << "Host or Join (h/j)\n";
	cin >> choice;
	if (choice == 'h')
	{
		if (!ReceivingSocket.Accept())
		{
			cout << "Error while connecting to partner\n";
			system("pause");
			Cleanup();
			return -1;
		}
		if(!SendingSocket.Accept())
		{
			cout << "Error while connecting to partner\n";
			system("pause");
			Cleanup();
			return -1;
		}
	}
	else
	{
		cout << "IP: ";
		cin >> IP;
		if (!SendingSocket.Connect(IP, PORT))
		{
			cout << "Error while connecting to partner\n";
			cout << StringError() << endl;
			system("pause");
			Cleanup();
			return -1;
		}
		if(!ReceivingSocket.Connect(IP, PORT))
		{
			cout << "Error while connecting to partner\n";
			cout << StringError() << endl;
			system("pause");
			Cleanup();
			return -1;
		}
	}

	HRESULT result;
	IAudioCaptureClient* capture_client = NULL;
	IAudioClient* audio_client_capture = NULL;
	IMMDevice* capture_device = NULL;
	IMMDeviceEnumerator* enumerator = NULL;
	IPropertyStore* propstore_capture = NULL;
	PROPVARIANT pv_capture;
	WAVEFORMATEX* wf_capture = NULL;
	UINT32 packSize;
	UINT32 availableFrames;
	UINT32 timeIntervalForBuffer = 1000000;
	UINT32 timeIntervalInMilliseconds = timeIntervalForBuffer / 10000;
	BYTE* pData = NULL;
	DWORD flags;
	float index = 0;

	result = CoInitialize(0);
	if (FAILED(result))
	{
		printf("Failed to initialize\n");
		goto Exit;
	}
	result = CoCreateInstance(
		__uuidof(MMDeviceEnumerator), NULL,
		CLSCTX_ALL, __uuidof(IMMDeviceEnumerator),
		(void**)&enumerator);
	if (FAILED(result))
	{
		printf("Failed to create device enumerator\n");
		goto Exit;
	}
	result = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, &capture_device);
	if (FAILED(result))
	{
		printf("Failed to get capture endpoint handle\n");
		goto Exit;
	}
	PropVariantInit(&pv_capture);
	result = capture_device->OpenPropertyStore(STGM_READ, &propstore_capture);
	if (FAILED(result))
	{
		printf("Failed to read device properties\n");
		goto Exit;
	}
	propstore_capture->GetValue(PKEY_Device_FriendlyName, &pv_capture);
	printf("Opening capture device: %S\n", pv_capture.pwszVal);
	PropVariantClear(&pv_capture);
	result = capture_device->Activate(
		__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&audio_client_capture);
	if (FAILED(result))
	{
		printf("Failed to activate capture device\n");
		goto Exit;
	}
	result = audio_client_capture->GetMixFormat(&wf_capture);
	if (FAILED(result))
	{
		printf("Failed to get mix format\n");
		goto Exit;
	}
	result = audio_client_capture->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, timeIntervalForBuffer, 0, wf_capture, NULL);
	if (FAILED(result))
	{
		printf("Failed to initialize audio client\n");
		goto Exit;
	}
	printf("Sample rate: %u Hz\n", wf_capture->nSamplesPerSec);
	printf("Sample size: %u bits\n", wf_capture->wBitsPerSample);
	printf("Size of audio frame: %u bytes\n", wf_capture->nBlockAlign);
	printf("Number of channels: %u\n", wf_capture->nChannels);
	result = audio_client_capture->Start();
	if (FAILED(result))
	{
		printf("Failed to start recording\n");
		goto Exit;
	}
	result = audio_client_capture->GetService(__uuidof(IAudioCaptureClient), (void**)&capture_client);
	if (FAILED(result))
	{
		printf("Failed to get capture service\n");
		goto Exit;
	}
	
	// Get render endpoint interface
	result = enumerator->GetDefaultAudioEndpoint(eRender, eCommunications, &render_device);
	if (FAILED(result))
	{
		printf("Failed to get render endpoint handle\n");
		goto Exit;
	}
	PropVariantInit(&pv_render);
	result = render_device->OpenPropertyStore(STGM_READ, &propstore_render);
	if (FAILED(result))
	{
		printf("Failed to read device properties\n");
		goto Exit;
	}
	propstore_render->GetValue(PKEY_Device_FriendlyName, &pv_render);
	printf("Opening render device: %S\n", pv_render.pwszVal);
	PropVariantClear(&pv_render);
	result = render_device->Activate(
		__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&audio_client_render);
	if (FAILED(result))
	{
		printf("Failed to activate render device\n");
		goto Exit;
	}
	result = audio_client_render->GetMixFormat(&wf_render);
	if (FAILED(result))
	{
		printf("Failed to get mix format\n");
		goto Exit;
	}
	result = audio_client_render->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, timeIntervalForBuffer, 0, wf_render, NULL);
	if (FAILED(result))
	{
		printf("Failed to initialize audio client\n");
		goto Exit;
	}
	printf("Sample rate: %u Hz\n", wf_render->nSamplesPerSec);
	printf("Sample size: %u bits\n", wf_render->wBitsPerSample);
	printf("Size of audio frame: %u bytes\n", wf_render->nBlockAlign);
	printf("Number of channels: %u\n", wf_render->nChannels);
	result = audio_client_render->Start();
	if (FAILED(result))
	{
		printf("Failed to start recording\n");
		goto Exit;
	}
	result = audio_client_render->GetService(__uuidof(IAudioRenderClient), (void**)&render_client);
	if (FAILED(result))
	{
		printf("Failed to get render service\n");
		goto Exit;
	}

	int partner_format_received_size;
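	// Exchange stream formats with the peer. Only sizeof(WAVEFORMATEX) bytes are sent, so any
	// extra bytes described by cbSize (e.g. a WAVEFORMATEXTENSIBLE) are not transmitted.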
	SendingSocket.Send((const char*)wf_capture, sizeof(*wf_capture));
	ReceivingSocket.Recv((char*)&partner_format, sizeof(partner_format), partner_format_received_size);
	if (last_error != _NO_ERROR)
	{
		printf("Connection failed\n");
		goto Exit;
	}
	if (partner_format.nSamplesPerSec != wf_render->nSamplesPerSec)
	{
		/*cout << "Partned format:\n";
		cout << partner_format.wBitsPerSample << endl;
		cout << partner_format.nSamplesPerSec << endl;
		cout << "My format:\n";
		cout << wf_render->wBitsPerSample << endl;
		cout << wf_render->nSamplesPerSec << endl;*/
		printf("Partner capture format unsupported\n");
		//goto Exit;
	}
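	// Spawn the playback thread. RenderAudio (defined elsewhere in this program) is expected to
	// receive audio from the peer and feed it to render_client while this loop captures and sends.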
	LPTHREAD_START_ROUTINE StartRoutine = (LPTHREAD_START_ROUTINE)RenderAudio;
	DWORD threadID;
	HANDLE hThread = CreateThread(NULL, 0, StartRoutine, NULL, 0, &threadID);
	while (true)
	{
		Sleep(timeIntervalInMilliseconds);
		result = capture_client->GetNextPacketSize(&packSize);
		if (FAILED(result))
		{
			printf("Failed to get next pack size\n");
			break;
		}
		while (packSize)
		{
			result = capture_client->GetBuffer(&pData, &availableFrames, &flags, NULL, NULL);
			if (FAILED(result))
			{
				printf("Failed to get buffer\n");
				goto Exit;
			}
			if (!SendingSocket.Send((const char*)pData, availableFrames * wf_capture->nBlockAlign))
			{
				if (last_error == SOCKET_CLOSED)
				{
					printf("Disconnect\n");
				}
				else
				{
					printf("Connection broke\n");
				}
				goto Exit;
			}
			result = capture_client->ReleaseBuffer(availableFrames);
			if (FAILED(result))
			{
				printf("Failed to release buffer\n");
				goto Exit;
			}
			result = capture_client->GetNextPacketSize(&packSize);
			if (FAILED(result))
			{
				printf("Failed to get next packet size\n");
				goto Exit;
			}
		}
		if (GetAsyncKeyState(VK_ESCAPE))
		{
			break;
		}
	}
	
Exit:
	if (audio_client_capture) { audio_client_capture->Stop(); }
	//TerminateThread(hThread, 0);
	if (audio_client_render) { audio_client_render->Stop(); }

	SendingSocket.Disconnect();
	ReceivingSocket.Disconnect();
	SAFE_RELEASE(capture_device);
	SAFE_RELEASE(render_device);
	SAFE_RELEASE(enumerator);
	SAFE_RELEASE(propstore_capture);
	SAFE_RELEASE(propstore_render);
	SAFE_RELEASE(capture_client);
	SAFE_RELEASE(render_client);
	SAFE_RELEASE(audio_client_capture);
	SAFE_RELEASE(audio_client_render);
	// Mix formats returned by GetMixFormat() must be freed with CoTaskMemFree (safe to call on NULL).
	CoTaskMemFree(wf_capture);
	CoTaskMemFree(wf_render);
	system("pause");
	return 0;
}
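
For reference, here is a minimal sketch of one possible shape for a render routine like the RenderAudio used above, assuming the globals already present in this example (audio_client_render, render_client, wf_render, ReceivingSocket, last_error). It is illustrative only, not the original implementation.

// Illustrative sketch only, not the original RenderAudio. It pulls raw frames from the peer
// and copies them into the shared-mode render buffer until an error or disconnect occurs.
DWORD WINAPI RenderAudioSketch(LPVOID)
{
	UINT32 bufferFrames = 0;
	if (FAILED(audio_client_render->GetBufferSize(&bufferFrames)))
	{
		return 1;
	}
	while (true)
	{
		// How much of the render buffer is still queued for playback?
		UINT32 padding = 0;
		if (FAILED(audio_client_render->GetCurrentPadding(&padding)))
		{
			break;
		}
		UINT32 framesToWrite = bufferFrames - padding;
		if (framesToWrite == 0)
		{
			Sleep(10); // the buffer is full, give the engine time to drain it
			continue;
		}
		BYTE* renderBuffer = NULL;
		if (FAILED(render_client->GetBuffer(framesToWrite, &renderBuffer)))
		{
			break;
		}
		// Receive raw audio from the peer directly into the render buffer.
		int received = 0;
		ReceivingSocket.Recv((char*)renderBuffer, framesToWrite * wf_render->nBlockAlign, received);
		if (last_error != _NO_ERROR || received <= 0)
		{
			render_client->ReleaseBuffer(0, 0);
			break;
		}
		// Commit only the frames that were actually filled.
		render_client->ReleaseBuffer(received / wf_render->nBlockAlign, 0);
	}
	return 0;
}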
Code example #30
0
File: audio_driver_wasapi.cpp Project: Bonfi96/godot
Error AudioDriverWASAPI::init_device() {

	WAVEFORMATEX *pwfex;
	IMMDeviceEnumerator *enumerator = NULL;
	IMMDevice *device = NULL;

	CoInitialize(NULL);

	HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&audio_client);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	hr = audio_client->GetMixFormat(&pwfex);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	// Since we're using WASAPI Shared Mode we can't control any of these, we just tag along
	channels = pwfex->nChannels;
	mix_rate = pwfex->nSamplesPerSec;
	format_tag = pwfex->wFormatTag;
	bits_per_sample = pwfex->wBitsPerSample;

	switch (channels) {
		case 2: // Stereo
		case 4: // Surround 3.1
		case 6: // Surround 5.1
		case 8: // Surround 7.1
			break;

		default:
			ERR_PRINTS("WASAPI: Unsupported number of channels: " + itos(channels));
			ERR_FAIL_V(ERR_CANT_OPEN);
			break;
	}

	if (format_tag == WAVE_FORMAT_EXTENSIBLE) {
		WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)pwfex;

		if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
			format_tag = WAVE_FORMAT_PCM;
		} else if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
			format_tag = WAVE_FORMAT_IEEE_FLOAT;
		} else {
			ERR_PRINT("WASAPI: Format not supported");
			ERR_FAIL_V(ERR_CANT_OPEN);
		}
	} else {
		if (format_tag != WAVE_FORMAT_PCM && format_tag != WAVE_FORMAT_IEEE_FLOAT) {
			ERR_PRINT("WASAPI: Format not supported");
			ERR_FAIL_V(ERR_CANT_OPEN);
		}
	}

	hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, 0, 0, pwfex, NULL);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	// Auto-reset event that WASAPI signals once per device period in event-driven mode.
	event = CreateEvent(NULL, FALSE, FALSE, NULL);
	ERR_FAIL_COND_V(event == NULL, ERR_CANT_OPEN);

	hr = audio_client->SetEventHandle(event);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	hr = audio_client->GetService(IID_IAudioRenderClient, (void **)&render_client);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	UINT32 max_frames;
	hr = audio_client->GetBufferSize(&max_frames);
	ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

	// Due to WASAPI Shared Mode we have no control of the buffer size
	buffer_frames = max_frames;

	// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
	buffer_size = buffer_frames * channels;
	samples_in.resize(buffer_size);

	if (OS::get_singleton()->is_stdout_verbose()) {
		print_line("WASAPI: detected " + itos(channels) + " channels");
		print_line("WASAPI: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
	}

	return OK;
}
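
The init above only prepares the stream. A minimal sketch of the kind of event-driven fill loop that would consume this setup, assuming the members initialized in init_device() (audio_client, render_client, event, buffer_frames, channels, bits_per_sample), might look like the following; it is not the actual AudioDriverWASAPI thread function.

// Illustrative sketch only, not Godot's actual audio thread. It waits for the period event
// set up above and keeps the shared-mode render buffer topped up.
void example_render_loop() {

	if (audio_client->Start() != S_OK)
		return;

	while (true) {
		// WASAPI signals the event once per device period when it wants more data.
		WaitForSingleObject(event, 1000);

		UINT32 padding = 0;
		if (audio_client->GetCurrentPadding(&padding) != S_OK)
			break;

		UINT32 avail = buffer_frames - padding;
		if (avail == 0)
			continue;

		BYTE *buffer = NULL;
		if (render_client->GetBuffer(avail, &buffer) != S_OK)
			break;

		// Fill with silence here; a real driver would write avail frames of mixed audio instead.
		memset(buffer, 0, avail * channels * (bits_per_sample / 8));

		if (render_client->ReleaseBuffer(avail, 0) != S_OK)
			break;
	}

	audio_client->Stop();
}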