void propagateWithRawCurrentFormat(WAVEFORMATEX *toThis) {
    WAVEFORMATEX *pwfx = NULL;
    IMMDevice *pMMDevice;
    IAudioClient *pAudioClient;
    HANDLE hTask;
    DWORD nTaskIndex = 0;
    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);

    HRESULT hr = get_default_device(&pMMDevice);
    if (FAILED(hr)) {
        assert(false);
    }
    // activate an IAudioClient on the default device (the default is what we want, since we capture its loopback)
    hr = pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        ShowOutput("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        assert(false);
    }

    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        assert(false);
    }
    pAudioClient->Stop();
    AvRevertMmThreadCharacteristics(hTask);
    pAudioClient->Release();
    pMMDevice->Release();
    // copy only the base WAVEFORMATEX; any cbSize extension of the mix format is dropped
    memcpy(toThis, pwfx, sizeof(WAVEFORMATEX));
    CoTaskMemFree(pwfx);
}
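The get_default_device() helper called at the top is not shown on this page; a minimal sketch of such a helper (name kept from the call above, implementation assumed) simply asks the MMDevice API for the default render endpoint, which is the device a loopback capture records:

// Hypothetical sketch of get_default_device(): fetch the default render endpoint.
HRESULT get_default_device(IMMDevice **ppMMDevice) {
    IMMDeviceEnumerator *pEnumerator = NULL;
    HRESULT hr = CoCreateInstance(
        __uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
        __uuidof(IMMDeviceEnumerator), (void**)&pEnumerator);
    if (FAILED(hr)) {
        return hr;
    }
    // eRender/eConsole: the endpoint the system is currently playing through
    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, ppMMDevice);
    pEnumerator->Release();
    return hr;
}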
Example #2
void CAESinkDirectSound::EnumerateDevicesEx(AEDeviceInfoList &deviceInfoList, bool force)
{
  CAEDeviceInfo        deviceInfo;

  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDeviceCollection* pEnumDevices = NULL;

  HRESULT                hr;

  std::string strDD = GetDefaultDevice();

  /* Windows Vista or later - supporting WASAPI device probing */
  hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

  UINT uiCount = 0;

  hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

  hr = pEnumDevices->GetCount(&uiCount);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

  for (UINT i = 0; i < uiCount; i++)
  {
    IMMDevice *pDevice = NULL;
    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;
    PropVariantInit(&varName);

    deviceInfo.m_channels.Reset();
    deviceInfo.m_dataFormats.clear();
    deviceInfo.m_sampleRates.clear();

    hr = pEnumDevices->Item(i, &pDevice);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint failed.");
      goto failed;
    }

    hr = pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint properties failed.");
      SAFE_RELEASE(pDevice);
      goto failed;
    }

    hr = pProperty->GetValue(PKEY_Device_FriendlyName, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint device name failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strFriendlyName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint GUID failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strDevName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_FormFactor, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint form factor failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    std::string strWinDevType = winEndpoints[(EndpointFormFactor)varName.uiVal].winEndpointType;
    AEDeviceType aeDeviceType = winEndpoints[(EndpointFormFactor)varName.uiVal].aeDeviceType;

    PropVariantClear(&varName);

    /* In shared mode Windows tells us what format the audio must be in. */
    IAudioClient *pClient;
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pClient);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Activate device failed (%s)", WASAPIErrToStr(hr));
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    //hr = pClient->GetMixFormat(&pwfxex);
    hr = pProperty->GetValue(PKEY_AudioEngine_DeviceFormat, &varName);
    if (SUCCEEDED(hr) && varName.blob.cbSize > 0)
    {
      WAVEFORMATEX* smpwfxex = (WAVEFORMATEX*)varName.blob.pBlobData;
      deviceInfo.m_channels = layoutsByChCount[std::max(std::min(smpwfxex->nChannels, (WORD) DS_SPEAKER_COUNT), (WORD) 2)];
      deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_FLOAT));
      deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));
      deviceInfo.m_sampleRates.push_back(std::min(smpwfxex->nSamplesPerSec, (DWORD) 192000));
    }
    else
    {
      CLog::Log(LOGERROR, __FUNCTION__": Getting DeviceFormat failed (%s)", WASAPIErrToStr(hr));
    }
    pClient->Release();

    SAFE_RELEASE(pDevice);
    SAFE_RELEASE(pProperty);

    deviceInfo.m_deviceName       = strDevName;
    deviceInfo.m_displayName      = strWinDevType.append(strFriendlyName);
    deviceInfo.m_displayNameExtra = std::string("DirectSound: ").append(strFriendlyName);
    deviceInfo.m_deviceType       = aeDeviceType;

    deviceInfoList.push_back(deviceInfo);

    // add the default device with m_deviceName = default
    if(strDD == strDevName)
    {
      deviceInfo.m_deviceName = std::string("default");
      deviceInfo.m_displayName = std::string("default");
      deviceInfo.m_displayNameExtra = std::string("");
      deviceInfoList.push_back(deviceInfo);
    }
  }

  return;

failed:

  if (FAILED(hr))
    CLog::Log(LOGERROR, __FUNCTION__": Failed to enumerate WASAPI endpoint devices (%s).", WASAPIErrToStr(hr));

  SAFE_RELEASE(pEnumDevices);
  SAFE_RELEASE(pEnumerator);

}
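Helpers such as localWideToUtf() used in this sink are defined elsewhere in the surrounding codebase. A rough sketch of a localWideToUtf-style conversion (signature assumed from its call sites; UTF-8 via WideCharToMultiByte) could look like:

// Assumed sketch of localWideToUtf(): convert a wide property string to UTF-8.
static std::string localWideToUtf(LPCWSTR pwsz)
{
  if (pwsz == NULL)
    return "";
  int len = WideCharToMultiByte(CP_UTF8, 0, pwsz, -1, NULL, 0, NULL, NULL);
  if (len <= 1)
    return "";
  std::string result(len, '\0');
  WideCharToMultiByte(CP_UTF8, 0, pwsz, -1, &result[0], len, NULL, NULL);
  result.resize(len - 1); // drop the trailing null written by the conversion
  return result;
}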
Example #3
void CAESinkDirectSound::EnumerateDevicesEx(AEDeviceInfoList &deviceInfoList, bool force)
{
  CAEDeviceInfo        deviceInfo;

  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDeviceCollection* pEnumDevices = NULL;

  HRESULT                hr;

  /* See if we are on Windows XP */
  if (!g_sysinfo.IsWindowsVersionAtLeast(CSysInfo::WindowsVersionVista))
  {
    /* We are on XP - WASAPI not supported - enumerate using DS devices */
    LPGUID deviceGUID = NULL;
    RPC_CSTR cszGUID;
    std::string szGUID;
    std::list<DSDevice> DSDeviceList;
    DirectSoundEnumerate(DSEnumCallback, &DSDeviceList);

    for(std::list<DSDevice>::iterator itt = DSDeviceList.begin(); itt != DSDeviceList.end(); ++itt)
    {
      if (UuidToString((*itt).lpGuid, &cszGUID) != RPC_S_OK)
        continue;  /* could not convert GUID to string - skip device */

      deviceInfo.m_channels.Reset();
      deviceInfo.m_dataFormats.clear();
      deviceInfo.m_sampleRates.clear();

      szGUID = (LPSTR)cszGUID;

      deviceInfo.m_deviceName = "{" + szGUID + "}";
      deviceInfo.m_displayName = (*itt).name;
      deviceInfo.m_displayNameExtra = std::string("DirectSound: ") + (*itt).name;

      deviceInfo.m_deviceType = AE_DEVTYPE_PCM;
      deviceInfo.m_channels   = layoutsByChCount[2];

      deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_FLOAT));
      deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));

      deviceInfo.m_sampleRates.push_back((DWORD) 96000);

      deviceInfoList.push_back(deviceInfo);
    }

    RpcStringFree(&cszGUID);
    return;
  }

  /* Windows Vista or later - supporting WASAPI device probing */
  hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

  UINT uiCount = 0;

  hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

  hr = pEnumDevices->GetCount(&uiCount);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

  for (UINT i = 0; i < uiCount; i++)
  {
    IMMDevice *pDevice = NULL;
    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;
    PropVariantInit(&varName);

    deviceInfo.m_channels.Reset();
    deviceInfo.m_dataFormats.clear();
    deviceInfo.m_sampleRates.clear();

    hr = pEnumDevices->Item(i, &pDevice);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint failed.");
      goto failed;
    }

    hr = pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint properties failed.");
      SAFE_RELEASE(pDevice);
      goto failed;
    }

    hr = pProperty->GetValue(PKEY_Device_FriendlyName, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint device name failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strFriendlyName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint GUID failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strDevName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_FormFactor, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint form factor failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    std::string strWinDevType = winEndpoints[(EndpointFormFactor)varName.uiVal].winEndpointType;
    AEDeviceType aeDeviceType = winEndpoints[(EndpointFormFactor)varName.uiVal].aeDeviceType;

    PropVariantClear(&varName);

    /* In shared mode Windows tells us what format the audio must be in. */
    IAudioClient *pClient;
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pClient);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Activate device failed (%s)", WASAPIErrToStr(hr));
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    //hr = pClient->GetMixFormat(&pwfxex);
    hr = pProperty->GetValue(PKEY_AudioEngine_DeviceFormat, &varName);
    if (SUCCEEDED(hr) && varName.blob.cbSize > 0)
    {
      WAVEFORMATEX* smpwfxex = (WAVEFORMATEX*)varName.blob.pBlobData;
      deviceInfo.m_channels = layoutsByChCount[std::max(std::min(smpwfxex->nChannels, (WORD) DS_SPEAKER_COUNT), (WORD) 2)];
      deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_FLOAT));
      deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));
      deviceInfo.m_sampleRates.push_back(std::min(smpwfxex->nSamplesPerSec, (DWORD) 192000));
    }
    else
    {
      CLog::Log(LOGERROR, __FUNCTION__": Getting DeviceFormat failed (%s)", WASAPIErrToStr(hr));
    }
    pClient->Release();

    SAFE_RELEASE(pDevice);
    SAFE_RELEASE(pProperty);

    deviceInfo.m_deviceName       = strDevName;
    deviceInfo.m_displayName      = strWinDevType.append(strFriendlyName);
    deviceInfo.m_displayNameExtra = std::string("DirectSound: ").append(strFriendlyName);
    deviceInfo.m_deviceType       = aeDeviceType;

    deviceInfoList.push_back(deviceInfo);
  }

  // since AE takes the first device in deviceInfoList as default audio device we need
  // to sort it in order to use the real default device
  if(deviceInfoList.size() > 1)
  {
    std::string strDD = GetDefaultDevice();
    for (AEDeviceInfoList::iterator itt = deviceInfoList.begin(); itt != deviceInfoList.end(); ++itt)
    {
      CAEDeviceInfo devInfo = *itt;
      if(devInfo.m_deviceName == strDD)
      {
        deviceInfoList.erase(itt);
        deviceInfoList.insert(deviceInfoList.begin(), devInfo);
        break;
      }
    }
  }

  return;

failed:

  if (FAILED(hr))
    CLog::Log(LOGERROR, __FUNCTION__": Failed to enumerate WASAPI endpoint devices (%s).", WASAPIErrToStr(hr));

  SAFE_RELEASE(pEnumDevices);
  SAFE_RELEASE(pEnumerator);

}
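GetDefaultDevice(), used here for the default-device sort (and in Example #2 for the "default" alias), is also defined elsewhere. Judging from how its result is compared against m_deviceName, it returns the PKEY_AudioEndpoint_GUID string of the default render endpoint; a sketch under that assumption:

// Assumed sketch of GetDefaultDevice(): GUID string of the default render endpoint.
static std::string GetDefaultDevice()
{
  std::string strDevName;
  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDevice* pDevice = NULL;
  IPropertyStore* pProperty = NULL;
  PROPVARIANT varName;
  PropVariantInit(&varName);

  if (SUCCEEDED(CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator)) &&
      SUCCEEDED(pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice)) &&
      SUCCEEDED(pDevice->OpenPropertyStore(STGM_READ, &pProperty)) &&
      SUCCEEDED(pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName)))
  {
    strDevName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);
  }

  SAFE_RELEASE(pProperty);
  SAFE_RELEASE(pDevice);
  SAFE_RELEASE(pEnumerator);
  return strDevName;
}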
Example #4
void CAESinkWASAPI::EnumerateDevicesEx(AEDeviceInfoList &deviceInfoList, bool force)
{
  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDeviceCollection* pEnumDevices = NULL;
  IMMDevice*           pDefaultDevice = NULL;
  CAEDeviceInfo        deviceInfo;
  CAEChannelInfo       deviceChannels;
  LPWSTR               pwszID = NULL;
  std::wstring         wstrDDID;

  WAVEFORMATEXTENSIBLE wfxex = {0};
  HRESULT              hr;

  hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

  UINT uiCount = 0;

  // get the default audio endpoint
  if(pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDefaultDevice) == S_OK)
  {
    if(pDefaultDevice->GetId(&pwszID) == S_OK)
    {
      wstrDDID = pwszID;
      CoTaskMemFree(pwszID);
    }
    SAFE_RELEASE(pDefaultDevice);
  }

  // enumerate over all audio endpoints
  hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

  hr = pEnumDevices->GetCount(&uiCount);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

  for (UINT i = 0; i < uiCount; i++)
  {
    IMMDevice *pDevice = NULL;
    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;
    PropVariantInit(&varName);

    deviceInfo.m_channels.Reset();
    deviceInfo.m_dataFormats.clear();
    deviceInfo.m_sampleRates.clear();

    hr = pEnumDevices->Item(i, &pDevice);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint failed.");
      goto failed;
    }

    hr = pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.");
      SAFE_RELEASE(pDevice);
      goto failed;
    }

    hr = pProperty->GetValue(PKEY_Device_FriendlyName, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint device name failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strFriendlyName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
    if(FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint GUID failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strDevName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_FormFactor, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint form factor failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    std::string strWinDevType = winEndpoints[(EndpointFormFactor)varName.uiVal].winEndpointType;
    AEDeviceType aeDeviceType = winEndpoints[(EndpointFormFactor)varName.uiVal].aeDeviceType;

    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_PhysicalSpeakers, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint speaker layout failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    unsigned int uiChannelMask = std::max(varName.uintVal, (unsigned int) KSAUDIO_SPEAKER_STEREO);

    deviceChannels.Reset();

    for (unsigned int c = 0; c < WASAPI_SPEAKER_COUNT; c++)
    {
      if (uiChannelMask & WASAPIChannelOrder[c])
        deviceChannels += AEChannelNames[c];
    }

    PropVariantClear(&varName);

    IAudioClient *pClient;
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pClient);
    if (SUCCEEDED(hr))
    {
      /* Test format DTS-HD */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.Format.nSamplesPerSec       = 192000;
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_7POINT1_SURROUND;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 8;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_DTSHD));

      /* Test format Dolby TrueHD */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_TRUEHD));

      /* Test format Dolby EAC3 */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_EAC3));

      /* Test format DTS */
      wfxex.Format.nSamplesPerSec       = 48000;
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_5POINT1;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DTS;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_DTS));

      /* Test format Dolby AC3 */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));

      /* Test format AAC */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_AAC;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AAC));

      /* Test format for PCM format iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;

      for (int p = AE_FMT_FLOAT; p > AE_FMT_INVALID; p--)
      {
        if (p < AE_FMT_FLOAT)
          wfxex.SubFormat               = KSDATAFORMAT_SUBTYPE_PCM;
        wfxex.Format.wBitsPerSample     = CAEUtil::DataFormatToBits((AEDataFormat) p);
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        if (p <= AE_FMT_S24NE4 && p >= AE_FMT_S24BE4)
        {
          wfxex.Samples.wValidBitsPerSample = 24;
        }
        else
        {
          wfxex.Samples.wValidBitsPerSample = wfxex.Format.wBitsPerSample;
        }

        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
          deviceInfo.m_dataFormats.push_back((AEDataFormat) p);
      }

      /* Test format for sample rate iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_PCM;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;

      for (int j = 0; j < WASAPISampleRateCount; j++)
      {
        wfxex.Format.nSamplesPerSec     = WASAPISampleRates[j];
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
          deviceInfo.m_sampleRates.push_back(WASAPISampleRates[j]);
      }

      /* Test format for channels iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_PCM;
      wfxex.Format.nSamplesPerSec       = 48000;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;

      bool hasLpcm = false;

      // Try with KSAUDIO_SPEAKER_DIRECTOUT
      for (unsigned int k = WASAPI_SPEAKER_COUNT; k > 0; k--)
      {
        wfxex.dwChannelMask             = KSAUDIO_SPEAKER_DIRECTOUT;
        wfxex.Format.nChannels          = k;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if (k > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
          break;
        }
      }

      /* Try with reported channel mask */
      for (unsigned int k = WASAPI_SPEAKER_COUNT; k > 0; k--)
      {
        wfxex.dwChannelMask             = uiChannelMask;
        wfxex.Format.nChannels          = k;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if ( !hasLpcm && k > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
          break;
        }
      }

      /* Try with specific speakers configurations */
      for (unsigned int i = 0; i < ARRAYSIZE(layoutsList); i++)
      {
        unsigned int nmbOfCh;
        wfxex.dwChannelMask             = ChLayoutToChMask(layoutsList[i], &nmbOfCh);
        wfxex.Format.nChannels          = nmbOfCh;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if ( deviceChannels.Count() < nmbOfCh)
            deviceChannels = layoutsList[i];
          if ( !hasLpcm && nmbOfCh > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
        }
      }
      pClient->Release();
    }
    else
    {
      CLog::Log(LOGDEBUG, __FUNCTION__": Failed to activate device for passthrough capability testing.");
    }

    deviceInfo.m_deviceName       = strDevName;
    deviceInfo.m_displayName      = strWinDevType.append(strFriendlyName);
    deviceInfo.m_displayNameExtra = std::string("WASAPI: ").append(strFriendlyName);
    deviceInfo.m_deviceType       = aeDeviceType;
    deviceInfo.m_channels         = deviceChannels;

    /* Store the device info */
    deviceInfoList.push_back(deviceInfo);

    if(pDevice->GetId(&pwszID) == S_OK)
    {
      if(wstrDDID.compare(pwszID) == 0)
      {
        deviceInfo.m_deviceName = std::string("default");
        deviceInfo.m_displayName = std::string("default");
        deviceInfo.m_displayNameExtra = std::string("");
        deviceInfoList.push_back(deviceInfo);
      }
      CoTaskMemFree(pwszID);
    }

    SAFE_RELEASE(pDevice);
    SAFE_RELEASE(pProperty);
  }

  return;

failed:

  if (FAILED(hr))
    CLog::Log(LOGERROR, __FUNCTION__": Failed to enumerate WASAPI endpoint devices (%s).", WASAPIErrToStr(hr));

  SAFE_RELEASE(pEnumDevices);
  SAFE_RELEASE(pEnumerator);

}
Example #5
void CAESinkDirectSound::EnumerateDevicesEx(AEDeviceInfoList &deviceInfoList)
{
    CAEDeviceInfo        deviceInfo;
    OSVERSIONINFO        osvi;

    IMMDeviceEnumerator* pEnumerator = NULL;
    IMMDeviceCollection* pEnumDevices = NULL;

    WAVEFORMATEX*          pwfxex = NULL;
    HRESULT                hr;

    /* See if we are on Windows XP */
    ZeroMemory(&osvi, sizeof(OSVERSIONINFO));
    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);

    GetVersionEx(&osvi);

    if (osvi.dwMajorVersion == 5)
    {
        /* We are on XP - WASAPI not supported - enumerate using DS devices */
        LPGUID deviceGUID = NULL;
        RPC_CSTR cszGUID;
        std::string szGUID;
        std::list<DSDevice> DSDeviceList;
        DirectSoundEnumerate(DSEnumCallback, &DSDeviceList);

        for(std::list<DSDevice>::iterator itt = DSDeviceList.begin(); itt != DSDeviceList.end(); itt++)
        {
            if (UuidToString((*itt).lpGuid, &cszGUID) != RPC_S_OK)
                continue;  /* could not convert GUID to string - skip device */

            deviceInfo.m_channels.Reset();
            deviceInfo.m_dataFormats.clear();
            deviceInfo.m_sampleRates.clear();

            szGUID = (LPSTR)cszGUID;

            deviceInfo.m_deviceName = "{" + szGUID + "}";
            deviceInfo.m_displayName = (*itt).name;
            deviceInfo.m_displayNameExtra = std::string("DirectSound: ") + (*itt).name;

            deviceInfo.m_deviceType = AE_DEVTYPE_PCM;
            deviceInfo.m_channels   = layoutsByChCount[2];

            deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_FLOAT));
            deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));

            deviceInfo.m_sampleRates.push_back((DWORD) 96000);

            deviceInfoList.push_back(deviceInfo);
        }

        RpcStringFree(&cszGUID);
        return;
    }

    /* Windows Vista or later - supporting WASAPI device probing */
    hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

    UINT uiCount = 0;

    hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

    hr = pEnumDevices->GetCount(&uiCount);
    EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

    for (UINT i = 0; i < uiCount; i++)
    {
        IMMDevice *pDevice = NULL;
        IPropertyStore *pProperty = NULL;
        PROPVARIANT varName;
        PropVariantInit(&varName);

        deviceInfo.m_channels.Reset();
        deviceInfo.m_dataFormats.clear();
        deviceInfo.m_sampleRates.clear();

        hr = pEnumDevices->Item(i, &pDevice);
        if (FAILED(hr))
        {
            CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint failed.");
            goto failed;
        }

        hr = pDevice->OpenPropertyStore(STGM_READ, &pProperty);
        if (FAILED(hr))
        {
            CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint properties failed.");
            SAFE_RELEASE(pDevice);
            goto failed;
        }

        hr = pProperty->GetValue(PKEY_Device_FriendlyName, &varName);
        if (FAILED(hr))
        {
            CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint device name failed.");
            SAFE_RELEASE(pDevice);
            SAFE_RELEASE(pProperty);
            goto failed;
        }

        std::wstring strRawFriendlyName(varName.pwszVal);
        std::string strFriendlyName = std::string(strRawFriendlyName.begin(), strRawFriendlyName.end());

        PropVariantClear(&varName);

        hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
        if (FAILED(hr))
        {
            CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint GUID failed.");
            SAFE_RELEASE(pDevice);
            SAFE_RELEASE(pProperty);
            goto failed;
        }

        std::wstring strRawDevName(varName.pwszVal);
        std::string strDevName = std::string(strRawDevName.begin(), strRawDevName.end());
        PropVariantClear(&varName);

        hr = pProperty->GetValue(PKEY_AudioEndpoint_FormFactor, &varName);
        if (FAILED(hr))
        {
            CLog::Log(LOGERROR, __FUNCTION__": Retrieval of DirectSound endpoint form factor failed.");
            SAFE_RELEASE(pDevice);
            SAFE_RELEASE(pProperty);
            goto failed;
        }
        std::string strWinDevType = winEndpoints[(EndpointFormFactor)varName.uiVal].winEndpointType;
        AEDeviceType aeDeviceType = winEndpoints[(EndpointFormFactor)varName.uiVal].aeDeviceType;

        PropVariantClear(&varName);

        /* In shared mode Windows tells us what format the audio must be in. */
        IAudioClient *pClient = NULL;
        hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pClient);
        if (FAILED(hr))
        {
            CLog::Log(LOGERROR, __FUNCTION__": Activate device failed (%s)", WASAPIErrToStr(hr));
        }

        //hr = pClient->GetMixFormat(&pwfxex);
        hr = pProperty->GetValue(PKEY_AudioEngine_DeviceFormat, &varName);
        if (SUCCEEDED(hr))
        {
            WAVEFORMATEX* smpwfxex = (WAVEFORMATEX*)varName.blob.pBlobData;
            deviceInfo.m_channels = layoutsByChCount[std::min(smpwfxex->nChannels, (WORD) 2)];
            deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_FLOAT));
            deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));
            deviceInfo.m_sampleRates.push_back(std::min(smpwfxex->nSamplesPerSec, (DWORD) 96000));
        }
        else
        {
            CLog::Log(LOGERROR, __FUNCTION__": Getting DeviceFormat failed (%s)", WASAPIErrToStr(hr));
        }
        if (pClient)
            pClient->Release();

        SAFE_RELEASE(pDevice);
        SAFE_RELEASE(pProperty);

        deviceInfo.m_deviceName       = strDevName;
        deviceInfo.m_displayName      = strWinDevType.append(strFriendlyName);
        deviceInfo.m_displayNameExtra = std::string("DirectSound: ").append(strFriendlyName);
        deviceInfo.m_deviceType       = aeDeviceType;

        /* Now logged by AESinkFactory on startup */
        //CLog::Log(LOGDEBUG,"Audio Device %d:    %s", i, ((std::string)deviceInfo).c_str());

        deviceInfoList.push_back(deviceInfo);
    }

    return;

failed:

    if (FAILED(hr))
        CLog::Log(LOGERROR, __FUNCTION__": Failed to enumerate WASAPI endpoint devices (%s).", WASAPIErrToStr(hr));

    SAFE_RELEASE(pEnumDevices);
    SAFE_RELEASE(pEnumerator);

}
Example #6
void LoopbackCaptureFor(IMMDevice* mmDevice, std::string filename, int secs)
{
    // open new file
    MMIOINFO mi = { 0 };

    // some flags cause mmioOpen write to this buffer
    // but not any that we're using
    std::wstring wsFilename(filename.begin(), filename.end()); // mmioOpen wants a wstring
    HMMIO file = mmioOpen(const_cast<LPWSTR>(wsFilename.c_str()), &mi, MMIO_WRITE | MMIO_CREATE);

    time_t startTime = time(nullptr);

    // activate an IAudioClient
    IAudioClient* audioClient;
    HRESULT hr = mmDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
    if (FAILED(hr))
    {
        fprintf(stderr, "IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return;
    }

    // get the default device periodicity
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = audioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, nullptr);
    if (FAILED(hr))
    {
        fprintf(stderr, "IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        audioClient->Release();
        return;
    }

    // get the default device format
    WAVEFORMATEX* waveform = nullptr;
    hr = audioClient->GetMixFormat(&waveform);
    if (FAILED(hr))
    {
        fprintf(stderr, "IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(waveform);
        audioClient->Release();
        return;
    }

    // coerce int-16 wave format
    // can do this in-place since we're not changing the size of the format
    // also, the engine will auto-convert from float to int for us
    switch (waveform->wFormatTag)
    {
        case WAVE_FORMAT_IEEE_FLOAT:
            waveform->wFormatTag = WAVE_FORMAT_PCM;
            waveform->wBitsPerSample = BITS_PER_SAMPLE;
            waveform->nBlockAlign = BLOCK_ALIGN;
            waveform->nAvgBytesPerSec = BYTE_RATE;
            break;
        case WAVE_FORMAT_EXTENSIBLE:
        {
            // naked scope for case-local variable
            PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(waveform);
            if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
            {
                pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                pEx->Samples.wValidBitsPerSample = BITS_PER_SAMPLE;
                waveform->wBitsPerSample = BITS_PER_SAMPLE;
                waveform->nBlockAlign = waveform->nChannels * BYTE_PER_SAMPLE;
                waveform->nAvgBytesPerSec = waveform->nBlockAlign * waveform->nSamplesPerSec;
            }
            break;
        }
    }

    MMCKINFO ckRIFF = { 0 };
    MMCKINFO ckData = { 0 };
    hr = WriteWaveHeader(file, waveform, &ckRIFF, &ckData);

    // create a periodic waitable timer
    HANDLE hWakeUp = CreateWaitableTimer(nullptr, FALSE, nullptr);
    UINT32 nBlockAlign = waveform->nBlockAlign;

    // call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to do a timer-driven loop
    hr = audioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, waveform, 0);
    if (FAILED(hr))
    {
        fprintf(stderr, "IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        audioClient->Release();
        return;
    }

    // free up waveform
    CoTaskMemFree(waveform);

    // activate an IAudioCaptureClient
    IAudioCaptureClient* audioCaptureClient;
    hr = audioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&audioCaptureClient);
    if (FAILED(hr))
    {
        fprintf(stderr, "IAudioClient::GetService(IAudioCaptureClient) failed: hr = 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        audioClient->Release();
        return;
    }

    // register with MMCSS
    DWORD nTaskIndex = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (hTask == nullptr)
    {
        DWORD dwErr = GetLastError();
        fprintf(stderr, "AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        audioCaptureClient->Release();
        CloseHandle(hWakeUp);
        audioClient->Release();
        return;
    }

    // set the waitable timer
    LARGE_INTEGER liFirstFire;
    liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time
    LONG lTimeBetweenFires = (LONG)hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert to milliseconds
    if (!SetWaitableTimer(hWakeUp, &liFirstFire, lTimeBetweenFires, nullptr, nullptr, FALSE))
    {
        DWORD dwErr = GetLastError();
        fprintf(stderr, "SetWaitableTimer failed: last error = %u\n", dwErr);
        AvRevertMmThreadCharacteristics(hTask);
        audioCaptureClient->Release();
        CloseHandle(hWakeUp);
        audioClient->Release();
        return;
    }

    // call IAudioClient::Start
    hr = audioClient->Start();

    // loopback capture loop
    DWORD dwWaitResult;

    UINT32 frames = 0;
    for (UINT32 passes = 0; ; passes++)
    {
        // drain data while it is available
        UINT32 nextPacketSize;
        for (hr = audioCaptureClient->GetNextPacketSize(&nextPacketSize);
            SUCCEEDED(hr) && nextPacketSize > 0;
            hr = audioCaptureClient->GetNextPacketSize(&nextPacketSize))
        {
            // get the captured data
            BYTE* data;
            UINT32 framesToRead;
            DWORD dwFlags;

            hr = audioCaptureClient->GetBuffer(&data, &framesToRead, &dwFlags, nullptr, nullptr);
            if (FAILED(hr))
            {
                fprintf(stderr, "IAudioCaptureClient::GetBuffer failed on pass %u after %u frames: hr = 0x%08x\n", passes, frames, hr);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            // this type of error seems to happen often, ignore it
            if (dwFlags == AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)
                ;
            else if (dwFlags != 0) {
                fprintf(stderr, "IAudioCaptureClient::GetBuffer set flags to 0x%08x on pass %u after %u frames\n", dwFlags, passes, frames);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            if (framesToRead == 0)
            {
                fprintf(stderr, "IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u after %u frames\n", passes, frames);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            LONG lBytesToWrite = framesToRead * nBlockAlign;
#pragma prefast(suppress: __WARNING_INCORRECT_ANNOTATION, "IAudioCaptureClient::GetBuffer SAL annotation implies a 1-byte buffer")
            LONG lBytesWritten = mmioWrite(file, reinterpret_cast<PCHAR>(data), lBytesToWrite);
            if (lBytesToWrite != lBytesWritten)
            {
                fprintf(stderr, "mmioWrite wrote %u bytes on pass %u after %u frames: expected %u bytes\n", lBytesWritten, passes, frames, lBytesToWrite);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            frames += framesToRead;

            hr = audioCaptureClient->ReleaseBuffer(framesToRead);
        }

        dwWaitResult = WaitForSingleObject(hWakeUp, INFINITE);

        if (time(nullptr) - startTime > secs)
            break;
    }

    FinishWaveFile(file, &ckData, &ckRIFF);
    audioClient->Stop();
    CancelWaitableTimer(hWakeUp);
    AvRevertMmThreadCharacteristics(hTask);
    audioCaptureClient->Release();
    CloseHandle(hWakeUp);
    audioClient->Release();


    // everything went well... fixup the fact chunk in the file
    MMRESULT result = mmioClose(file, 0);
    file = nullptr;
    if (result != MMSYSERR_NOERROR)
    {
        fprintf(stderr, "mmioClose failed: MMSYSERR = %u\n", result);
        return;
    }

    // reopen the file in read/write mode
    mi = { 0 };
    file = mmioOpen(const_cast<LPWSTR>(wsFilename.c_str()), &mi, MMIO_READWRITE);
    if (file == nullptr)
    {
        fprintf(stderr, "mmioOpen(\"%ls\", ...) failed. wErrorRet == %u\n", filename, mi.wErrorRet);
        return;
    }

    // descend into the RIFF/WAVE chunk
    ckRIFF = { 0 };
    ckRIFF.ckid = MAKEFOURCC('W', 'A', 'V', 'E'); // this is right for mmioDescend
    result = mmioDescend(file, &ckRIFF, nullptr, MMIO_FINDRIFF);
    if (result != MMSYSERR_NOERROR)
    {
        fprintf(stderr, "mmioDescend(\"WAVE\") failed: MMSYSERR = %u\n", result);
        return;
    }

    // descend into the fact chunk
    MMCKINFO ckFact = { 0 };
    ckFact.ckid = MAKEFOURCC('f', 'a', 'c', 't');
    result = mmioDescend(file, &ckFact, &ckRIFF, MMIO_FINDCHUNK);
    if (result != MMSYSERR_NOERROR) {
        fprintf(stderr, "mmioDescend(\"fact\") failed: MMSYSERR = %u\n", result);
        return;
    }

    // write the correct data to the fact chunk
    LONG lBytesWritten = mmioWrite(file, reinterpret_cast<PCHAR>(&frames), sizeof(frames));
    if (lBytesWritten != sizeof(frames))
    {
        fprintf(stderr, "Updating the fact chunk wrote %u bytes; expected %u\n", lBytesWritten, (UINT32)sizeof(frames));
        return;
    }

    // ascend out of the fact chunk
    result = mmioAscend(file, &ckFact, 0);
    if (result != MMSYSERR_NOERROR)
        fprintf(stderr, "mmioAscend(\"fact\") failed: MMSYSERR = %u\n", result);
}
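LoopbackCaptureFor() expects the caller to hand it an already-activated IMMDevice. A minimal usage sketch, assuming COM is initialized by the host program and reusing the enumerator pattern from the examples above:

// Hedged usage sketch: record ten seconds of whatever the default render device is playing.
int wmain()
{
    CoInitialize(nullptr);
    IMMDeviceEnumerator* enumerator = nullptr;
    IMMDevice* mmDevice = nullptr;
    if (SUCCEEDED(CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
                                   __uuidof(IMMDeviceEnumerator), (void**)&enumerator)))
    {
        if (SUCCEEDED(enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &mmDevice)))
        {
            LoopbackCaptureFor(mmDevice, "loopback.wav", 10);
            mmDevice->Release();
        }
        enumerator->Release();
    }
    CoUninitialize();
    return 0;
}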
//HRESULT LoopbackCapture(
//    IMMDevice *pMMDevice,
//    bool bInt16,
//    HANDLE hStartedEvent,
//    HANDLE hStopEvent,
//    PUINT32 pnFrames,
//	HMMIO hFile,
//	AudioBuffer *pBuffer
//)
HRESULT LoopbackCapture::Process()
{
    HRESULT hr;

    // activate an IAudioClient
    IAudioClient *pAudioClient;
    hr = pMMDevice->Activate(
             __uuidof(IAudioClient),
             CLSCTX_ALL, NULL,
             (void**)&pAudioClient
         );
    if (FAILED(hr)) {
        printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }

    // get the default device periodicity
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if (FAILED(hr)) {
        printf("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // get the default device format
    WAVEFORMATEX *pwfx = NULL;
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
    {
        PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
        //pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        printf("WAVE_FORMAT_EXTENSIBLE\n");
        if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
        {
            printf("float\n");
        }//
        else if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_PCM, pEx->SubFormat))
        {
            printf("PCM\n");
        }//KSDATAFORMAT_SUBTYPE_WAVEFORMATEX
        else if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_WAVEFORMATEX, pEx->SubFormat))
        {
            printf("WAVEFORMATEX\n");
        }
    }

    if (bInt16) {
        // coerce int-16 wave format
        // can do this in-place since we're not changing the size of the format
        // also, the engine will auto-convert from float to int for us
        switch (pwfx->wFormatTag) {
        case WAVE_FORMAT_IEEE_FLOAT:
            pwfx->wFormatTag = WAVE_FORMAT_PCM;
            pwfx->wBitsPerSample = 16;
            pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
            pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
            break;

        case WAVE_FORMAT_EXTENSIBLE:
        {
            // naked scope for case-local variable
            PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
            if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                pEx->Samples.wValidBitsPerSample = 16;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
            } else {
                printf("Don't know how to coerce mix format to int-16\n");
                CoTaskMemFree(pwfx);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }
        }
        break;

        default:
            printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
            CoTaskMemFree(pwfx);
            pAudioClient->Release();
            return E_UNEXPECTED;
        }
    }

    MMCKINFO ckRIFF = {0};
    MMCKINFO ckData = {0};
    if (hFile!=NULL)
        hr = WriteWaveHeader(hFile, pwfx, &ckRIFF, &ckData);
    if (pBuffer)
    {
        bool isFloat = false;
        switch (pwfx->wFormatTag) {
        case WAVE_FORMAT_IEEE_FLOAT:
            isFloat = true;
            break;

        case WAVE_FORMAT_EXTENSIBLE:
        {
            // naked scope for case-local variable
            PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
            if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                isFloat = true;
            }
        }
        break;
        default:
            break;
        }
        pBuffer->SetAudioInfo(pwfx->nSamplesPerSec,pwfx->nBlockAlign,pwfx->nChannels,pwfx->wBitsPerSample,isFloat);
    }

    if (FAILED(hr)) {
        // WriteWaveHeader does its own logging
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    // create a periodic waitable timer
    HANDLE hWakeUp = CreateWaitableTimer(NULL, FALSE, NULL);
    if (NULL == hWakeUp) {
        DWORD dwErr = GetLastError();
        printf("CreateWaitableTimer failed: last error = %u\n", dwErr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    UINT32 nBlockAlign = pwfx->nBlockAlign;
    UINT32 nChannels = pwfx->nChannels;
    nFrames = 0;

    // call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to do a timer-driven loop
    hr = pAudioClient->Initialize(
             AUDCLNT_SHAREMODE_SHARED,
             AUDCLNT_STREAMFLAGS_LOOPBACK,
             0, 0, pwfx, 0
         );
    if (FAILED(hr)) {
        printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }
    CoTaskMemFree(pwfx);

    // activate an IAudioCaptureClient
    IAudioCaptureClient *pAudioCaptureClient;
    hr = pAudioClient->GetService(
             __uuidof(IAudioCaptureClient),
             (void**)&pAudioCaptureClient
         );
    if (FAILED(hr)) {
        printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }

    // register with MMCSS
    DWORD nTaskIndex = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (NULL == hTask) {
        DWORD dwErr = GetLastError();
        printf("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    // set the waitable timer
    LARGE_INTEGER liFirstFire;
    liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time
    LONG lTimeBetweenFires = (LONG)hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert to milliseconds
    BOOL bOK = SetWaitableTimer(
                   hWakeUp,
                   &liFirstFire,
                   lTimeBetweenFires,
                   NULL, NULL, FALSE
               );
    if (!bOK) {
        DWORD dwErr = GetLastError();
        printf("SetWaitableTimer failed: last error = %u\n", dwErr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    // call IAudioClient::Start
    hr = pAudioClient->Start();
    if (FAILED(hr)) {
        printf("IAudioClient::Start failed: hr = 0x%08x\n", hr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }
    SetEvent(hStartedEvent);

    // loopback capture loop
    HANDLE waitArray[2] = { hStopEvent, hWakeUp };
    DWORD dwWaitResult;
    DWORD immdState;

    bool bDone = false;
    bool bFirstPacket = true;
    for (UINT32 nPasses = 0; !bDone; nPasses++) {
        dwWaitResult = WaitForMultipleObjects(
                           ARRAYSIZE(waitArray), waitArray,
                           FALSE, INFINITE
                       );

        if (WAIT_OBJECT_0 == dwWaitResult) {
            //printf("Received stop event after %u passes and %u frames\n", nPasses, nFrames);
            bDone = true;
            continue; // exits loop
        }

        if (WAIT_OBJECT_0 + 1 != dwWaitResult) {
            printf("Unexpected WaitForMultipleObjects return value %u on pass %u after %u frames\n", dwWaitResult, nPasses, nFrames);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return E_UNEXPECTED;
        }

        printf("'");

        // got a "wake up" event - see if there's data
        UINT32 nNextPacketSize;
        hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize);
        if (FAILED(hr)) {
            if (hr == AUDCLNT_E_SERVICE_NOT_RUNNING)
                printf("AUDCLNT_E_SERVICE_NOT_RUNNING : \n");
            else if (hr == AUDCLNT_E_DEVICE_INVALIDATED)
                printf("AUDCLNT_E_DEVICE_INVALIDATED : \n");
            else
                printf("UNKNOWN ERROR!!! : \n");

            printf("IAudioCaptureClient::GetNextPacketSize failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, nFrames, hr);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return hr;
        }

        if (0 == nNextPacketSize) {
            // no data yet
            continue;
        }

        // get the captured data
        BYTE *pData;
        UINT32 nNumFramesToRead;
        DWORD dwFlags;

        hr = pAudioCaptureClient->GetBuffer(
                 &pData,
                 &nNumFramesToRead,
                 &dwFlags,
                 NULL,
                 NULL
             );
        if (FAILED(hr)) {
            printf("IAudioCaptureClient::GetBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, nFrames, hr);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return hr;
        }

        if (bFirstPacket && AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY == dwFlags) {
            printf("Probably spurious glitch reported on first packet\n");
        }
        else if (dwFlags & AUDCLNT_BUFFERFLAGS_SILENT)
        {
            printf("#");
        }
        else if (dwFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)
        {
            printf("!");
        }
        else if (0 != dwFlags) {
            printf("IAudioCaptureClient::GetBuffer set flags to 0x%08x on pass %u after %u frames\n", dwFlags, nPasses, nFrames);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return E_UNEXPECTED;
        }

        if (0 == nNumFramesToRead) {
            // no data yet
            continue;
        }

        //if (0 == nNumFramesToRead) {
        //    printf("IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u after %u frames\n", nPasses, nFrames);
        //    pAudioClient->Stop();
        //    CancelWaitableTimer(hWakeUp);
        //    AvRevertMmThreadCharacteristics(hTask);
        //    pAudioCaptureClient->Release();
        //    CloseHandle(hWakeUp);
        //    pAudioClient->Release();
        //    return E_UNEXPECTED;
        //}

        LONG lBytesToWrite = nNumFramesToRead * nBlockAlign;
#pragma prefast(suppress: __WARNING_INCORRECT_ANNOTATION, "IAudioCaptureClient::GetBuffer SAL annotation implies a 1-byte buffer")

        if (hFile!=NULL)
        {
            LONG lBytesWritten = mmioWrite(hFile, reinterpret_cast<PCHAR>(pData), lBytesToWrite);
            if (lBytesToWrite != lBytesWritten) {
                printf("mmioWrite wrote %u bytes on pass %u after %u frames: expected %u bytes\n", lBytesWritten, nPasses, nFrames, lBytesToWrite);
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }
        }
        if (pBuffer)
        {
            //switch (nBlockAlign/nChannels)
            //{
            //case 1:
            //	ShowPCM((unsigned char*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_Byte");
            //	break;
            //case 2:
            //	ShowPCM((short*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_Short");
            //	break;
            //case 4:
            //	ShowPCM((int*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_Int");
            //	//ShowPCM((float*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_float");
            //	break;
            //}


            pBuffer->PushBuffer(pData,lBytesToWrite);
        }
        nFrames += nNumFramesToRead;

        hr = pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
        if (FAILED(hr)) {
            printf("IAudioCaptureClient::ReleaseBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, nFrames, hr);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return hr;
        }

        bFirstPacket = false;
    } // capture loop

    if (hFile!=NULL)
        hr = FinishWaveFile(hFile, &ckData, &ckRIFF);


    if (FAILED(hr)) {
        // FinishWaveFile does its own logging
        pAudioClient->Stop();
        CancelWaitableTimer(hWakeUp);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }

    pAudioClient->Stop();
    CancelWaitableTimer(hWakeUp);
    AvRevertMmThreadCharacteristics(hTask);
    pAudioCaptureClient->Release();
    CloseHandle(hWakeUp);
    pAudioClient->Release();

    return hr;
}
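Process() reads several members that are declared elsewhere in the class; the commented-out free-function signature above hints at what they are. A rough sketch of the owning class, with names and layout assumed rather than taken from the original source:

// Assumed sketch of the LoopbackCapture class used by Process() above.
class AudioBuffer; // in-memory sink; provides SetAudioInfo() and PushBuffer()

class LoopbackCapture
{
public:
    HRESULT Process();

    IMMDevice   *pMMDevice;      // endpoint whose loopback stream is captured
    bool         bInt16;         // coerce the mix format to 16-bit PCM before capturing
    HANDLE       hStartedEvent;  // signaled once IAudioClient::Start has succeeded
    HANDLE       hStopEvent;     // signaled by the caller to end the capture loop
    UINT32       nFrames;        // total frames captured so far
    HMMIO        hFile;          // optional .wav output (may be NULL)
    AudioBuffer *pBuffer;        // optional in-memory sink (may be NULL)
};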
Example #8
int main(int argc, char *argv[])
{
	CoInitialize(nullptr);

	listDevices();

	IAudioClient *pAudioClient;
	IMMDevice *device;

	getDefaultDevice(&device);

    HRESULT hr = device->Activate(__uuidof(IAudioClient),
        CLSCTX_ALL, nullptr, (void**)&pAudioClient);
    if (FAILED(hr)) {
        printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }

	REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, nullptr);
    if (FAILED(hr)) {
        printf("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

	// get the default device format
    WAVEFORMATEX *pwfx;
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    DVAR(pwfx->wFormatTag);
    DVAR(pwfx->wBitsPerSample);
    DVAR(pwfx->nBlockAlign);
    DVAR(pwfx->nAvgBytesPerSec);

    switch (pwfx->wFormatTag) {
        case WAVE_FORMAT_IEEE_FLOAT:
            pwfx->wFormatTag = WAVE_FORMAT_PCM;
            pwfx->wBitsPerSample = 16;
            pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
            pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
            break;

        case WAVE_FORMAT_EXTENSIBLE:
            {
                // naked scope for case-local variable
                PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
                if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                    pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                    pEx->Samples.wValidBitsPerSample = 16;
                    pwfx->wBitsPerSample = 16;
                    pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                    pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                } else {
                    printf("Don't know how to coerce mix format to int-16\n");
                    CoTaskMemFree(pwfx);
                    pAudioClient->Release();
                    return E_UNEXPECTED;
                }
            }
            break;

        default:
            printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
            CoTaskMemFree(pwfx);
            pAudioClient->Release();
            return E_UNEXPECTED;
    }

    DVAR(pwfx->wFormatTag);
    DVAR(pwfx->wBitsPerSample);
    DVAR(pwfx->nBlockAlign);
    DVAR(pwfx->nAvgBytesPerSec);

	hr = pAudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, pwfx, 0 );
    if (FAILED(hr)) {
        printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

	IAudioCaptureClient *pAudioCaptureClient;
    hr = pAudioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&pAudioCaptureClient);
    if (FAILED(hr)) {
        printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }


    hr = pAudioClient->Start();
    if (FAILED(hr)) {
        printf("IAudioClient::Start failed: hr = 0x%08x\n", hr);
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return hr;
    }


	// poll for up to 10 packets as a quick smoke test
	for (int i = 0; i < 10; ++i)
	{

	UINT32 nNextPacketSize;
    hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize);
    if (FAILED(hr)) {
        printf("IAudioCaptureClient::GetNextPacketSize failed on pass %u after %u frames: hr = 0x%08x\n", 0, 0, hr);
        pAudioClient->Stop();
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return hr;
    }


    // get the captured data
    BYTE *pData;
    UINT32 nNumFramesToRead;
    DWORD dwFlags;

    hr = pAudioCaptureClient->GetBuffer(&pData, &nNumFramesToRead, &dwFlags, nullptr,
        nullptr);
    if (FAILED(hr)) {
        printf("IAudioCaptureClient::GetBuffer failed on pass %u after %u frames: hr = 0x%08x\n", 0, 0, hr);
        pAudioClient->Stop();
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return hr;
    }

    DVAR(nNumFramesToRead);


    // a data-discontinuity flag on the first packet is usually a spurious glitch;
    // any other flag is just logged here rather than treated as fatal
    if (0 != dwFlags && AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY != dwFlags) {
        printf("IAudioCaptureClient::GetBuffer set flags to 0x%08x on pass %d\n", dwFlags, i);
    } else {
        DVAR((int)*pData); // peek at the first captured byte
    }

    if (0 == nNumFramesToRead) {
        printf("IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u after %u frames\n", 0, 0);
        pAudioClient->Stop();
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return E_UNEXPECTED;
    }


    UINT32 nBlockAlign = pwfx->nBlockAlign;
    LONG lBytesToWrite = nNumFramesToRead * nBlockAlign;
    DVAR(lBytesToWrite); // bytes captured this pass (this test does not write them out)

    hr = pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
    if (FAILED(hr))
        printf("IAudioCaptureClient::ReleaseBuffer failed on pass %d: hr = 0x%08x\n", i, hr);
	}

	pAudioClient->Stop();
    pAudioCaptureClient->Release();
    pAudioClient->Release();


	CoUninitialize();

	return 0;
}
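The smoke test above checks GetNextPacketSize once per pass and reads at most one packet. The later examples in this listing instead drain every packet that is currently available before waiting again; here is a minimal, hypothetical helper that captures that pattern (the helper name and the callback type are assumptions, not part of any example here).

// Sketch only: drain every packet currently queued on an IAudioCaptureClient,
// handing each one to a caller-supplied sink before releasing it.
#include <audioclient.h>

typedef void (*PacketSink)(const BYTE *pData, UINT32 nFrames, DWORD dwFlags);

HRESULT DrainAvailablePackets(IAudioCaptureClient *pCapture, PacketSink onPacket) {
    UINT32 nNextPacketSize = 0;
    HRESULT hr = pCapture->GetNextPacketSize(&nNextPacketSize);
    while (SUCCEEDED(hr) && nNextPacketSize > 0) {
        BYTE *pData = NULL;
        UINT32 nFrames = 0;
        DWORD dwFlags = 0;
        hr = pCapture->GetBuffer(&pData, &nFrames, &dwFlags, NULL, NULL);
        if (FAILED(hr)) return hr;
        onPacket(pData, nFrames, dwFlags); // frame count, not bytes
        hr = pCapture->ReleaseBuffer(nFrames);
        if (FAILED(hr)) return hr;
        hr = pCapture->GetNextPacketSize(&nNextPacketSize);
    }
    return hr; // S_OK once the device queue is empty
}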
Example #9
int _tmain(int argc, _TCHAR* argv[]) {
  IMMDeviceEnumerator *enumerator = 0;
  IMMDevice *device = 0;
  FILE *f = fopen("c:/1.wav", "wb"); // "wb": text mode would corrupt the raw PCM samples
  if (f == NULL) { printf("fopen of c:/1.wav failed\n"); return -1; }
  CoInitialize(0);
  CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), 
    (void**) &enumerator);
  enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);

  HANDLE processOutWrite, processOutRead, processInWrite, processInRead;    

  /*wchar_t processCommand[2000];
  {
    FILE* commandFile;
    fopen_s(&commandFile, "command.txt", "r");
    char cmd[2000];
    fread(cmd, sizeof(char), 2000, commandFile);
    fclose(commandFile);
    size_t count;
    mbstowcs_s(&count, processCommand, cmd, 2000);
  }*/

  /*{	
    //create pipes for plink process
    SECURITY_ATTRIBUTES pipeAttributes = {0};
    pipeAttributes.nLength = sizeof(SECURITY_ATTRIBUTES);
    pipeAttributes.bInheritHandle = TRUE;
    pipeAttributes.lpSecurityDescriptor= NULL;
    CreatePipe(&processOutRead, &processOutWrite, &pipeAttributes, 0);
    CreatePipe(&processInRead,  &processInWrite,  &pipeAttributes, 0);
    STARTUPINFO startupInfo;
    ZeroMemory(&startupInfo, sizeof(STARTUPINFO));
    startupInfo.cb = sizeof(STARTUPINFO);
    startupInfo.hStdError = processOutWrite;
    startupInfo.hStdOutput = processOutWrite;
    startupInfo.hStdInput = processInRead;
    startupInfo.dwFlags |= STARTF_USESTDHANDLES;
    PROCESS_INFORMATION processInfo = {0};
    //launch process
    CreateProcess(NULL, processCommand, NULL, NULL, TRUE, 0, NULL, NULL, &startupInfo, &processInfo);

    //wait for plink to connect to minimze sound delay  (magic number)
    Sleep(2500);

  }*/

  HRESULT hr;
  // activate an IAudioClient
  IAudioClient *audioClient;
  hr = device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**) &audioClient); 
  if (FAILED(hr)) {
    printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
    return hr;
  }

  // get the default device format
  WAVEFORMATEX *waveFormat;
  hr = audioClient->GetMixFormat(&waveFormat);
  if (FAILED(hr)) {
    printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
    CoTaskMemFree(waveFormat);
    audioClient->Release();
    return hr;
  }

  // coerce int-16 wave format
  // can do this in-place since we're not changing the size of the format
  // also, the engine will auto-convert from float to int for us
  switch (waveFormat->wFormatTag) {
  case WAVE_FORMAT_IEEE_FLOAT:
    waveFormat->wFormatTag = WAVE_FORMAT_PCM;
    waveFormat->wBitsPerSample = 16;
    waveFormat->nBlockAlign = waveFormat->nChannels * waveFormat->wBitsPerSample / 8;
    waveFormat->nAvgBytesPerSec = waveFormat->nBlockAlign * waveFormat->nSamplesPerSec;
    break;

  case WAVE_FORMAT_EXTENSIBLE:
    {
      // naked scope for case-local variable
      PWAVEFORMATEXTENSIBLE waveFormatEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(waveFormat);
      if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, waveFormatEx->SubFormat)) {
        waveFormatEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        waveFormatEx->Samples.wValidBitsPerSample = 16;
        waveFormat->wBitsPerSample = 16;
        waveFormat->nBlockAlign = waveFormat->nChannels * waveFormat->wBitsPerSample / 8;
        waveFormat->nAvgBytesPerSec = waveFormat->nBlockAlign * waveFormat->nSamplesPerSec;
      } else {
        printf("Don't know how to coerce mix format to int-16\n");
        CoTaskMemFree(waveFormat);
        audioClient->Release();
        return E_UNEXPECTED;
      }
    }
    break;

  default:
    printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", waveFormat->wFormatTag);
    CoTaskMemFree(waveFormat);
    audioClient->Release();
    return E_UNEXPECTED;
  }

  UINT32 blockAlign = waveFormat->nBlockAlign;

  // call IAudioClient::Initialize
  // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK do not work together...
  // the "data ready" event never gets set, so we're going to do a timer-driven loop
  hr = audioClient->Initialize(
    AUDCLNT_SHAREMODE_SHARED,
    AUDCLNT_STREAMFLAGS_LOOPBACK,
    10000000, 0, waveFormat, 0
    );
  if (FAILED(hr)) {
    printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
    audioClient->Release();
    return hr;
  }
  CoTaskMemFree(waveFormat);

  // activate an IAudioCaptureClient
  IAudioCaptureClient *audioCaptureClient;
  hr = audioClient->GetService(__uuidof(IAudioCaptureClient), (void**) &audioCaptureClient);
  if (FAILED(hr)) {
    printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
    audioClient->Release();
    return hr;
  }

  hr = audioClient->Start();
  if (FAILED(hr)) {
    printf("IAudioClient::Start failed: hr = 0x%08x\n", hr);
    audioCaptureClient->Release();
    audioClient->Release();
    return hr;
  }

  // loopback capture loop
  for (UINT32 i = 0; true; i++) {
    UINT32 nextPacketSize;
    hr = audioCaptureClient->GetNextPacketSize(&nextPacketSize);
    if (FAILED(hr)) {
      printf("IAudioCaptureClient::GetNextPacketSize failed on pass %u: hr = 0x%08x\n", i, hr);
      audioClient->Stop();
      audioCaptureClient->Release();
      audioClient->Release();            
      return hr;
    }

    if (nextPacketSize == 0) { // no data yet
      continue;
    }

    // get the captured data
    BYTE *data;
    UINT32 frameCount;
    DWORD bufferFlags;

    hr = audioCaptureClient->GetBuffer(&data, &frameCount, &bufferFlags, NULL, NULL);
    if (FAILED(hr)) {
      printf("IAudioCaptureClient::GetBuffer failed on pass %u: hr = 0x%08x\n", i, hr);
      audioClient->Stop();
      audioCaptureClient->Release();
      audioClient->Release();            
      return hr;            
    }

    if (bufferFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) {
      printf("IAudioCaptureClient::GetBuffer reports 'data discontinuity' on pass %u\n", i);
    }
    if (bufferFlags & AUDCLNT_BUFFERFLAGS_SILENT) {
      printf("IAudioCaptureClient::GetBuffer reports 'silent' on pass %u\n", i);
    }
    if (bufferFlags & AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR) {
      printf("IAudioCaptureClient::GetBuffer reports 'timestamp error' on pass %u\n", i);
    }

    if (frameCount == 0) {
      printf("IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u\n", i);
      audioClient->Stop();
      audioCaptureClient->Release();
      audioClient->Release();            
      return E_UNEXPECTED;            
    }

    LONG bytesToWrite = frameCount * blockAlign;
    DWORD bytesWritten;
    printf("Recording: %ld bytes\n", bytesToWrite);

    // raw PCM only; no RIFF header is written (see the header sketch after this example)
    fwrite(data, bytesToWrite, 1, f);

    /*WriteFile(processInWrite,reinterpret_cast<PCHAR>(data), bytesToWrite, &bytesWritten, NULL);
    if (bytesWritten != bytesToWrite) {
      printf("WriteFile: tried to write %d bytes, but %d bytes written\n", bytesToWrite, bytesWritten);            
    }

    char buf[10000];
    DWORD count;
    DWORD bytesAvailable;
    PeekNamedPipe(processOutRead, NULL, 0, 0, &bytesAvailable, NULL);
    if (bytesAvailable > 0) { 
      ReadFile(processOutRead, buf, 10000, &count, NULL);
      std::cout.write(buf, count);
    }*/


    hr = audioCaptureClient->ReleaseBuffer(frameCount);
    if (FAILED(hr)) {
      printf("IAudioCaptureClient::ReleaseBuffer failed on pass %u: hr = 0x%08x\n", i, hr);
      audioClient->Stop();
      audioCaptureClient->Release();
      audioClient->Release();            
      return hr;            
    }

  } // capture loop

  // note: the capture loop above never breaks, so this cleanup only runs if an
  // exit condition (frame limit, stop event, etc.) is added to the loop
  audioClient->Stop();
  audioCaptureClient->Release();
  audioClient->Release();
  fclose(f);
  return 0;
}
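The example above fwrite()s raw 16-bit PCM into c:/1.wav without any RIFF header, so most players will not open the result. Below is a hedged sketch of a minimal header writer for that case: the helper name is hypothetical, the 44-byte canonical PCM layout is standard, and the two size fields must be patched (fseek back and rewrite) once the total data length is known.

// Hypothetical helper: writes the canonical 44-byte RIFF/WAVE header for 16-bit
// PCM in front of the raw samples. Call once with dataBytes = 0 before capturing,
// then seek back and call again with the real byte count when done.
#include <stdio.h>
#include <stdint.h>

static void WriteWavHeader(FILE *f, uint16_t channels, uint32_t sampleRate,
                           uint16_t bitsPerSample, uint32_t dataBytes) {
    uint16_t blockAlign = (uint16_t)(channels * bitsPerSample / 8);
    uint32_t byteRate   = sampleRate * blockAlign;
    uint32_t riffSize   = 36 + dataBytes;   // everything after "RIFF<size>"
    uint32_t fmtSize    = 16;               // size of the PCM "fmt " chunk body
    uint16_t fmtTag     = 1;                // WAVE_FORMAT_PCM

    fwrite("RIFF", 1, 4, f); fwrite(&riffSize, 4, 1, f); fwrite("WAVE", 1, 4, f);
    fwrite("fmt ", 1, 4, f); fwrite(&fmtSize, 4, 1, f);
    fwrite(&fmtTag, 2, 1, f);        fwrite(&channels, 2, 1, f);
    fwrite(&sampleRate, 4, 1, f);    fwrite(&byteRate, 4, 1, f);
    fwrite(&blockAlign, 2, 1, f);    fwrite(&bitsPerSample, 2, 1, f);
    fwrite("data", 1, 4, f); fwrite(&dataBytes, 4, 1, f);
}

With the coerced mix format from the example, channels and sampleRate would come from waveFormat->nChannels and waveFormat->nSamplesPerSec, and bitsPerSample is 16.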
HRESULT LoopbackCapture(
    IMMDevice *pMMDevice,
    bool bInt16,
    HANDLE hStartedEvent,
    HANDLE hStopEvent,
    PUINT32 pnFrames,
    bool bMono,
    INT32 iSampleRateDivisor
    ) {
    HRESULT hr;
    SimpleTcpServer server;

    // Wait for client connection before attempting any audio capture
    server.setup();
    server.waitForClient();

    // activate an IAudioClient
    IAudioClient *pAudioClient;
    hr = pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }
    
    // get the default device periodicity
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if (FAILED(hr)) {
        printf("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // get the default device format
    WAVEFORMATEX *pwfx;
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    if (bInt16) {
        // coerce int-16 wave format
        // can do this in-place since we're not changing the size of the format
        // also, the engine will auto-convert from float to int for us
        switch (pwfx->wFormatTag) {
            case WAVE_FORMAT_IEEE_FLOAT:
                pwfx->wFormatTag = WAVE_FORMAT_PCM;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                break;

            case WAVE_FORMAT_EXTENSIBLE:
                {
                    // naked scope for case-local variable
                    PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
                    if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                        pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                        pEx->Samples.wValidBitsPerSample = 16;
                        pwfx->wBitsPerSample = 16;
                        pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                        pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                    } else {
                        printf("Don't know how to coerce mix format to int-16\n");
                        CoTaskMemFree(pwfx);
                        pAudioClient->Release();
                        return E_UNEXPECTED;
                    }
                }
                break;

            default:
                printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
                CoTaskMemFree(pwfx);
                pAudioClient->Release();
                return E_UNEXPECTED;
        }
    }

    // create a periodic waitable timer
    HANDLE hWakeUp = CreateWaitableTimer(NULL, FALSE, NULL);
    if (NULL == hWakeUp) {
        DWORD dwErr = GetLastError();
        printf("CreateWaitableTimer failed: last error = %u\n", dwErr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    UINT32 nBlockAlign = pwfx->nBlockAlign;
    UINT32 nBufferSize;
    *pnFrames = 0;

    // call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to do a timer-driven loop
    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_LOOPBACK,
        0, 0, pwfx, 0
    );
    if (FAILED(hr)) {
        printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }
    CoTaskMemFree(pwfx);

    // Get the buffer size
    hr = pAudioClient->GetBufferSize(&nBufferSize);
    if (FAILED(hr)) {
        printf("IAudioClient::GetBufferSize failed: hr = 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }

    // Configure the server.  The buffer size returned is in frames
    // so assume stereo, 16 bits per sample to convert from frames to bytes
    server.configure(
        bMono,
        iSampleRateDivisor,
        nBufferSize * 2 * 2);

    // activate an IAudioCaptureClient
    IAudioCaptureClient *pAudioCaptureClient;
    hr = pAudioClient->GetService(
        __uuidof(IAudioCaptureClient),
        (void**)&pAudioCaptureClient
    );
    if (FAILED(hr)) {
        printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }
    
    // register with MMCSS
    DWORD nTaskIndex = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (NULL == hTask) {
        DWORD dwErr = GetLastError();
        printf("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }    

    // set the waitable timer
    LARGE_INTEGER liFirstFire;
    liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time
    LONG lTimeBetweenFires = (LONG)hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert to milliseconds
    BOOL bOK = SetWaitableTimer(
        hWakeUp,
        &liFirstFire,
        lTimeBetweenFires,
        NULL, NULL, FALSE
    );
    if (!bOK) {
        DWORD dwErr = GetLastError();
        printf("SetWaitableTimer failed: last error = %u\n", dwErr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }
    
    // call IAudioClient::Start
    hr = pAudioClient->Start();
    if (FAILED(hr)) {
        printf("IAudioClient::Start failed: hr = 0x%08x\n", hr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }
    SetEvent(hStartedEvent);
    
    // loopback capture loop
    HANDLE waitArray[2] = { hStopEvent, hWakeUp };
    DWORD dwWaitResult;

    bool bDone = false;
    for (UINT32 nPasses = 0; !bDone; nPasses++) {
        // drain data while it is available
        UINT32 nNextPacketSize;
        for (
            hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize);
            SUCCEEDED(hr) && nNextPacketSize > 0;
            hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize)
        ) {
            // get the captured data
            BYTE *pData;
            UINT32 nNumFramesToRead;
            DWORD dwFlags;

            hr = pAudioCaptureClient->GetBuffer(
                &pData,
                &nNumFramesToRead,
                &dwFlags,
                NULL,
                NULL
                );
            if (FAILED(hr)) {
                printf("IAudioCaptureClient::GetBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, *pnFrames, hr);
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return hr;
            }

#ifdef _DEBUG
            if (0 != dwFlags) {
                printf("[ignoring] IAudioCaptureClient::GetBuffer set flags to 0x%08x on pass %u after %u frames\n", dwFlags, nPasses, *pnFrames);
            }
#endif

            if (0 == nNumFramesToRead) {
                printf("IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u after %u frames\n", nPasses, *pnFrames);
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }

            LONG lBytesToWrite = nNumFramesToRead * nBlockAlign;
            if (server.sendData(reinterpret_cast<const char*>(pData), lBytesToWrite) != 0) {
                printf("Error sending data to peer\n");
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }
            *pnFrames += nNumFramesToRead;

            hr = pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
            if (FAILED(hr)) {
                printf("IAudioCaptureClient::ReleaseBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, *pnFrames, hr);
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return hr;
            }
        }

        if (FAILED(hr)) {
            printf("IAudioCaptureClient::GetNextPacketSize failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, *pnFrames, hr);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return hr;
        }

        dwWaitResult = WaitForMultipleObjects(
            ARRAYSIZE(waitArray), waitArray,
            FALSE, INFINITE
        );

        if (WAIT_OBJECT_0 == dwWaitResult) {
            printf("Received stop event after %u passes and %u frames\n", nPasses, *pnFrames);
            bDone = true;
            continue; // bDone now fails the loop condition, ending the capture loop
        }

        if (WAIT_OBJECT_0 + 1 != dwWaitResult) {
            printf("Unexpected WaitForMultipleObjects return value %u on pass %u after %u frames\n", dwWaitResult, nPasses, *pnFrames);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return E_UNEXPECTED;
        }
    } // capture loop

    pAudioClient->Stop();
    CancelWaitableTimer(hWakeUp);
    AvRevertMmThreadCharacteristics(hTask);
    pAudioCaptureClient->Release();
    CloseHandle(hWakeUp);
    pAudioClient->Release();

    server.shutdown();

    return hr;
}
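LoopbackCapture forwards the raw mix to SimpleTcpServer and lets the server honor the bMono and iSampleRateDivisor options it was configured with; that handling is not shown in this listing. The sketch below shows one way such a reduction could look, assuming 16-bit interleaved stereo frames; the function name and approach are assumptions, not the server's actual code.

// Sketch only: average stereo to mono and keep every Nth frame. Naive decimation
// without a low-pass filter will alias; adequate for voice-grade monitoring, not
// for faithful capture.
#include <stdint.h>
#include <vector>

static std::vector<int16_t> ReduceBlock(const int16_t *samples, size_t frames,
                                        bool bMono, int iSampleRateDivisor) {
    std::vector<int16_t> out;
    if (iSampleRateDivisor < 1) iSampleRateDivisor = 1;
    for (size_t i = 0; i < frames; i += (size_t)iSampleRateDivisor) {
        int16_t left  = samples[2 * i];
        int16_t right = samples[2 * i + 1];
        if (bMono) {
            out.push_back((int16_t)(((int)left + (int)right) / 2)); // channel average
        } else {
            out.push_back(left);
            out.push_back(right);
        }
    }
    return out;
}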