// Establish the audio cooperative level / thread priority for playback.
//
// Finds the window owning the video renderer (falling back to the top-level
// window) and, in DirectSound mode, uses it to set DSSCL_PRIORITY. In WASAPI
// mode, registers this thread with MMCSS ("Pro Audio") once, to reduce
// glitches while the low-latency stream plays.
//
// Returns S_OK on success, a failure HRESULT otherwise.
HRESULT CMpcAudioRenderer::InitCoopLevel()
{
    HRESULT       hr           = S_OK;
    IVideoWindow* pVideoWindow = NULL;
    HWND          hWnd         = NULL;
    CComBSTR      bstrCaption;

    hr = m_pGraph->QueryInterface(__uuidof(IVideoWindow), (void**)&pVideoWindow);
    if (SUCCEEDED(hr))
    {
        pVideoWindow->get_Owner((long*)&hWnd);
        SAFE_RELEASE(pVideoWindow);
    }
    if (!hWnd)
    {
        hWnd = GetTopWindow(NULL);
    }
    ATLASSERT(hWnd != NULL);

    if (!useWASAPI)
    {
        hr = m_pDS->SetCooperativeLevel(hWnd, DSSCL_PRIORITY);
    }
    else if (hTask == NULL)
    {
        // Ask MMCSS to temporarily boost the thread priority
        // to reduce glitches while the low-latency stream plays.
        DWORD taskIndex = 0;
        hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskIndex);
        if (hTask == NULL)
        {
            // BUGFIX: the original called GetLastError() unconditionally and
            // returned the raw DWORD as an HRESULT, so a *successful* MMCSS
            // registration could still report a stale error code. Only query
            // the error on failure, and map Win32 -> HRESULT properly.
            return HRESULT_FROM_WIN32(GetLastError());
        }
    }
    return hr;
}
// Apply the configured MMCSS scheduler task type and AVRT thread priority to
// the calling thread, then (optionally) enable or disable MMCSS through DWM.
// Outcomes are recorded in m_result; DWM failures are logged but non-fatal.
void WWThreadCharacteristics::Setup(void)
{
    HRESULT hr = S_OK;
    if (WWSTTNone != m_schedulerTaskType) {
        // Register this thread with the Multimedia Class Scheduler Service
        // using the configured task type (e.g. "Pro Audio").
        dprintf("D: %s() AvSetMmThreadCharacteristics(%S)\n", __FUNCTION__, WWSchedulerTaskTypeToStr(m_schedulerTaskType));
        m_mmcssHandle = AvSetMmThreadCharacteristics(WWSchedulerTaskTypeToStr(m_schedulerTaskType), &m_mmcssTaskIndex);
        if (nullptr == m_mmcssHandle) {
            dprintf("Failed to enable MMCSS on render thread: 0x%08x\n", GetLastError());
            m_mmcssTaskIndex = 0;
            m_result.avSetMmThreadCharacteristicsResult = false;
        } else {
            m_result.avSetMmThreadCharacteristicsResult = true;
        }
        // Priority can only be applied if MMCSS registration succeeded, since
        // AvSetMmThreadPriority needs the AVRT handle it returned.
        if (m_result.avSetMmThreadCharacteristicsResult && WWTPNone != m_threadPriority) {
            // Set the relative AVRT thread priority.
            dprintf("D: %s() AvSetMmThreadPriority(%S)\n", __FUNCTION__, WWMMThreadPriorityTypeToStr(m_threadPriority));
            assert(m_mmcssHandle != nullptr);
            m_result.avSetMmThreadPriorityResult = !!AvSetMmThreadPriority(m_mmcssHandle, WWMMThreadPriorityTypeToAvrtPriority(m_threadPriority));
        }
    }
    if (WWMMCSSDoNotCall != m_mmcssCallType) {
        // Enable or disable MMCSS via the Desktop Window Manager.
        hr = DwmEnableMMCSS(m_mmcssCallType==WWMMCSSEnable);
        dprintf("D: %s() DwmEnableMMCSS(%d) 0x%08x\n", __FUNCTION__, (int)(m_mmcssCallType==WWMMCSSEnable), hr);
        // This call can fail (e.g. DWM unavailable); we continue regardless
        // and simply record the HRESULT for the caller to inspect.
        m_result.dwmEnableMMCSSResult = hr;
    }
}
// Query the default capture/loopback device's raw mix format and copy its
// base WAVEFORMATEX fields into *toThis.
//
// On any failure *toThis is left unmodified. The original version only
// assert(false)'d on errors — a no-op in release builds — and then fell
// through to dereference invalid pointers; it also leaked the device,
// client, and MMCSS handles on those paths. All failure paths now clean up
// and return early.
void propagateWithRawCurrentFormat(WAVEFORMATEX *toThis)
{
    WAVEFORMATEX *pwfx = NULL;
    IMMDevice *pMMDevice = NULL;
    IAudioClient *pAudioClient = NULL;
    HANDLE hTask = NULL;
    DWORD nTaskIndex = 0;

    // Boost this thread via MMCSS while we talk to the audio engine.
    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);

    HRESULT hr = get_default_device(&pMMDevice);
    if (FAILED(hr)) {
        assert(false);
        if (hTask != NULL) AvRevertMmThreadCharacteristics(hTask);
        return;
    }

    // activate an (the default, for us, since we want loopback) IAudioClient
    hr = pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        ShowOutput("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        assert(false);
        pMMDevice->Release();
        if (hTask != NULL) AvRevertMmThreadCharacteristics(hTask);
        return;
    }

    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        assert(false);
        pAudioClient->Release();
        pMMDevice->Release();
        if (hTask != NULL) AvRevertMmThreadCharacteristics(hTask);
        return;
    }

    pAudioClient->Stop(); // harmless even though Start was never called
    if (hTask != NULL) AvRevertMmThreadCharacteristics(hTask);
    pAudioClient->Release();
    pMMDevice->Release();

    // Copy only the fixed-size WAVEFORMATEX header; any extensible payload
    // (cbSize extra bytes) is intentionally not propagated, matching the
    // "raw" contract of this helper.
    memcpy(toThis, pwfx, sizeof(WAVEFORMATEX));
    CoTaskMemFree(pwfx);
}
// Capture thread body: runs a timer-driven loop that drains the WASAPI
// capture client into _CaptureBuffer until _ShutdownEvent is signaled.
// Optionally registers with MMCSS ("Audio") for scheduling priority.
// Returns 0 as the thread exit code.
DWORD CWASAPICapture::DoCaptureThread()
{
    bool stillPlaying = true;
    HANDLE waitArray[2] = {_ShutdownEvent, _StreamSwitchEvent};
    HANDLE mmcssHandle = NULL;
    DWORD mmcssTaskIndex = 0;

    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    PersistentAssert(SUCCEEDED(hr), "CoInitializeEx failed");

    if (!DisableMMCSS)
    {
        mmcssHandle = AvSetMmThreadCharacteristics("Audio", &mmcssTaskIndex);
        PersistentAssert(mmcssHandle != NULL, "AvSetMmThreadCharacteristics failed");
    }

    while (stillPlaying)
    {
        // NOTE(review): this inner hr shadows the outer one; kept as-is.
        HRESULT hr;
        //
        // In Timer Driven mode, we want to wait for half the desired latency in milliseconds.
        //
        // That way we'll wake up half way through the processing period to pull the
        // next set of samples from the engine.
        //
        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, _EngineLatencyInMS / 2);
        switch (waitResult)
        {
        case WAIT_OBJECT_0 + 0:     // _ShutdownEvent
            stillPlaying = false;   // We're done, exit the loop.
            break;
        case WAIT_OBJECT_0 + 1:     // _StreamSwitchEvent
            PersistentSignalError("StreamSwitch event unexpected");
            stillPlaying = false;
            break;
        case WAIT_TIMEOUT:          // Timeout
            //
            // We need to retrieve the next buffer of samples from the audio capturer.
            //
            BYTE *pData;
            UINT32 framesAvailable;
            DWORD flags;
            //
            // Find out how much capture data is available. We need to make sure we don't run over the length
            // of our capture buffer. We'll discard any samples that don't fit in the buffer.
            //
            UINT64 CaptureStartTime;
            hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, &CaptureStartTime);
            if (SUCCEEDED(hr))
            {
                // Clamp to the room left in _CaptureBuffer; excess frames are dropped.
                UINT32 framesToCopy = min(framesAvailable, static_cast<UINT32>((_CaptureBufferSize - _CurrentCaptureIndex) / _FrameSize));
                const UINT BytesToCopy = framesToCopy * _FrameSize;
                if (framesToCopy != 0)
                {
                    //
                    // The flags on capture tell us information about the data.
                    //
                    // We only really care about the silent flag since we want to put frames of silence into the buffer
                    // when we receive silence. We rely on the fact that a logical bit 0 is silence for both float and int formats.
                    //
                    if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
                    {
                        //
                        // Fill 0s from the capture buffer to the output buffer.
                        //
                        ZeroMemory(&_CaptureBuffer[_CurrentCaptureIndex], BytesToCopy);
                    }
                    else
                    {
                        //
                        // Copy data from the audio engine buffer to the output buffer.
                        //
                        CopyMemory(&_CaptureBuffer[_CurrentCaptureIndex], pData, BytesToCopy);
                    }
                    //
                    // Bump the capture buffer pointer.
                    //
                    // When a compressor is attached the buffer is reused every
                    // pass, so the write index only advances when uncompressed.
                    if(_Compressor == NULL)
                    {
                        _CurrentCaptureIndex += BytesToCopy;
                    }
                }
                hr = _CaptureClient->ReleaseBuffer(framesAvailable);
                PersistentAssert(SUCCEEDED(hr), "_CaptureClient->ReleaseBuffer failed");
                if(_Compressor && framesToCopy != 0)
                {
                    // Hand the freshly captured frames to the compressor with
                    // their device timestamp.
                    _Compressor->AudioSample32Bit2Channel((float *)_CaptureBuffer, framesToCopy, CaptureStartTime);
                }
            }
            break;
        }
    }

    if (!DisableMMCSS)
    {
        AvRevertMmThreadCharacteristics(mmcssHandle);
    }
    CoUninitialize();
    return 0;
}
// Main audio mixing loop (runs on its own MMCSS-boosted thread).
//
// Every ~5 ms it drains all queued audio: mixes desktop + aux + mic buffers,
// computes RMS / peak meters (in dB), posts meter-update messages to the UI,
// and hands each mixed segment to the encoder. Exits when bRunning clears,
// then resets the meters and flushes pending frames.
void OBS::MainAudioLoop()
{
    const unsigned int audioSamplesPerSec = App->GetSampleRateHz();
    // 10 ms worth of sample frames per segment.
    const unsigned int audioSampleSize = audioSamplesPerSec/100;

    DWORD taskID = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskID);

    bufferedAudioTimes.Clear();

    bPushToTalkOn = false;

    micMax = desktopMax = VOL_MIN;
    micPeak = desktopPeak = VOL_MIN;

    UINT audioFramesSinceMeterUpdate = 0;
    UINT audioFramesSinceMicMaxUpdate = 0;
    UINT audioFramesSinceDesktopMaxUpdate = 0;

    // Stereo interleaved scratch buffers: one for the encoder mix, one for
    // the level-meter mix.
    List<float> mixBuffer, levelsBuffer;
    mixBuffer.SetSize(audioSampleSize*2);
    levelsBuffer.SetSize(audioSampleSize*2);

    latestAudioTime = 0;

    //---------------------------------------------
    // the audio loop of doom

    while (true)
    {
        OSSleep(5); //screw it, just run it every 5ms

        if (!bRunning)
            break;

        //-----------------------------------------------

        float *desktopBuffer, *micBuffer;

        curDesktopVol = desktopVol * desktopBoost;

        if (bUsingPushToTalk)
            curMicVol = bPushToTalkOn ? micVol : 0.0f;
        else
            curMicVol = micVol;

        curMicVol *= micBoost;

        bool bDesktopMuted = (curDesktopVol < EPSILON);
        bool bMicEnabled = (micAudio != NULL);

        // Drain every queued 10 ms segment.
        while (QueryNewAudio())
        {
            QWORD timestamp = bufferedAudioTimes[0];
            bufferedAudioTimes.Remove(0);

            zero(mixBuffer.Array(), audioSampleSize*2*sizeof(float));
            zero(levelsBuffer.Array(), audioSampleSize*2*sizeof(float));

            //----------------------------------------------------------------------------
            // get latest sample for calculating the volume levels

            float *latestDesktopBuffer = NULL, *latestMicBuffer = NULL;

            desktopAudio->GetBuffer(&desktopBuffer, timestamp);
            desktopAudio->GetNewestFrame(&latestDesktopBuffer);

            if (micAudio != NULL)
            {
                micAudio->GetBuffer(&micBuffer, timestamp);
                micAudio->GetNewestFrame(&latestMicBuffer);
            }

            //----------------------------------------------------------------------------
            // mix desktop samples

            if (desktopBuffer)
                MixAudio(mixBuffer.Array(), desktopBuffer, audioSampleSize*2, false);

            if (latestDesktopBuffer)
                MixAudio(levelsBuffer.Array(), latestDesktopBuffer, audioSampleSize*2, false);

            //----------------------------------------------------------------------------
            // get latest aux volume level samples and mix

            OSEnterMutex(hAuxAudioMutex);

            for (UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *latestAuxBuffer;

                if(auxAudioSources[i]->GetNewestFrame(&latestAuxBuffer))
                    MixAudio(levelsBuffer.Array(), latestAuxBuffer, audioSampleSize*2, false);
            }

            //----------------------------------------------------------------------------
            // mix output aux sound samples with the desktop

            for (UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *auxBuffer;

                if(auxAudioSources[i]->GetBuffer(&auxBuffer, timestamp))
                    MixAudio(mixBuffer.Array(), auxBuffer, audioSampleSize*2, false);
            }

            OSLeaveMutex(hAuxAudioMutex);

            //----------------------------------------------------------------------------
            // multiply samples by volume and compute RMS and max of samples
            // Use 1.0f instead of curDesktopVol, since aux audio sources already have their volume set, and shouldn't be boosted anyway.
            float desktopRMS = 0, micRMS = 0, desktopMx = 0, micMx = 0;
            if (latestDesktopBuffer)
                CalculateVolumeLevels(levelsBuffer.Array(), audioSampleSize*2, 1.0f, desktopRMS, desktopMx);
            if (bMicEnabled && latestMicBuffer)
                CalculateVolumeLevels(latestMicBuffer, audioSampleSize*2, curMicVol, micRMS, micMx);

            //----------------------------------------------------------------------------
            // convert RMS and Max of samples to dB
            desktopRMS = toDB(desktopRMS);
            micRMS = toDB(micRMS);
            desktopMx = toDB(desktopMx);
            micMx = toDB(micMx);

            //----------------------------------------------------------------------------
            // update max if sample max is greater or after 1 second

            // Exponential decay toward the current sample max when it is not
            // a new peak.
            float maxAlpha = 0.15f;
            UINT peakMeterDelayFrames = audioSamplesPerSec * 3;

            if (micMx > micMax)
                micMax = micMx;
            else
                micMax = maxAlpha * micMx + (1.0f - maxAlpha) * micMax;

            if(desktopMx > desktopMax)
                desktopMax = desktopMx;
            else
                desktopMax = maxAlpha * desktopMx + (1.0f - maxAlpha) * desktopMax;

            //----------------------------------------------------------------------------
            // update delayed peak meter

            if (micMax > micPeak || audioFramesSinceMicMaxUpdate > peakMeterDelayFrames)
            {
                micPeak = micMax;
                audioFramesSinceMicMaxUpdate = 0;
            }
            else
            {
                audioFramesSinceMicMaxUpdate += audioSampleSize;
            }

            if (desktopMax > desktopPeak || audioFramesSinceDesktopMaxUpdate > peakMeterDelayFrames)
            {
                desktopPeak = desktopMax;
                audioFramesSinceDesktopMaxUpdate = 0;
            }
            else
            {
                audioFramesSinceDesktopMaxUpdate += audioSampleSize;
            }

            //----------------------------------------------------------------------------
            // low pass the level sampling

            float rmsAlpha = 0.15f;
            desktopMag = rmsAlpha * desktopRMS + desktopMag * (1.0f - rmsAlpha);
            micMag = rmsAlpha * micRMS + micMag * (1.0f - rmsAlpha);

            //----------------------------------------------------------------------------
            // update the meter about every 50ms

            audioFramesSinceMeterUpdate += audioSampleSize;
            if (audioFramesSinceMeterUpdate >= (audioSampleSize*5))
            {
                PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);
                audioFramesSinceMeterUpdate = 0;
            }

            //----------------------------------------------------------------------------
            // mix mic and desktop sound
            // also, it's perfectly fine to just mix into the returned buffer

            if (bMicEnabled && micBuffer)
                MixAudio(mixBuffer.Array(), micBuffer, audioSampleSize*2, bForceMicMono);

            EncodeAudioSegment(mixBuffer.Array(), audioSampleSize, timestamp);
        }

        //-----------------------------------------------

        if (!bRecievedFirstAudioFrame && pendingAudioFrames.Num())
            bRecievedFirstAudioFrame = true;
    }

    // Loop exited: reset meters, notify the UI once more, and drop any
    // frames that never made it to the encoder.
    desktopMag = desktopMax = desktopPeak = VOL_MIN;
    micMag = micMax = micPeak = VOL_MIN;

    PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);

    for (UINT i=0; i<pendingAudioFrames.Num(); i++)
        pendingAudioFrames[i].audioData.Clear();

    AvRevertMmThreadCharacteristics(hTask);
}
/// <summary>
/// Audio capture thread. Repeatedly drains the Kinect DMO and queues the
/// produced audio for the IStream client, until the stop event is signaled
/// or ProcessOutput fails. The thread runs at MMCSS "Audio" priority.
/// </summary>
/// <returns>Non-zero if thread ended successfully, zero in case of failure</returns>
DWORD WINAPI KinectAudioStream::CaptureThread()
{
    HANDLE avrtHandle = NULL;
    DWORD avrtTaskIndex = 0;
    HRESULT hr = S_OK;
    bool keepRunning = true;
    BYTE *pProducedBytes = NULL;
    CStaticMediaBuffer outputBuffer;
    DMO_OUTPUT_DATA_BUFFER dmoOutput = {0};
    DWORD dmoStatus = 0;
    ULONG producedByteCount = 0;

    dmoOutput.pBuffer = &outputBuffer;

    // Raise this thread's priority via MMCSS so sound capture isn't preempted.
    avrtHandle = AvSetMmThreadCharacteristics("Audio", &avrtTaskIndex);

    while (keepRunning)
    {
        // A signaled stop event ends the loop immediately (no final sleep).
        if (WaitForSingleObject(m_hStopEvent, 0) == WAIT_OBJECT_0)
        {
            keepRunning = false;
            continue;
        }

        // Drain the DMO until it reports a complete output (or an error).
        do
        {
            outputBuffer.Init(0);
            dmoOutput.dwStatus = 0;
            hr = m_pKinectDmo->ProcessOutput(0, 1, &dmoOutput, &dmoStatus);

            if (FAILED(hr))
            {
                keepRunning = false;
                break;
            }

            // S_FALSE means "no data this time"; otherwise fetch the payload.
            if (hr == S_FALSE)
            {
                producedByteCount = 0;
            }
            else
            {
                outputBuffer.GetBufferAndLength(&pProducedBytes, &producedByteCount);
            }

            // Queue audio data to be read by IStream client.
            if (producedByteCount > 0)
            {
                QueueCapturedData(pProducedBytes, producedByteCount);
            }
        } while (dmoOutput.dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE);

        Sleep(10); // sleep 10ms between drain passes
    }

    SetEvent(m_hDataReady);
    AvRevertMmThreadCharacteristics(avrtHandle);

    // Zero signals failure (a failed ProcessOutput); non-zero is success.
    return FAILED(hr) ? 0 : 1;
}
// we only call this once...per hit of the play button :)
//
// Sets up WASAPI loopback capture of the default render device:
//  1. resolves the default device and coerces its mix format to int-16,
//  2. primes the device with a one-second silent render buffer (works around
//     the "silence" gap bug on loopback streams),
//  3. initializes a loopback IAudioClient + IAudioCaptureClient,
//  4. registers with MMCSS, starts the stream, and spawns the grab thread.
// Returns S_OK on success or a failure HRESULT (all local COM objects are
// released on the explicit error paths; EXIT_ON_ERROR paths depend on that
// macro's behavior, which is defined elsewhere).
HRESULT LoopbackCaptureSetup()
{
    assert(shouldStop); // duplicate starts would be odd...
    shouldStop = false; // allow graphs to restart, if they so desire...
    pnFrames = 0;
    bool bInt16 = true; // makes it actually work, for some reason...LODO
    HRESULT hr;

    hr = get_default_device(&m_pMMDevice); // so it can re-place our pointer...
    if (FAILED(hr)) {
        return hr;
    }

    // tell it to not overflow one buffer's worth <sigh> not sure if this is right or not, and thus we don't "cache" or "buffer" more than that much currently...
    // but a buffer size is a buffer size...hmm...as long as we keep it small though...
    assert(expectedMaxBufferSize <= pBufOriginalSize);

    // activate an (the default, for us, since we want loopback) IAudioClient
    hr = m_pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        ShowOutput("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }

    // get the default device periodicity, why? I don't know...
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // get the default device format (incoming...)
    WAVEFORMATEX *pwfx; // incoming wave...
    // apparently propogated by GetMixFormat...
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    if (true /*bInt16*/) {
        // coerce int-16 wave format
        // can do this in-place since we're not changing the size of the format
        // also, the engine will auto-convert from float to int for us
        switch (pwfx->wFormatTag) {
            case WAVE_FORMAT_IEEE_FLOAT:
                assert(false);// we never get here...I hope...
                pwfx->wFormatTag = WAVE_FORMAT_PCM;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                break;

            case WAVE_FORMAT_EXTENSIBLE:
                { // naked scope for case-local variable
                    PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
                    if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                        // WE GET HERE!
                        pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                        pEx->Samples.wValidBitsPerSample = 16;
                        pwfx->wBitsPerSample = 16;
                        pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                        pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                        /* scawah lodo...
                        if(ifNotNullThenJustSetTypeOnly) {
                            PWAVEFORMATEXTENSIBLE pEx2 = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(ifNotNullThenJustSetTypeOnly);
                            pEx2->SubFormat = pEx->SubFormat;
                            pEx2->Samples.wValidBitsPerSample = pEx->Samples.wValidBitsPerSample;
                        }
                        */
                    } else {
                        ShowOutput("Don't know how to coerce mix format to int-16\n");
                        CoTaskMemFree(pwfx);
                        pAudioClient->Release();
                        return E_UNEXPECTED;
                    }
                }
                break;

            default:
                ShowOutput("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
                CoTaskMemFree(pwfx);
                pAudioClient->Release();
                return E_UNEXPECTED;
        }
    }

    /* scawah setting stream types up to match...didn't seem to work well...
    if(ifNotNullThenJustSetTypeOnly) {
        // pwfx is set at this point...
        WAVEFORMATEX* pwfex = ifNotNullThenJustSetTypeOnly;
        // copy them all out as the possible format...hmm...
        pwfx->wFormatTag = WAVE_FORMAT_PCM;
        pwfx->wBitsPerSample = 16;
        pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
        pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;

        pwfex->wFormatTag = pwfx->wFormatTag;
        pwfex->nChannels = pwfx->nChannels;
        pwfex->nSamplesPerSec = pwfx->nSamplesPerSec;
        pwfex->wBitsPerSample = pwfx->wBitsPerSample;
        pwfex->nBlockAlign = pwfx->nBlockAlign;
        pwfex->nAvgBytesPerSec = pwfx->nAvgBytesPerSec;
        pwfex->cbSize = pwfx->cbSize;
        //FILE *fp = fopen("/normal2", "w"); // fails on me? maybe juts a VLC thing...
        //fShowOutput(fp, "hello world %d %d %d %d %d %d %d", pwfex->wFormatTag, pwfex->nChannels,
        //	pwfex->nSamplesPerSec, pwfex->wBitsPerSample, pwfex->nBlockAlign, pwfex->nAvgBytesPerSec, pwfex->cbSize );
        //fclose(fp);
        // cleanup
        // I might be leaking here...
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        //m_pMMDevice->Release();
        return hr;
    }*/

    MMCKINFO ckRIFF = {0};
    MMCKINFO ckData = {0};
    nBlockAlign = pwfx->nBlockAlign;

    // avoid stuttering on close
    // http://social.msdn.microsoft.com/forums/en-US/windowspro-audiodevelopment/thread/c7ba0a04-46ce-43ff-ad15-ce8932c00171/
    //IAudioClient *pAudioClient = NULL;
    //IAudioCaptureClient *pCaptureClient = NULL;
    IMMDeviceEnumerator *pEnumerator = NULL;
    IMMDevice *pDevice = NULL;
    IAudioRenderClient *pRenderClient = NULL;
    WAVEFORMATEXTENSIBLE *captureDataFormat = NULL;
    BYTE *captureData;
    REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;

    hr = CoCreateInstance(
        CLSID_MMDeviceEnumerator, NULL,
        CLSCTX_ALL, IID_IMMDeviceEnumerator,
        (void**)&pEnumerator);
    EXIT_ON_ERROR(hr)

    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);
    EXIT_ON_ERROR(hr)

    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
    EXIT_ON_ERROR(hr)

    // NOTE(review): captureDataFormat is never CoTaskMemFree'd anywhere in
    // this function — looks like a leak; confirm against the rest of the file.
    hr = pAudioClient->GetMixFormat((WAVEFORMATEX **)&captureDataFormat);
    EXIT_ON_ERROR(hr)

    // Silence: initialise in sharedmode [this is the "silence" bug overwriter, so buffer doesn't matter as much...]
    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        0,
        REFTIMES_PER_SEC, // buffer size a full 1.0s, though prolly doesn't matter here.
        0,
        pwfx,
        NULL);
    EXIT_ON_ERROR(hr)

    // get the frame count
    UINT32 bufferFrameCount;
    hr = pAudioClient->GetBufferSize(&bufferFrameCount);
    EXIT_ON_ERROR(hr)

    // create a render client
    hr = pAudioClient->GetService(IID_IAudioRenderClient, (void**)&pRenderClient);
    EXIT_ON_ERROR(hr)

    // get the buffer
    hr = pRenderClient->GetBuffer(bufferFrameCount, &captureData);
    EXIT_ON_ERROR(hr)

    // release it
    hr = pRenderClient->ReleaseBuffer(bufferFrameCount, AUDCLNT_BUFFERFLAGS_SILENT);
    EXIT_ON_ERROR(hr)

    // release the audio client
    // NOTE(review): Release() returns a refcount, not an HRESULT, and hr is
    // not reassigned here — this EXIT_ON_ERROR re-tests the previous hr.
    pAudioClient->Release();
    EXIT_ON_ERROR(hr)

    // create a new IAudioClient
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
    EXIT_ON_ERROR(hr)

    // -============================ now the sniffing code initialization stuff, direct from mauritius... ===================================

    // call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to have to do this in a timer-driven loop...
    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_LOOPBACK,
        REFTIMES_PER_SEC, // buffer size a full 1.0s, seems ok VLC
        0,
        pwfx,
        0
    );
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }
    CoTaskMemFree(pwfx);

    // activate an IAudioCaptureClient
    hr = pAudioClient->GetService(
        __uuidof(IAudioCaptureClient),
        (void**)&pAudioCaptureClient // CARE INSTANTIATION
    );
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // register with MMCSS
    DWORD nTaskIndex = 0;
    hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (NULL == hTask) {
        DWORD dwErr = GetLastError();
        ShowOutput("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    // call IAudioClient::Start
    hr = pAudioClient->Start();
    if (FAILED(hr)) {
        ShowOutput("IAudioClient::Start failed: hr = 0x%08x\n", hr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        pAudioClient->Release();
        return hr;
    }

    bFirstPacket = true;

    // start the forever grabbing thread...
    DWORD dwThreadID;
    m_hThread = CreateThread(NULL, 0, propagateBufferForever, 0, 0, &dwThreadID);

    if(!m_hThread) {
        DWORD dwErr = GetLastError();
        return HRESULT_FROM_WIN32(dwErr);
    } else {
        // we...shouldn't need this...maybe?
        // seems to make no difference...
        // NOTE(review): SetThreadPriority returns BOOL (0 on failure), not an
        // HRESULT — FAILED(hr) is never true for 0 or 1, so this error check
        // is dead code. Should test for FALSE and use GetLastError instead.
        hr = SetThreadPriority(m_hThread, THREAD_PRIORITY_TIME_CRITICAL);
        if (FAILED(hr)) {
            // of course we always want to be a high prio thread, right? [we don't use much cpu...]
            return hr;
        }
    }
    return hr;
} // end LoopbackCaptureSetup
// Main audio mixing loop (alternate/older variant; frame counts come from the
// capture buffers rather than a fixed 10 ms segment size).
//
// Every ~5 ms it drains queued audio: back-fills silence for timestamp gaps,
// mixes desktop + aux + mic buffers, computes RMS / peak meters (in dB),
// posts meter updates to the UI, and hands each segment to the encoder.
// Exits when bRunning clears, then resets the meters and flushes pending
// frames.
void OBS::MainAudioLoop()
{
    DWORD taskID = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskID);

    bPushToTalkOn = false;

    micMax = desktopMax = VOL_MIN;
    micPeak = desktopPeak = VOL_MIN;

    UINT audioFramesSinceMeterUpdate = 0;
    UINT audioFramesSinceMicMaxUpdate = 0;
    UINT audioFramesSinceDesktopMaxUpdate = 0;

    List<float> mixedLatestDesktopSamples;

    // 882 floats = 441 stereo frames = one 10 ms segment at 44.1 kHz,
    // used to back-fill gaps in the audio timeline with silence.
    List<float> blank10msSample;
    blank10msSample.SetSize(882);

    QWORD lastAudioTime = 0;

    while(TRUE)
    {
        OSSleep(5); //screw it, just run it every 5ms

        if(!bRunning)
            break;

        //-----------------------------------------------

        float *desktopBuffer, *micBuffer;
        UINT desktopAudioFrames = 0, micAudioFrames = 0;
        UINT latestDesktopAudioFrames = 0, latestMicAudioFrames = 0;

        curDesktopVol = desktopVol * desktopBoost;

        if(bUsingPushToTalk)
            curMicVol = bPushToTalkOn ? micVol : 0.0f;
        else
            curMicVol = micVol;

        curMicVol *= micBoost;

        bool bDesktopMuted = (curDesktopVol < EPSILON);
        bool bMicEnabled = (micAudio != NULL);

        QWORD timestamp;
        while(QueryNewAudio(timestamp))
        {
            if (!lastAudioTime)
                lastAudioTime = App->GetSceneTimestamp();

            // Encode silent 10 ms segments to cover any gap between the last
            // encoded time and this buffer's timestamp.
            if (lastAudioTime < timestamp)
            {
                while ((lastAudioTime+=10) < timestamp)
                    EncodeAudioSegment(blank10msSample.Array(), 441, lastAudioTime);
            }

            //----------------------------------------------------------------------------
            // get latest sample for calculating the volume levels

            float *latestDesktopBuffer = NULL, *latestMicBuffer = NULL;

            desktopAudio->GetBuffer(&desktopBuffer, &desktopAudioFrames, timestamp-10);
            desktopAudio->GetNewestFrame(&latestDesktopBuffer, &latestDesktopAudioFrames);

            UINT totalFloats = desktopAudioFrames*2;
            if(bDesktopMuted)
            {
                // Clearing the desktop audio buffer before mixing in the auxiliary audio sources.
                zero(desktopBuffer, sizeof(*desktopBuffer)*totalFloats);
            }

            if(micAudio != NULL)
            {
                micAudio->GetBuffer(&micBuffer, &micAudioFrames, timestamp-10);
                micAudio->GetNewestFrame(&latestMicBuffer, &latestMicAudioFrames);
            }

            //----------------------------------------------------------------------------
            // get latest aux volume level samples and mix

            OSEnterMutex(hAuxAudioMutex);

            mixedLatestDesktopSamples.CopyArray(latestDesktopBuffer, latestDesktopAudioFrames*2);
            for(UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *latestAuxBuffer;
                // NOTE(review): this reuses latestDesktopAudioFrames as the
                // out-param for each aux source's frame count — confirm the
                // aux sources always report the same frame count.
                if(auxAudioSources[i]->GetNewestFrame(&latestAuxBuffer, &latestDesktopAudioFrames))
                    MixAudio(mixedLatestDesktopSamples.Array(), latestAuxBuffer, latestDesktopAudioFrames*2, false);
            }

            //----------------------------------------------------------------------------
            // mix output aux sound samples with the desktop

            for(UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *auxBuffer;
                if(auxAudioSources[i]->GetBuffer(&auxBuffer, &desktopAudioFrames, timestamp-10))
                    MixAudio(desktopBuffer, auxBuffer, desktopAudioFrames*2, false);
            }

            OSLeaveMutex(hAuxAudioMutex);

            //----------------------------------------------------------------------------
            //UINT totalFloats = desktopAudioFrames*2;

            //----------------------------------------------------------------------------

            /*multiply samples by volume and compute RMS and max of samples*/
            float desktopRMS = 0, micRMS = 0, desktopMx = 0, micMx = 0;
            // Use 1.0f instead of curDesktopVol, since aux audio sources already have their volume set, and shouldn't be boosted anyway.
            if(latestDesktopBuffer)
                CalculateVolumeLevels(mixedLatestDesktopSamples.Array(), latestDesktopAudioFrames*2, 1.0f, desktopRMS, desktopMx);
            if(bMicEnabled && latestMicBuffer)
                CalculateVolumeLevels(latestMicBuffer, latestMicAudioFrames*2, curMicVol, micRMS, micMx);

            /*convert RMS and Max of samples to dB*/
            desktopRMS = toDB(desktopRMS);
            micRMS = toDB(micRMS);
            desktopMx = toDB(desktopMx);
            micMx = toDB(micMx);

            /* update max if sample max is greater or after 1 second */
            float maxAlpha = 0.15f;
            // NOTE(review): hard-coded 44100 here (the other variant of this
            // loop uses the configured sample rate) — confirm intended.
            UINT peakMeterDelayFrames = 44100 * 3;
            if(micMx > micMax)
            {
                micMax = micMx;
            }
            else
            {
                micMax = maxAlpha * micMx + (1.0f - maxAlpha) * micMax;
            }

            if(desktopMx > desktopMax)
            {
                desktopMax = desktopMx;
            }
            else
            {
                desktopMax = maxAlpha * desktopMx + (1.0f - maxAlpha) * desktopMax;
            }

            /*update delayed peak meter*/
            if(micMax > micPeak || audioFramesSinceMicMaxUpdate > peakMeterDelayFrames)
            {
                micPeak = micMax;
                audioFramesSinceMicMaxUpdate = 0;
            }
            else
            {
                audioFramesSinceMicMaxUpdate += desktopAudioFrames;
            }

            if(desktopMax > desktopPeak || audioFramesSinceDesktopMaxUpdate > peakMeterDelayFrames)
            {
                desktopPeak = desktopMax;
                audioFramesSinceDesktopMaxUpdate = 0;
            }
            else
            {
                audioFramesSinceDesktopMaxUpdate += desktopAudioFrames;
            }

            /*low pass the level sampling*/
            float rmsAlpha = 0.15f;
            desktopMag = rmsAlpha * desktopRMS + desktopMag * (1.0f - rmsAlpha);
            micMag = rmsAlpha * micRMS + micMag * (1.0f - rmsAlpha);

            /*update the meter about every 50ms*/
            audioFramesSinceMeterUpdate += desktopAudioFrames;
            if(audioFramesSinceMeterUpdate >= 2205)
            {
                PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);
                audioFramesSinceMeterUpdate = 0;
            }

            //----------------------------------------------------------------------------
            // mix mic and desktop sound, using SSE2 if available
            // also, it's perfectly fine to just mix into the returned buffer
            // NOTE(review): unlike the other MainAudioLoop variant, micBuffer
            // itself is not null-checked here, only bMicEnabled — confirm
            // micAudio->GetBuffer always sets it when micAudio != NULL.
            if(bMicEnabled)
                MixAudio(desktopBuffer, micBuffer, totalFloats, bForceMicMono);

            EncodeAudioSegment(desktopBuffer, totalFloats>>1, lastAudioTime);
        }

        //-----------------------------------------------

        if(!bRecievedFirstAudioFrame && pendingAudioFrames.Num())
            bRecievedFirstAudioFrame = true;
    }

    // Loop exited: reset meters, notify the UI once more, and drop any
    // frames that never made it to the encoder.
    desktopMag = desktopMax = desktopPeak = VOL_MIN;
    micMag = micMax = micPeak = VOL_MIN;

    PostMessage(hwndMain, WM_COMMAND, MAKEWPARAM(ID_MICVOLUMEMETER, VOLN_METERED), 0);

    for(UINT i=0; i<pendingAudioFrames.Num(); i++)
        pendingAudioFrames[i].audioData.Clear();

    AvRevertMmThreadCharacteristics(hTask);
}
// Capture loopback audio from the given render device for `secs` seconds and
// write it to `filename` as a WAV file (int-16 coerced from the mix format),
// then patch up the file's 'fact' chunk with the true frame count.
// Runs a timer-driven loop (loopback + event callback don't mix in WASAPI).
void LoopbackCaptureFor(IMMDevice* mmDevice, std::string filename, int secs)
{
    // open new file
    MMIOINFO mi = { 0 };

    // some flags cause mmioOpen write to this buffer
    // but not any that we're using
    std::wstring wsFilename(filename.begin(), filename.end()); // mmioOpen wants a wstring
    // NOTE(review): this byte-to-wchar copy only round-trips ASCII names, and
    // the returned handle is not checked for nullptr before use — confirm.
    HMMIO file = mmioOpen(const_cast<LPWSTR>(wsFilename.c_str()), &mi, MMIO_WRITE | MMIO_CREATE);

    time_t startTime = time(nullptr);

    // activate an IAudioClient
    IAudioClient* audioClient;
    HRESULT hr = mmDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
    if (FAILED(hr))
    {
        fprintf(stderr, "IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return;
    }

    // get the default device periodicity
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = audioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, nullptr);
    if (FAILED(hr))
    {
        fprintf(stderr, "IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        audioClient->Release();
        return;
    }

    // get the default device format
    WAVEFORMATEX* waveform;
    hr = audioClient->GetMixFormat(&waveform);
    if (FAILED(hr))
    {
        fprintf(stderr, "IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(waveform);
        audioClient->Release();
        return;
    }

    // coerce int-16 wave format
    // can do this in-place since we're not changing the size of the format
    // also, the engine will auto-convert from float to int for us
    switch (waveform->wFormatTag)
    {
        case WAVE_FORMAT_IEEE_FLOAT:
            waveform->wFormatTag = WAVE_FORMAT_PCM;
            waveform->wBitsPerSample = BITS_PER_SAMPLE;
            waveform->nBlockAlign = BLOCK_ALIGN;
            waveform->nAvgBytesPerSec = BYTE_RATE;
            break;
        case WAVE_FORMAT_EXTENSIBLE:
        {
            // naked scope for case-local variable
            PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(waveform);
            if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
            {
                pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                pEx->Samples.wValidBitsPerSample = BITS_PER_SAMPLE;
                waveform->wBitsPerSample = BITS_PER_SAMPLE;
                waveform->nBlockAlign = waveform->nChannels * BYTE_PER_SAMPLE;
                waveform->nAvgBytesPerSec = waveform->nBlockAlign * waveform->nSamplesPerSec;
            }
            break;
        }
    }

    MMCKINFO ckRIFF = { 0 };
    MMCKINFO ckData = { 0 };
    // NOTE(review): WriteWaveHeader's hr is never checked before proceeding.
    hr = WriteWaveHeader(file, waveform, &ckRIFF, &ckData);

    // create a periodic waitable timer
    HANDLE hWakeUp = CreateWaitableTimer(nullptr, FALSE, nullptr);
    // NOTE(review): hWakeUp is not checked for nullptr — confirm acceptable.

    UINT32 nBlockAlign = waveform->nBlockAlign;

    // call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to do a timer-driven loop
    hr = audioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, waveform, 0);
    if (FAILED(hr))
    {
        fprintf(stderr, "IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        audioClient->Release();
        return;
    }

    // free up waveform
    CoTaskMemFree(waveform);

    // activate an IAudioCaptureClient
    IAudioCaptureClient* audioCaptureClient;
    // NOTE(review): this hr is also unchecked before audioCaptureClient use.
    hr = audioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&audioCaptureClient);

    // register with MMCSS
    DWORD nTaskIndex = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (hTask == nullptr)
    {
        DWORD dwErr = GetLastError();
        fprintf(stderr, "AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        audioCaptureClient->Release();
        CloseHandle(hWakeUp);
        audioClient->Release();
        return;
    }

    // set the waitable timer: first fire after half a device period, then
    // periodically every half period (REFERENCE_TIME is 100 ns units).
    LARGE_INTEGER liFirstFire;
    liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time
    LONG lTimeBetweenFires = (LONG)hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert to milliseconds
    if (!SetWaitableTimer(hWakeUp, &liFirstFire, lTimeBetweenFires, nullptr, nullptr, FALSE))
    {
        DWORD dwErr = GetLastError();
        fprintf(stderr, "SetWaitableTimer failed: last error = %u\n", dwErr);
        AvRevertMmThreadCharacteristics(hTask);
        audioCaptureClient->Release();
        CloseHandle(hWakeUp);
        audioClient->Release();
        return;
    }

    // call IAudioClient::Start
    // NOTE(review): Start's hr is unchecked.
    hr = audioClient->Start();

    // loopback capture loop
    DWORD dwWaitResult;
    UINT32 frames = 0;
    for (UINT32 passes = 0; ; passes++)
    {
        // drain data while it is available
        UINT32 nextPacketSize;
        for (hr = audioCaptureClient->GetNextPacketSize(&nextPacketSize);
             SUCCEEDED(hr) && nextPacketSize > 0;
             hr = audioCaptureClient->GetNextPacketSize(&nextPacketSize))
        {
            // get the captured data
            BYTE* data;
            UINT32 framesToRead;
            DWORD dwFlags;
            hr = audioCaptureClient->GetBuffer(&data, &framesToRead, &dwFlags, nullptr, nullptr);
            if (FAILED(hr))
            {
                fprintf(stderr, "IAudioCaptureClient::GetBuffer failed on pass %u after %u frames: hr = 0x%08x\n", passes, frames, hr);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            // this type of error seems to happen often, ignore it
            if (dwFlags == AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)
                ;
            else if (dwFlags != 0)
            {
                fprintf(stderr, "IAudioCaptureClient::GetBuffer set flags to 0x%08x on pass %u after %u frames\n", dwFlags, passes, frames);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            if (framesToRead == 0)
            {
                fprintf(stderr, "IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u after %u frames\n", passes, frames);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            LONG lBytesToWrite = framesToRead * nBlockAlign;
#pragma prefast(suppress: __WARNING_INCORRECT_ANNOTATION, "IAudioCaptureClient::GetBuffer SAL annotation implies a 1-byte buffer")
            LONG lBytesWritten = mmioWrite(file, reinterpret_cast<PCHAR>(data), lBytesToWrite);
            if (lBytesToWrite != lBytesWritten)
            {
                fprintf(stderr, "mmioWrite wrote %u bytes on pass %u after %u frames: expected %u bytes\n", lBytesWritten, passes, frames, lBytesToWrite);
                audioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                audioCaptureClient->Release();
                CloseHandle(hWakeUp);
                audioClient->Release();
                return;
            }

            frames += framesToRead;

            hr = audioCaptureClient->ReleaseBuffer(framesToRead);
        }

        dwWaitResult = WaitForSingleObject(hWakeUp, INFINITE);

        if (time(nullptr) - startTime > secs)
            break;
    }

    FinishWaveFile(file, &ckData, &ckRIFF);
    audioClient->Stop();
    CancelWaitableTimer(hWakeUp);
    AvRevertMmThreadCharacteristics(hTask);
    audioCaptureClient->Release();
    CloseHandle(hWakeUp);
    audioClient->Release();

    // everything went well... fixup the fact chunk in the file
    MMRESULT result = mmioClose(file, 0);
    file = nullptr;
    if (result != MMSYSERR_NOERROR)
    {
        fprintf(stderr, "mmioClose failed: MMSYSERR = %u\n", result);
        return;
    }

    // reopen the file in read/write mode
    mi = { 0 };
    file = mmioOpen(const_cast<LPWSTR>(wsFilename.c_str()), &mi, MMIO_READWRITE);
    if (file == nullptr)
    {
        // NOTE(review): passing a std::string object to fprintf's %ls is
        // undefined behavior — this should be wsFilename.c_str().
        fprintf(stderr, "mmioOpen(\"%ls\", ...) failed. wErrorRet == %u\n", filename, mi.wErrorRet);
        return;
    }

    // descend into the RIFF/WAVE chunk
    ckRIFF = { 0 };
    // NOTE(review): per the mmioDescend docs, MMIO_FINDRIFF matches on the
    // fccType member, not ckid — confirm this field assignment is correct.
    ckRIFF.ckid = MAKEFOURCC('W', 'A', 'V', 'E'); // this is right for mmioDescend
    result = mmioDescend(file, &ckRIFF, nullptr, MMIO_FINDRIFF);
    if (result != MMSYSERR_NOERROR)
    {
        fprintf(stderr, "mmioDescend(\"WAVE\") failed: MMSYSERR = %u\n", result);
        return;
    }

    // descend into the fact chunk
    MMCKINFO ckFact = { 0 };
    ckFact.ckid = MAKEFOURCC('f', 'a', 'c', 't');
    result = mmioDescend(file, &ckFact, &ckRIFF, MMIO_FINDCHUNK);
    if (result != MMSYSERR_NOERROR)
    {
        fprintf(stderr, "mmioDescend(\"fact\") failed: MMSYSERR = %u\n", result);
        return;
    }

    // write the correct data to the fact chunk
    LONG lBytesWritten = mmioWrite(file, reinterpret_cast<PCHAR>(&frames), sizeof(frames));
    if (lBytesWritten != sizeof(frames))
    {
        fprintf(stderr, "Updating the fact chunk wrote %u bytes; expected %u\n", lBytesWritten, (UINT32)sizeof(frames));
        return;
    }

    // ascend out of the fact chunk
    result = mmioAscend(file, &ckFact, 0);
    if (result != MMSYSERR_NOERROR)
        fprintf(stderr, "mmioAscend(\"fact\") failed: MMSYSERR = %u\n", result);
}
//HRESULT LoopbackCapture( // IMMDevice *pMMDevice, // bool bInt16, // HANDLE hStartedEvent, // HANDLE hStopEvent, // PUINT32 pnFrames, // HMMIO hFile, // AudioBuffer *pBuffer //) HRESULT LoopbackCapture::Process() { HRESULT hr; // activate an IAudioClient IAudioClient *pAudioClient; hr = pMMDevice->Activate( __uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&pAudioClient ); if (FAILED(hr)) { printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr); return hr; } // get the default device periodicity REFERENCE_TIME hnsDefaultDevicePeriod; hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL); if (FAILED(hr)) { printf("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr); pAudioClient->Release(); return hr; } // get the default device format WAVEFORMATEX *pwfx; hr = pAudioClient->GetMixFormat(&pwfx); if (FAILED(hr)) { printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr); CoTaskMemFree(pwfx); pAudioClient->Release(); return hr; } if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx); //pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM; printf("WAVE_FORMAT_EXTENSIBLE\n"); if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) { printf("float\n"); }// else if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_PCM, pEx->SubFormat)) { printf("PCM\n"); }//KSDATAFORMAT_SUBTYPE_WAVEFORMATEX else if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_WAVEFORMATEX, pEx->SubFormat)) { printf("WAVEFORMATEX\n"); } } if (bInt16) { // coerce int-16 wave format // can do this in-place since we're not changing the size of the format // also, the engine will auto-convert from float to int for us switch (pwfx->wFormatTag) { case WAVE_FORMAT_IEEE_FLOAT: pwfx->wFormatTag = WAVE_FORMAT_PCM; pwfx->wBitsPerSample = 16; pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8; pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec; break; case WAVE_FORMAT_EXTENSIBLE: { // naked scope for case-local 
variable PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx); if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) { pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM; pEx->Samples.wValidBitsPerSample = 16; pwfx->wBitsPerSample = 16; pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8; pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec; } else { printf("Don't know how to coerce mix format to int-16\n"); CoTaskMemFree(pwfx); pAudioClient->Release(); return E_UNEXPECTED; } } break; default: printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag); CoTaskMemFree(pwfx); pAudioClient->Release(); return E_UNEXPECTED; } } MMCKINFO ckRIFF = {0}; MMCKINFO ckData = {0}; if (hFile!=NULL) hr = WriteWaveHeader(hFile, pwfx, &ckRIFF, &ckData); if (pBuffer) { bool isFloat = false; switch (pwfx->wFormatTag) { case WAVE_FORMAT_IEEE_FLOAT: isFloat = true; break; case WAVE_FORMAT_EXTENSIBLE: { // naked scope for case-local variable PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx); if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) { isFloat = true; } } break; default: break; } pBuffer->SetAudioInfo(pwfx->nSamplesPerSec,pwfx->nBlockAlign,pwfx->nChannels,pwfx->wBitsPerSample,isFloat); } if (FAILED(hr)) { // WriteWaveHeader does its own logging CoTaskMemFree(pwfx); pAudioClient->Release(); return hr; } // create a periodic waitable timer HANDLE hWakeUp = CreateWaitableTimer(NULL, FALSE, NULL); if (NULL == hWakeUp) { DWORD dwErr = GetLastError(); printf("CreateWaitableTimer failed: last error = %u\n", dwErr); CoTaskMemFree(pwfx); pAudioClient->Release(); return HRESULT_FROM_WIN32(dwErr); } UINT32 nBlockAlign = pwfx->nBlockAlign; UINT32 nChannels = pwfx->nChannels; nFrames = 0; // call IAudioClient::Initialize // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK // do not work together... 
// the "data ready" event never gets set // so we're going to do a timer-driven loop hr = pAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, pwfx, 0 ); if (FAILED(hr)) { printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; } CoTaskMemFree(pwfx); // activate an IAudioCaptureClient IAudioCaptureClient *pAudioCaptureClient; hr = pAudioClient->GetService( __uuidof(IAudioCaptureClient), (void**)&pAudioCaptureClient ); if (FAILED(hr)) { printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; } // register with MMCSS DWORD nTaskIndex = 0; HANDLE hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex); if (NULL == hTask) { DWORD dwErr = GetLastError(); printf("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return HRESULT_FROM_WIN32(dwErr); } // set the waitable timer LARGE_INTEGER liFirstFire; liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time LONG lTimeBetweenFires = (LONG)hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert to milliseconds BOOL bOK = SetWaitableTimer( hWakeUp, &liFirstFire, lTimeBetweenFires, NULL, NULL, FALSE ); if (!bOK) { DWORD dwErr = GetLastError(); printf("SetWaitableTimer failed: last error = %u\n", dwErr); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return HRESULT_FROM_WIN32(dwErr); } // call IAudioClient::Start hr = pAudioClient->Start(); if (FAILED(hr)) { printf("IAudioClient::Start failed: hr = 0x%08x\n", hr); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; } SetEvent(hStartedEvent); // loopback capture loop HANDLE waitArray[2] = { hStopEvent, hWakeUp }; DWORD 
dwWaitResult; DWORD immdState; bool bDone = false; bool bFirstPacket = true; for (UINT32 nPasses = 0; !bDone; nPasses++) { dwWaitResult = WaitForMultipleObjects( ARRAYSIZE(waitArray), waitArray, FALSE, INFINITE ); if (WAIT_OBJECT_0 == dwWaitResult) { //printf("Received stop event after %u passes and %u frames\n", nPasses, nFrames); bDone = true; continue; // exits loop } if (WAIT_OBJECT_0 + 1 != dwWaitResult) { printf("Unexpected WaitForMultipleObjects return value %u on pass %u after %u frames\n", dwWaitResult, nPasses, nFrames); pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return E_UNEXPECTED; } printf("'"); // got a "wake up" event - see if there's data UINT32 nNextPacketSize; hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize); if (FAILED(hr)) { if (hr == AUDCLNT_E_SERVICE_NOT_RUNNING) printf("AUDCLNT_E_SERVICE_NOT_RUNNING : \n"); else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) printf("AUDCLNT_E_DEVICE_INVALIDATED : \n"); else printf("UNKNOWN ERROR!!! 
: \n"); printf("IAudioCaptureClient::GetNextPacketSize failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, nFrames, hr); pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; } if (0 == nNextPacketSize) { // no data yet continue; } // get the captured data BYTE *pData; UINT32 nNumFramesToRead; DWORD dwFlags; hr = pAudioCaptureClient->GetBuffer( &pData, &nNumFramesToRead, &dwFlags, NULL, NULL ); if (FAILED(hr)) { printf("IAudioCaptureClient::GetBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, nFrames, hr); pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; } if (bFirstPacket && AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY == dwFlags) { printf("Probably spurious glitch reported on first packet\n"); } else if (dwFlags & AUDCLNT_BUFFERFLAGS_SILENT) { printf("#"); } else if (dwFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) { printf("!"); } else if (0 != dwFlags) { printf("IAudioCaptureClient::GetBuffer set flags to 0x%08x on pass %u after %u frames\n", dwFlags, nPasses, nFrames); pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return E_UNEXPECTED; } if (0 == nNumFramesToRead) { // no data yet continue; } //if (0 == nNumFramesToRead) { // printf("IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u after %u frames\n", nPasses, nFrames); // pAudioClient->Stop(); // CancelWaitableTimer(hWakeUp); // AvRevertMmThreadCharacteristics(hTask); // pAudioCaptureClient->Release(); // CloseHandle(hWakeUp); // pAudioClient->Release(); // return E_UNEXPECTED; //} LONG lBytesToWrite = nNumFramesToRead * nBlockAlign; #pragma prefast(suppress: __WARNING_INCORRECT_ANNOTATION, 
"IAudioCaptureClient::GetBuffer SAL annotation implies a 1-byte buffer") if (hFile!=NULL) { LONG lBytesWritten = mmioWrite(hFile, reinterpret_cast<PCHAR>(pData), lBytesToWrite); if (lBytesToWrite != lBytesWritten) { printf("mmioWrite wrote %u bytes on pass %u after %u frames: expected %u bytes\n", lBytesWritten, nPasses, nFrames, lBytesToWrite); pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return E_UNEXPECTED; } } if (pBuffer) { //switch (nBlockAlign/nChannels) //{ //case 1: // ShowPCM((unsigned char*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_Byte"); // break; //case 2: // ShowPCM((short*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_Short"); // break; //case 4: // ShowPCM((int*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_Int"); // //ShowPCM((float*)pData,nNumFramesToRead,nChannels,1024,60,"SYS_float"); // break; //} pBuffer->PushBuffer(pData,lBytesToWrite); } nFrames += nNumFramesToRead; hr = pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead); if (FAILED(hr)) { printf("IAudioCaptureClient::ReleaseBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, nFrames, hr); pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; } bFirstPacket = false; } // capture loop if (hFile!=NULL) hr = FinishWaveFile(hFile, &ckData, &ckRIFF); if (FAILED(hr)) { // FinishWaveFile does it's own logging pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; } pAudioClient->Stop(); CancelWaitableTimer(hWakeUp); AvRevertMmThreadCharacteristics(hTask); pAudioCaptureClient->Release(); CloseHandle(hWakeUp); pAudioClient->Release(); return hr; }
//
// Capture thread body: reads audio from the member _CaptureClient on a
// timer-driven loop, accumulates it into the member _CaptureBuffer, and
// plays it back live through an XAudio2 source voice. Runs until the
// member _ShutdownEvent is signaled. Returns 0 on normal exit, or the
// failing HRESULT if COM initialization fails.
//
DWORD CWASAPICapture::DoCaptureThread() {
    HANDLE mmcssHandle = NULL;
    IXAudio2* xaudio = 0;
    IXAudio2MasteringVoice* mastering_voice = 0;
    IXAudio2SourceVoice* source_voice = 0;

    try {
        bool stillPlaying = true;
        DWORD mmcssTaskIndex = 0;

        HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
        if (FAILED(hr)) {
            printf_s("Unable to initialize COM in render thread: %x\n", hr);
            return hr;
        }

        // register this thread with MMCSS; failure is non-fatal, we just
        // run without the priority boost
        mmcssHandle = AvSetMmThreadCharacteristics(L"Audio", &mmcssTaskIndex);
        if (mmcssHandle == NULL) {
            printf_s("Unable to enable MMCSS on capture thread: %d\n", GetLastError());
        }

        //
        // Initialize XAudio2 for live playback of the captured stream.
        //
        {
            UINT32 flags = 0;
#ifdef _DEBUG
            flags |= XAUDIO2_DEBUG_ENGINE;
#endif
            if( FAILED( hr = XAudio2Create( &xaudio, flags ) ) )
                throw "XAudio2Create";

            // Create a mastering voice
            if( FAILED( hr = xaudio->CreateMasteringVoice( &mastering_voice ) ) )
                throw "CreateMasteringVoice";

            // Create a source voice using the capture format's WAVEFORMATEX
            if( FAILED( xaudio->CreateSourceVoice( &source_voice, MixFormat() ) ) )
                throw "CreateSourceVoice";

            // start playback
            source_voice->Start();
        }

        while (stillPlaying) {
            HRESULT hr;
            //
            // In Timer Driven mode, we want to wait for half the desired latency in milliseconds.
            //
            // That way we'll wake up half way through the processing period to pull the
            // next set of samples from the engine.
            //
            DWORD waitResult = WaitForSingleObject(_ShutdownEvent, _EngineLatencyInMS / 2);
            switch (waitResult) {
            case WAIT_OBJECT_0 + 0:     // _ShutdownEvent
                stillPlaying = false;   // We're done, exit the loop.
                break;
            case WAIT_TIMEOUT:          // Timeout
                //
                // We need to retrieve the next buffer of samples from the audio capturer.
                //
                BYTE *pData;
                UINT32 framesAvailable;
                DWORD flags;

                //
                // Find out how much capture data is available. We need to make sure we don't run over the length
                // of our capture buffer. We'll discard any samples that don't fit in the buffer.
                //
                hr = _CaptureClient->GetBuffer(&pData, &framesAvailable, &flags, NULL, NULL);
                if (SUCCEEDED(hr)) {
                    UINT32 framesToCopy = min(framesAvailable, static_cast<UINT32>((_CaptureBufferSize - _CurrentCaptureIndex) / _FrameSize));
                    if (framesToCopy != 0) {
                        //
                        // The flags on capture tell us information about the data.
                        //
                        // We only really care about the silent flag since we want to put frames of silence into the buffer
                        // when we receive silence. We rely on the fact that a logical bit 0 is silence for both float and int formats.
                        //
                        if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
                            //
                            // Fill 0s from the capture buffer to the output buffer.
                            //
                            ZeroMemory(&_CaptureBuffer[_CurrentCaptureIndex], framesToCopy*_FrameSize);
                        } else {
                            //
                            // Copy data from the audio engine buffer to the output buffer.
                            //
                            CopyMemory(&_CaptureBuffer[_CurrentCaptureIndex], pData, framesToCopy*_FrameSize);

                            // Submit the data to the source voice.
                            // BUGFIX: submit the persistent copy in _CaptureBuffer,
                            // not pData. The engine buffer behind pData is handed
                            // back by ReleaseBuffer() below, but XAudio2 requires
                            // pAudioData to stay valid until the voice finishes
                            // playing it; submitting pData was a use-after-release.
                            XAUDIO2_BUFFER buffer = { 0 };
                            buffer.AudioBytes = framesToCopy * _FrameSize;              // byte count of the buffer
                            buffer.pAudioData = &_CaptureBuffer[_CurrentCaptureIndex]; // start of the retained copy
                            source_voice->SubmitSourceBuffer( &buffer );
                        }
                        //
                        // Bump the capture buffer pointer.
                        //
                        _CurrentCaptureIndex += framesToCopy*_FrameSize;
                    }
                    hr = _CaptureClient->ReleaseBuffer(framesAvailable);
                    if (FAILED(hr)) {
                        printf_s("Unable to release capture buffer: %x!\n", hr);
                    }
                }
                break;
            }
        }
    } catch( const char* e ) {
        std::cout << e << std::endl;
    }

    // Cleanup XAudio2
    if( mastering_voice != 0 ) {
        // crashes here (left disabled pending investigation)
        //mastering_voice->DestroyVoice();
        mastering_voice = 0;
    }
    if( xaudio != 0 ) {
        // crashes here too (left disabled pending investigation)
        //xaudio->Release();
        xaudio = 0;
    }

    // BUGFIX: only revert MMCSS if registration actually succeeded;
    // AvRevertMmThreadCharacteristics requires a valid handle.
    if (mmcssHandle != NULL) {
        AvRevertMmThreadCharacteristics(mmcssHandle);
    }
    CoUninitialize();
    return 0;
}
//
// Timer-driven WASAPI loopback capture that streams raw packets to a single
// TCP client via SimpleTcpServer instead of writing a file.
//
// Parameters:
//   pMMDevice          - endpoint to capture from (loopback of its render mix)
//   bInt16             - coerce the mix format to 16-bit PCM in place
//   hStartedEvent      - signaled once the stream is running
//   hStopEvent         - signaled by the caller to end capture
//   pnFrames           - out: number of frames captured
//   bMono / iSampleRateDivisor - forwarded to server.configure()
//
// Returns S_OK on a clean stop, otherwise the failing HRESULT.
// NOTE(review): server.shutdown() is only reached on the success path —
// error paths rely on SimpleTcpServer's destructor; confirm that is intended.
//
HRESULT LoopbackCapture(
    IMMDevice *pMMDevice,
    bool bInt16,
    HANDLE hStartedEvent,
    HANDLE hStopEvent,
    PUINT32 pnFrames,
    bool bMono,
    INT32 iSampleRateDivisor
) {
    HRESULT hr;
    SimpleTcpServer server;

    // Wait for client connection before attempting any audio capture
    server.setup();
    server.waitForClient();

    // activate an IAudioClient
    IAudioClient *pAudioClient;
    hr = pMMDevice->Activate(
        __uuidof(IAudioClient),
        CLSCTX_ALL, NULL,
        (void**)&pAudioClient
    );
    if (FAILED(hr)) {
        printf("IMMDevice::Activate(IAudioClient) failed: hr = 0x%08x", hr);
        return hr;
    }

    // get the default device periodicity; this drives the wake-up timer below
    REFERENCE_TIME hnsDefaultDevicePeriod;
    hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
    if (FAILED(hr)) {
        printf("IAudioClient::GetDevicePeriod failed: hr = 0x%08x\n", hr);
        pAudioClient->Release();
        return hr;
    }

    // get the default device format
    // BUGFIX: initialize to NULL so the CoTaskMemFree in the failure branch is
    // well-defined even if GetMixFormat fails before setting the out-pointer
    // (CoTaskMemFree(NULL) is a documented no-op).
    WAVEFORMATEX *pwfx = NULL;
    hr = pAudioClient->GetMixFormat(&pwfx);
    if (FAILED(hr)) {
        printf("IAudioClient::GetMixFormat failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return hr;
    }

    if (bInt16) {
        // coerce int-16 wave format
        // can do this in-place since we're not changing the size of the format
        // also, the engine will auto-convert from float to int for us
        switch (pwfx->wFormatTag) {
            case WAVE_FORMAT_IEEE_FLOAT:
                pwfx->wFormatTag = WAVE_FORMAT_PCM;
                pwfx->wBitsPerSample = 16;
                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                break;

            case WAVE_FORMAT_EXTENSIBLE:
                { // naked scope for case-local variable
                    PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
                    if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)) {
                        pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
                        pEx->Samples.wValidBitsPerSample = 16;
                        pwfx->wBitsPerSample = 16;
                        pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
                        pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
                    } else {
                        printf("Don't know how to coerce mix format to int-16\n");
                        CoTaskMemFree(pwfx);
                        pAudioClient->Release();
                        return E_UNEXPECTED;
                    }
                }
                break;

            default:
                printf("Don't know how to coerce WAVEFORMATEX with wFormatTag = 0x%08x to int-16\n", pwfx->wFormatTag);
                CoTaskMemFree(pwfx);
                pAudioClient->Release();
                return E_UNEXPECTED;
        }
    }

    // create a periodic waitable timer
    HANDLE hWakeUp = CreateWaitableTimer(NULL, FALSE, NULL);
    if (NULL == hWakeUp) {
        DWORD dwErr = GetLastError();
        printf("CreateWaitableTimer failed: last error = %u\n", dwErr);
        CoTaskMemFree(pwfx);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    UINT32 nBlockAlign = pwfx->nBlockAlign;
    UINT32 nBufferSize;
    *pnFrames = 0;

    // call IAudioClient::Initialize
    // note that AUDCLNT_STREAMFLAGS_LOOPBACK and AUDCLNT_STREAMFLAGS_EVENTCALLBACK
    // do not work together...
    // the "data ready" event never gets set
    // so we're going to do a timer-driven loop
    hr = pAudioClient->Initialize(
        AUDCLNT_SHAREMODE_SHARED,
        AUDCLNT_STREAMFLAGS_LOOPBACK,
        0, 0, pwfx, 0
    );
    if (FAILED(hr)) {
        printf("IAudioClient::Initialize failed: hr = 0x%08x\n", hr);
        CoTaskMemFree(pwfx); // BUGFIX: this path previously leaked the mix format
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }
    CoTaskMemFree(pwfx);

    // Get the buffer size
    hr = pAudioClient->GetBufferSize(&nBufferSize);
    if (FAILED(hr)) {
        printf("IAudioClient::GetBufferSize failed: hr = 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }

    // Configure the server. The buffer size returned is in frames
    // so assume stereo, 16 bits per sample to convert from frames to bytes
    server.configure(
        bMono,
        iSampleRateDivisor,
        nBufferSize * 2 * 2);

    // activate an IAudioCaptureClient
    IAudioCaptureClient *pAudioCaptureClient;
    hr = pAudioClient->GetService(
        __uuidof(IAudioCaptureClient),
        (void**)&pAudioCaptureClient
    );
    if (FAILED(hr)) {
        printf("IAudioClient::GetService(IAudioCaptureClient) failed: hr 0x%08x\n", hr);
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }

    // register with MMCSS
    DWORD nTaskIndex = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(L"Capture", &nTaskIndex);
    if (NULL == hTask) {
        DWORD dwErr = GetLastError();
        printf("AvSetMmThreadCharacteristics failed: last error = %u\n", dwErr);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    // set the waitable timer: first fire after half a device period, then
    // periodically every half period so we wake mid-way through each buffer
    LARGE_INTEGER liFirstFire;
    liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time
    LONG lTimeBetweenFires = (LONG)hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert to milliseconds
    BOOL bOK = SetWaitableTimer(
        hWakeUp,
        &liFirstFire,
        lTimeBetweenFires,
        NULL, NULL, FALSE
    );
    if (!bOK) {
        DWORD dwErr = GetLastError();
        printf("SetWaitableTimer failed: last error = %u\n", dwErr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return HRESULT_FROM_WIN32(dwErr);
    }

    // call IAudioClient::Start
    hr = pAudioClient->Start();
    if (FAILED(hr)) {
        printf("IAudioClient::Start failed: hr = 0x%08x\n", hr);
        AvRevertMmThreadCharacteristics(hTask);
        pAudioCaptureClient->Release();
        CloseHandle(hWakeUp);
        pAudioClient->Release();
        return hr;
    }

    SetEvent(hStartedEvent);

    // loopback capture loop
    HANDLE waitArray[2] = { hStopEvent, hWakeUp };
    DWORD dwWaitResult;

    bool bDone = false;
    for (UINT32 nPasses = 0; !bDone; nPasses++) {
        // drain data while it is available
        UINT32 nNextPacketSize;
        for (
            hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize);
            SUCCEEDED(hr) && nNextPacketSize > 0;
            hr = pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize)
        ) {
            // get the captured data
            BYTE *pData;
            UINT32 nNumFramesToRead;
            DWORD dwFlags;
            hr = pAudioCaptureClient->GetBuffer(
                &pData,
                &nNumFramesToRead,
                &dwFlags,
                NULL, NULL
            );
            if (FAILED(hr)) {
                printf("IAudioCaptureClient::GetBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, *pnFrames, hr);
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return hr;
            }

#ifdef _DEBUG
            if (0 != dwFlags) {
                printf("[ignoring] IAudioCaptureClient::GetBuffer set flags to 0x%08x on pass %u after %u frames\n", dwFlags, nPasses, *pnFrames);
            }
#endif

            if (0 == nNumFramesToRead) {
                printf("IAudioCaptureClient::GetBuffer said to read 0 frames on pass %u after %u frames\n", nPasses, *pnFrames);
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }

            // ship the raw packet to the connected client
            LONG lBytesToWrite = nNumFramesToRead * nBlockAlign;
            if (server.sendData(reinterpret_cast<const char*>(pData), lBytesToWrite) != 0) {
                printf("Error sending data to peer\n");
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return E_UNEXPECTED;
            }

            *pnFrames += nNumFramesToRead;

            hr = pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
            if (FAILED(hr)) {
                printf("IAudioCaptureClient::ReleaseBuffer failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, *pnFrames, hr);
                pAudioClient->Stop();
                CancelWaitableTimer(hWakeUp);
                AvRevertMmThreadCharacteristics(hTask);
                pAudioCaptureClient->Release();
                CloseHandle(hWakeUp);
                pAudioClient->Release();
                return hr;
            }
        }

        if (FAILED(hr)) {
            printf("IAudioCaptureClient::GetNextPacketSize failed on pass %u after %u frames: hr = 0x%08x\n", nPasses, *pnFrames, hr);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return hr;
        }

        dwWaitResult = WaitForMultipleObjects(
            ARRAYSIZE(waitArray), waitArray,
            FALSE, INFINITE
        );

        if (WAIT_OBJECT_0 == dwWaitResult) {
            printf("Received stop event after %u passes and %u frames\n", nPasses, *pnFrames);
            bDone = true;
            continue; // exits loop
        }

        if (WAIT_OBJECT_0 + 1 != dwWaitResult) {
            printf("Unexpected WaitForMultipleObjects return value %u on pass %u after %u frames\n", dwWaitResult, nPasses, *pnFrames);
            pAudioClient->Stop();
            CancelWaitableTimer(hWakeUp);
            AvRevertMmThreadCharacteristics(hTask);
            pAudioCaptureClient->Release();
            CloseHandle(hWakeUp);
            pAudioClient->Release();
            return E_UNEXPECTED;
        }
    } // capture loop

    pAudioClient->Stop();
    CancelWaitableTimer(hWakeUp);
    AvRevertMmThreadCharacteristics(hTask);
    pAudioCaptureClient->Release();
    CloseHandle(hWakeUp);
    pAudioClient->Release();
    server.shutdown();

    return hr;
}