/**
 * JNI bridge for IAudioClient::IsFormatSupported.
 *
 * @param thiz      native IAudioClient pointer packed in a jlong
 * @param shareMode AUDCLNT_SHAREMODE value
 * @param pFormat   native WAVEFORMATEX pointer packed in a jlong
 * @return the closest supported format as a jlong: the requested format
 *         itself when it is supported exactly, a device-provided closest
 *         match on S_FALSE, or NULL when the format is unsupported.
 *         Throws an HResultException into the JVM on unexpected HRESULTs.
 */
JNIEXPORT jlong JNICALL
Java_org_jitsi_impl_neomedia_jmfext_media_protocol_wasapi_WASAPI_IAudioClient_1IsFormatSupported
    (JNIEnv *env, jclass clazz, jlong thiz, jint shareMode, jlong pFormat)
{
    WAVEFORMATEX *requested = (WAVEFORMATEX *) (intptr_t) pFormat;
    WAVEFORMATEX *match = NULL;
    HRESULT hresult
        = IAudioClient_IsFormatSupported(
                (IAudioClient *) (intptr_t) thiz,
                (AUDCLNT_SHAREMODE) shareMode,
                requested,
                &match);

    if (hresult == S_OK)
    {
        /* Fully supported. Report the requested format itself when the
         * device did not hand back a separate (identical) match. */
        if (!match)
            match = requested;
    }
    else if ((hresult != S_FALSE)
            && (hresult != AUDCLNT_E_UNSUPPORTED_FORMAT))
    {
        /* S_FALSE (closest match produced) and AUDCLNT_E_UNSUPPORTED_FORMAT
         * (clean "no") are expected answers; anything else is an error. */
        WASAPI_throwNewHResultException(env, hresult, __func__, __LINE__);
    }
    return (jlong) (intptr_t) match;
}
static int try_format(struct wasapi_state *state, struct ao *const ao, int bits, int samplerate, const struct mp_chmap channels) { WAVEFORMATEXTENSIBLE wformat; set_format(&wformat, bits / 8, samplerate, channels.num, mp_chmap_to_waveext(&channels)); int af_format = format_set_bits(ao->format, bits, bits == 32); EnterCriticalSection(&state->print_lock); mp_msg(MSGT_AO, MSGL_V, "ao-wasapi: trying %dch %s @ %dhz\n", channels.num, af_fmt_to_str(af_format), samplerate); LeaveCriticalSection(&state->print_lock); union WAVEFMT u; u.extensible = &wformat; WAVEFORMATEX *closestMatch; HRESULT hr = IAudioClient_IsFormatSupported(state->pAudioClient, state->share_mode, u.ex, &closestMatch); if (closestMatch) { if (closestMatch->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { u.ex = closestMatch; wformat = *u.extensible; } else { wformat.Format = *closestMatch; } CoTaskMemFree(closestMatch); } if (hr == S_FALSE) { if (set_ao_format(state, ao, wformat)) { EnterCriticalSection(&state->print_lock); mp_msg(MSGT_AO, MSGL_V, "ao-wasapi: accepted as %dch %s @ %dhz\n", ao->channels.num, af_fmt_to_str(ao->format), ao->samplerate); LeaveCriticalSection(&state->print_lock); return 1; } } if (hr == S_OK || (!state->opt_exclusive && hr == AUDCLNT_E_UNSUPPORTED_FORMAT)) { // AUDCLNT_E_UNSUPPORTED_FORMAT here means "works in shared, doesn't in exclusive" if (set_ao_format(state, ao, wformat)) { EnterCriticalSection(&state->print_lock); mp_msg(MSGT_AO, MSGL_V, "ao-wasapi: %dch %s @ %dhz accepted\n", ao->channels.num, af_fmt_to_str(af_format), samplerate); LeaveCriticalSection(&state->print_lock); return 1; } } return 0; }
/*
 * Ask the device whether wformat is usable in exclusive mode.
 *
 * Returns true iff IsFormatSupported succeeded (format accepted).
 * AUDCLNT_E_UNSUPPORTED_FORMAT is treated as a clean "no" (returns false
 * without logging an error); any other failure is routed through
 * EXIT_ON_ERROR to the exit_label error path.
 */
static bool try_format_exclusive(struct ao *ao, WAVEFORMATEXTENSIBLE *wformat)
{
    struct wasapi_state *state = ao->priv;
    MP_VERBOSE(ao, "Trying %s (exclusive)\n",
               waveformat_to_str(&wformat->Format));
    /* Closest-match out parameter is NULL: exclusive-mode queries never
     * produce one, per the IsFormatSupported contract. */
    HRESULT hr = IAudioClient_IsFormatSupported(state->pAudioClient,
                                                AUDCLNT_SHAREMODE_EXCLUSIVE,
                                                &wformat->Format, NULL);
    /* NOTE(review): EXIT_ON_ERROR presumably jumps to exit_label when
     * FAILED(hr) — confirm against the macro definition. */
    if (hr != AUDCLNT_E_UNSUPPORTED_FORMAT)
        EXIT_ON_ERROR(hr);
    return SUCCEEDED(hr);
exit_label:
    MP_ERR(state, "Error testing exclusive format: %s\n", mp_HRESULT_to_str(hr));
    return false;
}
/*
 * Conformance test for IAudioClient: exercises QueryInterface,
 * GetDevicePeriod, GetMixFormat, IsFormatSupported, Initialize and the
 * transport methods, asserting the exact HRESULTs real devices return.
 * The calls are strictly order-dependent: Initialize can only succeed
 * once per client, so pre-Initialize argument-validation tests must run
 * before the single valid Initialize.
 */
static void test_audioclient(void)
{
    IAudioClient *ac;
    IUnknown *unk;
    HRESULT hr;
    ULONG ref;
    WAVEFORMATEX *pwfx, *pwfx2;
    REFERENCE_TIME t1, t2;
    HANDLE handle;

    /* Activate an IAudioClient on the (file-global) device under test. */
    hr = IMMDevice_Activate(dev, &IID_IAudioClient, CLSCTX_INPROC_SERVER,
            NULL, (void**)&ac);
    ok(hr == S_OK, "Activation failed with %08x\n", hr);
    if(hr != S_OK)
        return;

    handle = CreateEventW(NULL, FALSE, FALSE, NULL);

    /* QueryInterface argument validation. */
    hr = IAudioClient_QueryInterface(ac, &IID_IUnknown, NULL);
    ok(hr == E_POINTER, "QueryInterface(NULL) returned %08x\n", hr);

    /* Poison the out pointer to verify it is reset to NULL on failure. */
    unk = (void*)(LONG_PTR)0x12345678;
    hr = IAudioClient_QueryInterface(ac, &IID_NULL, (void**)&unk);
    ok(hr == E_NOINTERFACE, "QueryInterface(IID_NULL) returned %08x\n", hr);
    ok(!unk, "QueryInterface(IID_NULL) returned non-null pointer %p\n", unk);

    hr = IAudioClient_QueryInterface(ac, &IID_IUnknown, (void**)&unk);
    ok(hr == S_OK, "QueryInterface(IID_IUnknown) returned %08x\n", hr);
    if (unk)
    {
        /* Release drops us back to the single 'ac' reference. */
        ref = IUnknown_Release(unk);
        ok(ref == 1, "Released count is %u\n", ref);
    }

    hr = IAudioClient_QueryInterface(ac, &IID_IAudioClient, (void**)&unk);
    ok(hr == S_OK, "QueryInterface(IID_IAudioClient) returned %08x\n", hr);
    if (unk)
    {
        ref = IUnknown_Release(unk);
        ok(ref == 1, "Released count is %u\n", ref);
    }

    /* GetDevicePeriod: at least one of the two out pointers is required. */
    hr = IAudioClient_GetDevicePeriod(ac, NULL, NULL);
    ok(hr == E_POINTER, "Invalid GetDevicePeriod call returns %08x\n", hr);

    hr = IAudioClient_GetDevicePeriod(ac, &t1, NULL);
    ok(hr == S_OK, "Valid GetDevicePeriod call returns %08x\n", hr);

    hr = IAudioClient_GetDevicePeriod(ac, NULL, &t2);
    ok(hr == S_OK, "Valid GetDevicePeriod call returns %08x\n", hr);

    hr = IAudioClient_GetDevicePeriod(ac, &t1, &t2);
    ok(hr == S_OK, "Valid GetDevicePeriod call returns %08x\n", hr);
    /* Periods are in 100-ns units; print as milliseconds. */
    trace("Returned periods: %u.%05u ms %u.%05u ms\n",
            (UINT)(t1/10000), (UINT)(t1 % 10000),
            (UINT)(t2/10000), (UINT)(t2 % 10000));

    hr = IAudioClient_GetMixFormat(ac, NULL);
    ok(hr == E_POINTER, "GetMixFormat returns %08x\n", hr);

    hr = IAudioClient_GetMixFormat(ac, &pwfx);
    ok(hr == S_OK, "Valid GetMixFormat returns %08x\n", hr);
    /* NOTE(review): if GetMixFormat fails, pwfx is left indeterminate but is
     * still passed to Initialize below — confirm intentional (test code). */

    if (hr == S_OK)
    {
        trace("pwfx: %p\n", pwfx);
        trace("Tag: %04x\n", pwfx->wFormatTag);
        trace("bits: %u\n", pwfx->wBitsPerSample);
        trace("chan: %u\n", pwfx->nChannels);
        trace("rate: %u\n", pwfx->nSamplesPerSec);
        trace("align: %u\n", pwfx->nBlockAlign);
        trace("extra: %u\n", pwfx->cbSize);
        ok(pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE, "wFormatTag is %x\n", pwfx->wFormatTag);
        if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
        {
            WAVEFORMATEXTENSIBLE *pwfxe = (void*)pwfx;
            trace("Res: %u\n", pwfxe->Samples.wReserved);
            trace("Mask: %x\n", pwfxe->dwChannelMask);
            trace("Alg: %s\n",
                    IsEqualGUID(&pwfxe->SubFormat, &KSDATAFORMAT_SUBTYPE_PCM)?"PCM":
                    (IsEqualGUID(&pwfxe->SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)?"FLOAT":"Other"));
        }

        /* The mix format itself must be supported in shared mode, with no
         * separate closest match produced. */
        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_SHARED, pwfx, &pwfx2);
        ok(hr == S_OK, "Valid IsFormatSupported(Shared) call returns %08x\n", hr);
        ok(pwfx2 == NULL, "pwfx2 is non-null\n");
        CoTaskMemFree(pwfx2);

        /* Argument validation: shared mode requires both pointers. */
        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_SHARED, NULL, NULL);
        ok(hr == E_POINTER, "IsFormatSupported(NULL) call returns %08x\n", hr);

        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_SHARED, pwfx, NULL);
        ok(hr == E_POINTER, "IsFormatSupported(Shared,NULL) call returns %08x\n", hr);

        /* Exclusive mode: match pointer is optional and never filled in. */
        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_EXCLUSIVE, pwfx, NULL);
        ok(hr == S_OK || hr == AUDCLNT_E_UNSUPPORTED_FORMAT, "IsFormatSupported(Exclusive) call returns %08x\n", hr);

        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_EXCLUSIVE, pwfx, &pwfx2);
        ok(hr == S_OK || hr == AUDCLNT_E_UNSUPPORTED_FORMAT, "IsFormatSupported(Exclusive) call returns %08x\n", hr);
        ok(pwfx2 == NULL, "pwfx2 non-null on exclusive IsFormatSupported\n");

        /* Bogus share mode. */
        hr = IAudioClient_IsFormatSupported(ac, 0xffffffff, pwfx, NULL);
        ok(hr == E_INVALIDARG || hr == AUDCLNT_E_UNSUPPORTED_FORMAT,
                "IsFormatSupported(0xffffffff) call returns %08x\n", hr);
    }

    /* Methods other than Initialize must fail on an uninitialized client. */
    test_uninitialized(ac);

    /* Initialize argument validation (share mode 3 is out of range). */
    hr = IAudioClient_Initialize(ac, 3, 0, 5000000, 0, pwfx, NULL);
    ok(hr == AUDCLNT_E_NOT_INITIALIZED, "Initialize with invalid sharemode returns %08x\n", hr);

    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0xffffffff, 5000000, 0, pwfx, NULL);
    ok(hr == E_INVALIDARG, "Initialize with invalid flags returns %08x\n", hr);

    /* It seems that if length > 2s or periodicity != 0 the length is ignored and call succeeds
     * Since we can only initialize successfully once, skip those tests.
     */
    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0, 5000000, 0, NULL, NULL);
    ok(hr == E_POINTER, "Initialize with null format returns %08x\n", hr);

    /* The single valid Initialize (0.5 s buffer, mix format). */
    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0, 5000000, 0, pwfx, NULL);
    ok(hr == S_OK, "Valid Initialize returns %08x\n", hr);
    if (hr != S_OK)
    {
        skip("Cannot initialize %08x, remainder of tests is useless\n", hr);
        CoTaskMemFree(pwfx);
        return;
    }

    hr = IAudioClient_GetStreamLatency(ac, NULL);
    ok(hr == E_POINTER, "GetStreamLatency(NULL) call returns %08x\n", hr);

    hr = IAudioClient_GetStreamLatency(ac, &t1);
    ok(hr == S_OK, "Valid GetStreamLatency call returns %08x\n", hr);
    trace("Returned latency: %u.%05u ms\n",
            (UINT)(t1/10000), (UINT)(t1 % 10000));

    /* Re-initializing an initialized client must fail. */
    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0, 5000000, 0, pwfx, NULL);
    ok(hr == AUDCLNT_E_ALREADY_INITIALIZED, "Calling Initialize twice returns %08x\n", hr);

    hr = IAudioClient_SetEventHandle(ac, NULL);
    ok(hr == E_INVALIDARG, "SetEventHandle(NULL) returns %08x\n", hr);

    /* Stream was not initialized with EVENTCALLBACK; several Windows
     * releases return broken() error codes here instead. */
    hr = IAudioClient_SetEventHandle(ac, handle);
    ok(hr == AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED ||
            broken(hr == HRESULT_FROM_WIN32(ERROR_INVALID_NAME)) ||
            broken(hr == HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)) /* Some 2k8 */ ||
            broken(hr == HRESULT_FROM_WIN32(ERROR_BAD_PATHNAME)) /* Some Vista */ ,
            "SetEventHandle returns %08x\n", hr);

    /* Transport state machine on a freshly initialized (stopped) stream. */
    hr = IAudioClient_Reset(ac);
    ok(hr == S_OK, "Reset on a resetted stream returns %08x\n", hr);

    hr = IAudioClient_Stop(ac);
    ok(hr == S_FALSE, "Stop on a stopped stream returns %08x\n", hr);

    hr = IAudioClient_Start(ac);
    ok(hr == S_OK, "Start on a stopped stream returns %08x\n", hr);

    IAudioClient_Release(ac);
    CloseHandle(handle);
    CoTaskMemFree(pwfx);
}
/*
 * Negotiate the wave format to open the shared-mode audio client with.
 *
 * forcewave == FALSE: start from the engine mix format, clamp channels to
 * the configured speaker count, and prefer 32-bit IEEE float if the device
 * accepts it. forcewave == TRUE: use the primary buffer's format,
 * converting plain PCM/float headers to WAVEFORMATEXTENSIBLE.
 *
 * On S_OK, *wfx receives a heap-allocated format the caller owns
 * (HeapFree); on failure no allocation is leaked.
 */
static HRESULT DSOUND_WaveFormat(DirectSoundDevice *device, IAudioClient *client,
                                 BOOL forcewave, WAVEFORMATEX **wfx)
{
    WAVEFORMATEXTENSIBLE *retwfe = NULL;
    WAVEFORMATEX *w;
    HRESULT hr;

    if (!forcewave) {
        WAVEFORMATEXTENSIBLE *mixwfe;
        /* mixwfe is CoTaskMem-allocated by GetMixFormat; freed below. */
        hr = IAudioClient_GetMixFormat(client, (WAVEFORMATEX**)&mixwfe);

        if (FAILED(hr))
            return hr;

        if (mixwfe->Format.nChannels < device->num_speakers) {
            /* Fewer mixer channels than speakers: downgrade speaker config. */
            device->speaker_config = DSOUND_FindSpeakerConfig(device->mmdevice, mixwfe->Format.nChannels);
            DSOUND_ParseSpeakerConfig(device);
        } else if (mixwfe->Format.nChannels > device->num_speakers) {
            /* More mixer channels than speakers: clamp and recompute the
             * derived block-align / byte-rate fields and channel mask. */
            mixwfe->Format.nChannels = device->num_speakers;
            mixwfe->Format.nBlockAlign = mixwfe->Format.nChannels * mixwfe->Format.wBitsPerSample / 8;
            mixwfe->Format.nAvgBytesPerSec = mixwfe->Format.nSamplesPerSec * mixwfe->Format.nBlockAlign;
            mixwfe->dwChannelMask = speaker_config_to_channel_mask(device->speaker_config);
        }

        if (!IsEqualGUID(&mixwfe->SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) {
            /* Mix format is not float: probe a 32-bit float variant and use
             * it (or the device's closest match) when supported. */
            WAVEFORMATEXTENSIBLE testwfe = *mixwfe;

            testwfe.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
            testwfe.Samples.wValidBitsPerSample = testwfe.Format.wBitsPerSample = 32;
            testwfe.Format.nBlockAlign = testwfe.Format.nChannels * testwfe.Format.wBitsPerSample / 8;
            testwfe.Format.nAvgBytesPerSec = testwfe.Format.nSamplesPerSec * testwfe.Format.nBlockAlign;

            if (FAILED(IAudioClient_IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED, &testwfe.Format, (WAVEFORMATEX**)&retwfe)))
                w = DSOUND_CopyFormat(&mixwfe->Format);
            else if (retwfe)
                w = DSOUND_CopyFormat(&retwfe->Format);
            else
                w = DSOUND_CopyFormat(&testwfe.Format);
            CoTaskMemFree(retwfe);
            retwfe = NULL;
        } else
            w = DSOUND_CopyFormat(&mixwfe->Format);
        CoTaskMemFree(mixwfe);
    } else if (device->primary_pwfx->wFormatTag == WAVE_FORMAT_PCM ||
               device->primary_pwfx->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) {
        WAVEFORMATEX *wi = device->primary_pwfx;
        WAVEFORMATEXTENSIBLE *wfe;

        /* Convert to WAVEFORMATEXTENSIBLE */
        w = HeapAlloc(GetProcessHeap(), 0, sizeof(WAVEFORMATEXTENSIBLE));
        wfe = (WAVEFORMATEXTENSIBLE*)w;
        if (!wfe)
            return DSERR_OUTOFMEMORY;
        wfe->Format = *wi;
        w->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
        w->cbSize = sizeof(*wfe) - sizeof(*w);
        w->nBlockAlign = w->nChannels * w->wBitsPerSample / 8;
        w->nAvgBytesPerSec = w->nSamplesPerSec * w->nBlockAlign;
        wfe->dwChannelMask = 0;
        if (wi->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) {
            /* NOTE(review): wBitsPerSample is forced to 32 after nBlockAlign/
             * nAvgBytesPerSec were computed above; consistent only if the
             * source float format was already 32-bit — confirm upstream. */
            w->wBitsPerSample = 32;
            wfe->SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
        } else
            wfe->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        wfe->Samples.wValidBitsPerSample = w->wBitsPerSample;
    } else
        /* Already extensible (or other tag): duplicate as-is. */
        w = DSOUND_CopyFormat(device->primary_pwfx);

    if (!w)
        return DSERR_OUTOFMEMORY;

    /* Final validation; adopt the closest match in place if one comes back.
     * Safe because retwfe's size is bounded by what the device returns for
     * a WAVEFORMATEXTENSIBLE-sized request. */
    hr = IAudioClient_IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED, w, (WAVEFORMATEX**)&retwfe);
    if (retwfe) {
        memcpy(w, retwfe, sizeof(WAVEFORMATEX) + retwfe->Format.cbSize);
        CoTaskMemFree(retwfe);
    }
    if (FAILED(hr)) {
        WARN("IsFormatSupported failed: %08x\n", hr);
        HeapFree(GetProcessHeap(), 0, w);
        return hr;
    }
    *wfx = w;
    return S_OK;
}
.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX), }, .Samples.wValidBitsPerSample = 16, .dwChannelMask = mp_chmap_to_waveext(&ao->channels), .SubFormat = local_KSDATAFORMAT_SUBTYPE_PCM, }; wformat.SubFormat.Data1 = WAVE_FORMAT_DOLBY_AC3_SPDIF; // see INIT_WAVEFORMATEX_GUID macro union WAVEFMT u; u.extensible = &wformat; MP_VERBOSE(ao, "trying passthrough for %s...\n", af_fmt_to_str((ao->format&~AF_FORMAT_END_MASK) | AF_FORMAT_LE)); HRESULT hr = IAudioClient_IsFormatSupported(state->pAudioClient, state->share_mode, u.ex, NULL); if (!FAILED(hr)) { ao->format = (ao->format&~AF_FORMAT_END_MASK) | AF_FORMAT_LE; state->format = wformat; return 1; } return 0; } static int find_formats(struct ao *const ao) { struct wasapi_state *state = (struct wasapi_state *)ao->priv; if (AF_FORMAT_IS_IEC61937(ao->format) || ao->format == AF_FORMAT_MPEG2) { if (try_passthrough(state, ao))
/*
 * (Re)configure the shared-mode audio client for the current ALCdevice
 * settings: negotiate sample rate / channel layout / sample type against
 * the device mix format, initialize the client with an event callback,
 * and derive UpdateSize/NumUpdates from the device period and buffer size.
 *
 * Returns S_OK on success or the failing HRESULT; device fields are
 * updated to reflect whatever format was actually accepted.
 */
static HRESULT DoReset(ALCdevice *device)
{
    MMDevApiData *data = device->ExtraData;
    WAVEFORMATEXTENSIBLE OutputType;
    WAVEFORMATEX *wfx = NULL;
    REFERENCE_TIME min_per, buf_time;
    UINT32 buffer_len, min_len;
    HRESULT hr;

    /* Start negotiation from the engine mix format. */
    hr = IAudioClient_GetMixFormat(data->client, &wfx);
    if(FAILED(hr))
    {
        ERR("Failed to get mix format: 0x%08lx\n", hr);
        return hr;
    }

    if(!MakeExtensible(&OutputType, wfx))
    {
        CoTaskMemFree(wfx);
        return E_FAIL;
    }
    CoTaskMemFree(wfx);
    wfx = NULL;

    /* Requested total buffer length in 100-ns units, rounded up. */
    buf_time = ((REFERENCE_TIME)device->UpdateSize*device->NumUpdates*10000000 +
                                device->Frequency-1) / device->Frequency;

    /* Only override frequency/channels when the app did not request them. */
    if(!(device->Flags&DEVICE_FREQUENCY_REQUEST))
        device->Frequency = OutputType.Format.nSamplesPerSec;
    if(!(device->Flags&DEVICE_CHANNELS_REQUEST))
    {
        /* Map the mix format's channel count + mask to a known layout. */
        if(OutputType.Format.nChannels == 1 && OutputType.dwChannelMask == MONO)
            device->FmtChans = DevFmtMono;
        else if(OutputType.Format.nChannels == 2 && OutputType.dwChannelMask == STEREO)
            device->FmtChans = DevFmtStereo;
        else if(OutputType.Format.nChannels == 4 && OutputType.dwChannelMask == QUAD)
            device->FmtChans = DevFmtQuad;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1)
            device->FmtChans = DevFmtX51;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1SIDE)
            device->FmtChans = DevFmtX51Side;
        else if(OutputType.Format.nChannels == 7 && OutputType.dwChannelMask == X6DOT1)
            device->FmtChans = DevFmtX61;
        else if(OutputType.Format.nChannels == 8 && OutputType.dwChannelMask == X7DOT1)
            device->FmtChans = DevFmtX71;
        else
            ERR("Unhandled channel config: %d -- 0x%08lx\n", OutputType.Format.nChannels, OutputType.dwChannelMask);
    }

    /* Build the request format from the (possibly updated) device layout. */
    switch(device->FmtChans)
    {
        case DevFmtMono:
            OutputType.Format.nChannels = 1;
            OutputType.dwChannelMask = MONO;
            break;
        case DevFmtStereo:
            OutputType.Format.nChannels = 2;
            OutputType.dwChannelMask = STEREO;
            break;
        case DevFmtQuad:
            OutputType.Format.nChannels = 4;
            OutputType.dwChannelMask = QUAD;
            break;
        case DevFmtX51:
            OutputType.Format.nChannels = 6;
            OutputType.dwChannelMask = X5DOT1;
            break;
        case DevFmtX51Side:
            OutputType.Format.nChannels = 6;
            OutputType.dwChannelMask = X5DOT1SIDE;
            break;
        case DevFmtX61:
            OutputType.Format.nChannels = 7;
            OutputType.dwChannelMask = X6DOT1;
            break;
        case DevFmtX71:
            OutputType.Format.nChannels = 8;
            OutputType.dwChannelMask = X7DOT1;
            break;
    }

    /* Unsigned 8-bit and signed 16/32-bit are native; the other signedness
     * variants are coerced to the supported one before requesting. */
    switch(device->FmtType)
    {
        case DevFmtByte:
            device->FmtType = DevFmtUByte;
            /* fall-through */
        case DevFmtUByte:
            OutputType.Format.wBitsPerSample = 8;
            OutputType.Samples.wValidBitsPerSample = 8;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            break;
        case DevFmtUShort:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            OutputType.Format.wBitsPerSample = 16;
            OutputType.Samples.wValidBitsPerSample = 16;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            break;
        case DevFmtUInt:
            device->FmtType = DevFmtInt;
            /* fall-through */
        case DevFmtInt:
            OutputType.Format.wBitsPerSample = 32;
            OutputType.Samples.wValidBitsPerSample = 32;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            break;
        case DevFmtFloat:
            OutputType.Format.wBitsPerSample = 32;
            OutputType.Samples.wValidBitsPerSample = 32;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
            break;
    }
    OutputType.Format.nSamplesPerSec = device->Frequency;

    OutputType.Format.nBlockAlign = OutputType.Format.nChannels *
                                    OutputType.Format.wBitsPerSample / 8;
    OutputType.Format.nAvgBytesPerSec = OutputType.Format.nSamplesPerSec *
                                        OutputType.Format.nBlockAlign;

    hr = IAudioClient_IsFormatSupported(data->client, AUDCLNT_SHAREMODE_SHARED, &OutputType.Format, &wfx);
    if(FAILED(hr))
    {
        /* Request rejected outright: fall back to the device mix format. */
        ERR("Failed to check format support: 0x%08lx\n", hr);
        hr = IAudioClient_GetMixFormat(data->client, &wfx);
    }
    if(FAILED(hr))
    {
        ERR("Failed to find a supported format: 0x%08lx\n", hr);
        return hr;
    }

    if(wfx != NULL)
    {
        /* A closest match (or mix format) came back: adopt it and map it
         * back onto the device fields, forcing supported fallbacks for any
         * layout/sample-type we cannot represent. */
        if(!MakeExtensible(&OutputType, wfx))
        {
            CoTaskMemFree(wfx);
            return E_FAIL;
        }
        CoTaskMemFree(wfx);
        wfx = NULL;

        device->Frequency = OutputType.Format.nSamplesPerSec;
        if(OutputType.Format.nChannels == 1 && OutputType.dwChannelMask == MONO)
            device->FmtChans = DevFmtMono;
        else if(OutputType.Format.nChannels == 2 && OutputType.dwChannelMask == STEREO)
            device->FmtChans = DevFmtStereo;
        else if(OutputType.Format.nChannels == 4 && OutputType.dwChannelMask == QUAD)
            device->FmtChans = DevFmtQuad;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1)
            device->FmtChans = DevFmtX51;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1SIDE)
            device->FmtChans = DevFmtX51Side;
        else if(OutputType.Format.nChannels == 7 && OutputType.dwChannelMask == X6DOT1)
            device->FmtChans = DevFmtX61;
        else if(OutputType.Format.nChannels == 8 && OutputType.dwChannelMask == X7DOT1)
            device->FmtChans = DevFmtX71;
        else
        {
            ERR("Unhandled extensible channels: %d -- 0x%08lx\n", OutputType.Format.nChannels, OutputType.dwChannelMask);
            device->FmtChans = DevFmtStereo;
            OutputType.Format.nChannels = 2;
            OutputType.dwChannelMask = STEREO;
        }

        if(IsEqualGUID(&OutputType.SubFormat, &KSDATAFORMAT_SUBTYPE_PCM))
        {
            if(OutputType.Format.wBitsPerSample == 8)
                device->FmtType = DevFmtUByte;
            else if(OutputType.Format.wBitsPerSample == 16)
                device->FmtType = DevFmtShort;
            else if(OutputType.Format.wBitsPerSample == 32)
                device->FmtType = DevFmtInt;
            else
            {
                device->FmtType = DevFmtShort;
                OutputType.Format.wBitsPerSample = 16;
            }
        }
        else if(IsEqualGUID(&OutputType.SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
        {
            device->FmtType = DevFmtFloat;
            OutputType.Format.wBitsPerSample = 32;
        }
        else
        {
            ERR("Unhandled format sub-type\n");
            device->FmtType = DevFmtShort;
            OutputType.Format.wBitsPerSample = 16;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        }
        OutputType.Samples.wValidBitsPerSample = OutputType.Format.wBitsPerSample;
    }
    SetDefaultWFXChannelOrder(device);

    hr = IAudioClient_Initialize(data->client, AUDCLNT_SHAREMODE_SHARED,
                                 AUDCLNT_STREAMFLAGS_EVENTCALLBACK, buf_time,
                                 0, &OutputType.Format, NULL);
    if(FAILED(hr))
    {
        ERR("Failed to initialize audio client: 0x%08lx\n", hr);
        return hr;
    }

    hr = IAudioClient_GetDevicePeriod(data->client, &min_per, NULL);
    if(SUCCEEDED(hr))
    {
        /* Minimum period in frames, rounded up from 100-ns units. */
        min_len = (UINT32)((min_per*device->Frequency + 10000000-1) / 10000000);
        /* Find the nearest multiple of the period size to the update size */
        if(min_len < device->UpdateSize)
            min_len *= (device->UpdateSize + min_len/2)/min_len;
        hr = IAudioClient_GetBufferSize(data->client, &buffer_len);
    }
    if(FAILED(hr))
    {
        ERR("Failed to get audio buffer info: 0x%08lx\n", hr);
        return hr;
    }

    device->UpdateSize = min_len;
    device->NumUpdates = buffer_len / device->UpdateSize;
    if(device->NumUpdates <= 1)
    {
        /* Need at least two periods of buffer to double-buffer cleanly. */
        ERR("Audio client returned buffer_len < period*2; expect break up\n");
        device->NumUpdates = 2;
        device->UpdateSize = buffer_len / device->NumUpdates;
    }

    return hr;
}