Example #1
File: capture.c  Project: Barrell/wine
static DWORD WINAPI DSOUND_capture_thread(void *user)
{
    IDirectSoundCaptureBufferImpl *buffer = user;
    HRESULT hr;
    DWORD ret, wait_ms;
    REFERENCE_TIME period;

    hr = IAudioClient_GetDevicePeriod(buffer->device->client, &period, NULL);
    if(FAILED(hr)){
        WARN("GetDevicePeriod failed: %08x\n", hr);
        wait_ms = 5;
    }else
        wait_ms = MulDiv(5, period, 10000);

    while(buffer->ref){
        ret = WaitForSingleObject(buffer->sleepev, wait_ms);

        if(!buffer->device->ref)
            break;

        if(ret == WAIT_OBJECT_0){
            EnterCriticalSection(&buffer->device->lock);

            DSOUND_capture_data(buffer->device);

            LeaveCriticalSection(&buffer->device->lock);
        }else if(ret != WAIT_TIMEOUT)
            WARN("WaitForSingleObject failed: %u\n", GetLastError());
    }

    return 0;
}
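Every example on this page assumes an IAudioClient that has already been activated from an IMMDevice. For orientation, here is a minimal, hedged sketch (not taken from any of the projects listed here) that obtains the default render endpoint, activates IAudioClient, and prints both device periods; it assumes COBJMACROS and linking against ole32:

#define COBJMACROS
#include <windows.h>
#include <initguid.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <stdio.h>

int main(void)
{
    IMMDeviceEnumerator *enumerator = NULL;
    IMMDevice *device = NULL;
    IAudioClient *client = NULL;
    REFERENCE_TIME defp = 0, minp = 0;
    HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);

    if (FAILED(hr))
        return 1;

    hr = CoCreateInstance(&CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL,
                          &IID_IMMDeviceEnumerator, (void**)&enumerator);
    if (SUCCEEDED(hr))
        hr = IMMDeviceEnumerator_GetDefaultAudioEndpoint(enumerator, eRender,
                                                         eConsole, &device);
    if (SUCCEEDED(hr))
        hr = IMMDevice_Activate(device, &IID_IAudioClient, CLSCTX_ALL,
                                NULL, (void**)&client);
    if (SUCCEEDED(hr))
        hr = IAudioClient_GetDevicePeriod(client, &defp, &minp);
    if (SUCCEEDED(hr))
        /* REFERENCE_TIME counts 100-nanosecond units: 10000 per millisecond */
        printf("default period %.2f ms, minimum period %.2f ms\n",
               defp / 10000.0, minp / 10000.0);

    if (client) IAudioClient_Release(client);
    if (device) IMMDevice_Release(device);
    if (enumerator) IMMDeviceEnumerator_Release(enumerator);
    CoUninitialize();
    return FAILED(hr) ? 1 : 0;
}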
Example #2
static DWORD DSOUND_fraglen(DirectSoundDevice *device)
{
    REFERENCE_TIME period;
    HRESULT hr;
    DWORD ret;

    hr = IAudioClient_GetDevicePeriod(device->client, &period, NULL);
    if(FAILED(hr)){
        /* just guess at 10ms */
        WARN("GetDevicePeriod failed: %08x\n", hr);
        ret = MulDiv(device->pwfx->nBlockAlign, device->pwfx->nSamplesPerSec, 100);
    }else
        ret = MulDiv(device->pwfx->nSamplesPerSec * device->pwfx->nBlockAlign, period, 10000000);

    ret -= ret % device->pwfx->nBlockAlign;
    return ret;
}
Example #3
JNIEXPORT jlong JNICALL
Java_org_jitsi_impl_neomedia_jmfext_media_protocol_wasapi_WASAPI_IAudioClient_1GetMinimumDevicePeriod
    (JNIEnv *env, jclass clazz, jlong thiz)
{
    HRESULT hr;
    REFERENCE_TIME hnsDefaultDevicePeriod;
    REFERENCE_TIME hnsMinimumDevicePeriod;

    hr
        = IAudioClient_GetDevicePeriod(
                (IAudioClient *) (intptr_t) thiz,
                &hnsDefaultDevicePeriod,
                &hnsMinimumDevicePeriod);
    if (FAILED(hr))
    {
        hnsMinimumDevicePeriod = 0;
        WASAPI_throwNewHResultException(env, hr, __func__, __LINE__);
    }
    return (jlong) hnsMinimumDevicePeriod;
}
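Examples #1 and #2 both convert the 100-nanosecond REFERENCE_TIME period into a wait interval or a fragment size. The helpers below are a hypothetical restatement of that arithmetic, not part of any listed project; they assume the usual Windows multimedia headers:

#include <windows.h>
#include <audioclient.h> /* REFERENCE_TIME */

/* Hypothetical helpers mirroring the arithmetic above: a REFERENCE_TIME is
 * expressed in 100-nanosecond units, i.e. 10,000,000 per second. */
static UINT32 period_to_frames(REFERENCE_TIME period, DWORD samples_per_sec)
{
    return MulDiv(samples_per_sec, period, 10000000);
}

static DWORD period_to_bytes(REFERENCE_TIME period, const WAVEFORMATEX *wfx)
{
    DWORD bytes = MulDiv(wfx->nSamplesPerSec * wfx->nBlockAlign, period, 10000000);
    return bytes - bytes % wfx->nBlockAlign; /* round down to whole frames */
}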
Example #4
File: render.c  Project: diosmosis/wine
static void test_audioclient(void)
{
    IAudioClient *ac;
    IUnknown *unk;
    HRESULT hr;
    ULONG ref;
    WAVEFORMATEX *pwfx, *pwfx2;
    REFERENCE_TIME t1, t2;
    HANDLE handle;

    hr = IMMDevice_Activate(dev, &IID_IAudioClient, CLSCTX_INPROC_SERVER,
            NULL, (void**)&ac);
    ok(hr == S_OK, "Activation failed with %08x\n", hr);
    if(hr != S_OK)
        return;

    handle = CreateEventW(NULL, FALSE, FALSE, NULL);

    hr = IAudioClient_QueryInterface(ac, &IID_IUnknown, NULL);
    ok(hr == E_POINTER, "QueryInterface(NULL) returned %08x\n", hr);

    unk = (void*)(LONG_PTR)0x12345678;
    hr = IAudioClient_QueryInterface(ac, &IID_NULL, (void**)&unk);
    ok(hr == E_NOINTERFACE, "QueryInterface(IID_NULL) returned %08x\n", hr);
    ok(!unk, "QueryInterface(IID_NULL) returned non-null pointer %p\n", unk);

    hr = IAudioClient_QueryInterface(ac, &IID_IUnknown, (void**)&unk);
    ok(hr == S_OK, "QueryInterface(IID_IUnknown) returned %08x\n", hr);
    if (unk)
    {
        ref = IUnknown_Release(unk);
        ok(ref == 1, "Released count is %u\n", ref);
    }

    hr = IAudioClient_QueryInterface(ac, &IID_IAudioClient, (void**)&unk);
    ok(hr == S_OK, "QueryInterface(IID_IAudioClient) returned %08x\n", hr);
    if (unk)
    {
        ref = IUnknown_Release(unk);
        ok(ref == 1, "Released count is %u\n", ref);
    }

    hr = IAudioClient_GetDevicePeriod(ac, NULL, NULL);
    ok(hr == E_POINTER, "Invalid GetDevicePeriod call returns %08x\n", hr);

    hr = IAudioClient_GetDevicePeriod(ac, &t1, NULL);
    ok(hr == S_OK, "Valid GetDevicePeriod call returns %08x\n", hr);

    hr = IAudioClient_GetDevicePeriod(ac, NULL, &t2);
    ok(hr == S_OK, "Valid GetDevicePeriod call returns %08x\n", hr);

    hr = IAudioClient_GetDevicePeriod(ac, &t1, &t2);
    ok(hr == S_OK, "Valid GetDevicePeriod call returns %08x\n", hr);
    trace("Returned periods: %u.%05u ms %u.%05u ms\n",
          (UINT)(t1/10000), (UINT)(t1 % 10000),
          (UINT)(t2/10000), (UINT)(t2 % 10000));

    hr = IAudioClient_GetMixFormat(ac, NULL);
    ok(hr == E_POINTER, "GetMixFormat returns %08x\n", hr);

    hr = IAudioClient_GetMixFormat(ac, &pwfx);
    ok(hr == S_OK, "Valid GetMixFormat returns %08x\n", hr);

    if (hr == S_OK)
    {
        trace("pwfx: %p\n", pwfx);
        trace("Tag: %04x\n", pwfx->wFormatTag);
        trace("bits: %u\n", pwfx->wBitsPerSample);
        trace("chan: %u\n", pwfx->nChannels);
        trace("rate: %u\n", pwfx->nSamplesPerSec);
        trace("align: %u\n", pwfx->nBlockAlign);
        trace("extra: %u\n", pwfx->cbSize);
        ok(pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE, "wFormatTag is %x\n", pwfx->wFormatTag);
        if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
        {
            WAVEFORMATEXTENSIBLE *pwfxe = (void*)pwfx;
            trace("Res: %u\n", pwfxe->Samples.wReserved);
            trace("Mask: %x\n", pwfxe->dwChannelMask);
            trace("Alg: %s\n",
                  IsEqualGUID(&pwfxe->SubFormat, &KSDATAFORMAT_SUBTYPE_PCM)?"PCM":
                  (IsEqualGUID(&pwfxe->SubFormat,
                               &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)?"FLOAT":"Other"));
        }

        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_SHARED, pwfx, &pwfx2);
        ok(hr == S_OK, "Valid IsFormatSupported(Shared) call returns %08x\n", hr);
        ok(pwfx2 == NULL, "pwfx2 is non-null\n");
        CoTaskMemFree(pwfx2);

        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_SHARED, NULL, NULL);
        ok(hr == E_POINTER, "IsFormatSupported(NULL) call returns %08x\n", hr);

        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_SHARED, pwfx, NULL);
        ok(hr == E_POINTER, "IsFormatSupported(Shared,NULL) call returns %08x\n", hr);

        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_EXCLUSIVE, pwfx, NULL);
        ok(hr == S_OK || hr == AUDCLNT_E_UNSUPPORTED_FORMAT, "IsFormatSupported(Exclusive) call returns %08x\n", hr);

        hr = IAudioClient_IsFormatSupported(ac, AUDCLNT_SHAREMODE_EXCLUSIVE, pwfx, &pwfx2);
        ok(hr == S_OK || hr == AUDCLNT_E_UNSUPPORTED_FORMAT, "IsFormatSupported(Exclusive) call returns %08x\n", hr);
        ok(pwfx2 == NULL, "pwfx2 non-null on exclusive IsFormatSupported\n");

        hr = IAudioClient_IsFormatSupported(ac, 0xffffffff, pwfx, NULL);
        ok(hr == E_INVALIDARG ||
           hr == AUDCLNT_E_UNSUPPORTED_FORMAT,
           "IsFormatSupported(0xffffffff) call returns %08x\n", hr);
    }

    test_uninitialized(ac);

    hr = IAudioClient_Initialize(ac, 3, 0, 5000000, 0, pwfx, NULL);
    ok(hr == AUDCLNT_E_NOT_INITIALIZED, "Initialize with invalid sharemode returns %08x\n", hr);

    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0xffffffff, 5000000, 0, pwfx, NULL);
    ok(hr == E_INVALIDARG, "Initialize with invalid flags returns %08x\n", hr);

    /* It seems that if length > 2s or periodicity != 0 the length is ignored and call succeeds
     * Since we can only initialize successfully once, skip those tests.
     */
    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0, 5000000, 0, NULL, NULL);
    ok(hr == E_POINTER, "Initialize with null format returns %08x\n", hr);

    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0, 5000000, 0, pwfx, NULL);
    ok(hr == S_OK, "Valid Initialize returns %08x\n", hr);

    if (hr != S_OK)
    {
        skip("Cannot initialize %08x, remainder of tests is useless\n", hr);
        CoTaskMemFree(pwfx);
        return;
    }

    hr = IAudioClient_GetStreamLatency(ac, NULL);
    ok(hr == E_POINTER, "GetStreamLatency(NULL) call returns %08x\n", hr);

    hr = IAudioClient_GetStreamLatency(ac, &t1);
    ok(hr == S_OK, "Valid GetStreamLatency call returns %08x\n", hr);
    trace("Returned latency: %u.%05u ms\n",
          (UINT)(t1/10000), (UINT)(t1 % 10000));

    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED, 0, 5000000, 0, pwfx, NULL);
    ok(hr == AUDCLNT_E_ALREADY_INITIALIZED, "Calling Initialize twice returns %08x\n", hr);

    hr = IAudioClient_SetEventHandle(ac, NULL);
    ok(hr == E_INVALIDARG, "SetEventHandle(NULL) returns %08x\n", hr);

    hr = IAudioClient_SetEventHandle(ac, handle);
    ok(hr == AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED ||
       broken(hr == HRESULT_FROM_WIN32(ERROR_INVALID_NAME)) ||
       broken(hr == HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)) /* Some 2k8 */ ||
       broken(hr == HRESULT_FROM_WIN32(ERROR_BAD_PATHNAME)) /* Some Vista */
       , "SetEventHandle returns %08x\n", hr);

    hr = IAudioClient_Reset(ac);
    ok(hr == S_OK, "Reset on a resetted stream returns %08x\n", hr);

    hr = IAudioClient_Stop(ac);
    ok(hr == S_FALSE, "Stop on a stopped stream returns %08x\n", hr);

    hr = IAudioClient_Start(ac);
    ok(hr == S_OK, "Start on a stopped stream returns %08x\n", hr);

    IAudioClient_Release(ac);

    CloseHandle(handle);
    CoTaskMemFree(pwfx);
}
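The test above initializes in shared mode with a fixed 500 ms buffer. A common production pattern instead sizes the buffer from the default device period and drives the stream by event callback; the sketch below is a hedged illustration of that pattern (the helper name is hypothetical, error handling is minimal):

#define COBJMACROS
#include <windows.h>
#include <audioclient.h>

/* Hypothetical helper: shared-mode, event-driven initialization sized to the
 * default device period. In shared mode the periodicity argument must be 0. */
static HRESULT init_shared_event_driven(IAudioClient *client, HANDLE event)
{
    WAVEFORMATEX *mix = NULL;
    REFERENCE_TIME defp = 0, minp = 0;
    HRESULT hr;

    hr = IAudioClient_GetMixFormat(client, &mix);
    if (FAILED(hr))
        return hr;

    hr = IAudioClient_GetDevicePeriod(client, &defp, &minp);
    if (FAILED(hr))
        defp = 100000; /* fall back to 10 ms, expressed in 100-ns units */

    hr = IAudioClient_Initialize(client, AUDCLNT_SHAREMODE_SHARED,
                                 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                 defp, 0, mix, NULL);
    CoTaskMemFree(mix);
    if (FAILED(hr))
        return hr;

    return IAudioClient_SetEventHandle(client, event);
}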
Example #5
File: render.c  Project: diosmosis/wine
static void test_padding(void)
{
    HRESULT hr;
    IAudioClient *ac;
    IAudioRenderClient *arc;
    WAVEFORMATEX *pwfx;
    REFERENCE_TIME minp, defp;
    BYTE *buf;
    UINT32 psize, pad, written;

    hr = IMMDevice_Activate(dev, &IID_IAudioClient, CLSCTX_INPROC_SERVER,
            NULL, (void**)&ac);
    ok(hr == S_OK, "Activation failed with %08x\n", hr);
    if(hr != S_OK)
        return;

    hr = IAudioClient_GetMixFormat(ac, &pwfx);
    ok(hr == S_OK, "GetMixFormat failed: %08x\n", hr);
    if(hr != S_OK)
        return;

    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED,
            0, 5000000, 0, pwfx, NULL);
    ok(hr == S_OK, "Initialize failed: %08x\n", hr);

    hr = IAudioClient_GetDevicePeriod(ac, &defp, &minp);
    ok(hr == S_OK, "GetDevicePeriod failed: %08x\n", hr);
    ok(defp != 0, "Default period is 0\n");
    ok(minp != 0, "Minimum period is 0\n");
    ok(minp <= defp, "Minimum period is greater than default period\n");

    hr = IAudioClient_GetService(ac, &IID_IAudioRenderClient, (void**)&arc);
    ok(hr == S_OK, "GetService failed: %08x\n", hr);

    psize = (defp / 10000000.) * pwfx->nSamplesPerSec * pwfx->nBlockAlign;

    written = 0;
    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding failed: %08x\n", hr);
    ok(pad == written, "GetCurrentPadding returned %u, should be %u\n", pad, written);

    hr = IAudioRenderClient_GetBuffer(arc, psize, &buf);
    ok(hr == S_OK, "GetBuffer failed: %08x\n", hr);
    ok(buf != NULL, "NULL buffer returned\n");

    hr = IAudioRenderClient_ReleaseBuffer(arc, psize,
            AUDCLNT_BUFFERFLAGS_SILENT);
    ok(hr == S_OK, "ReleaseBuffer failed: %08x\n", hr);
    written += psize;

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding failed: %08x\n", hr);
    ok(pad == written, "GetCurrentPadding returned %u, should be %u\n", pad, written);

    psize = (minp / 10000000.) * pwfx->nSamplesPerSec * pwfx->nBlockAlign;

    hr = IAudioRenderClient_GetBuffer(arc, psize, &buf);
    ok(hr == S_OK, "GetBuffer failed: %08x\n", hr);
    ok(buf != NULL, "NULL buffer returned\n");

    hr = IAudioRenderClient_ReleaseBuffer(arc, psize,
            AUDCLNT_BUFFERFLAGS_SILENT);
    ok(hr == S_OK, "ReleaseBuffer failed: %08x\n", hr);
    written += psize;

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding failed: %08x\n", hr);
    ok(pad == written, "GetCurrentPadding returned %u, should be %u\n", pad, written);

    /* overfull buffer. requested 1/2s buffer size, so try
     * to get a 1/2s buffer, which should fail */
    psize = pwfx->nSamplesPerSec / 2.;
    hr = IAudioRenderClient_GetBuffer(arc, psize, &buf);
    ok(hr == AUDCLNT_E_BUFFER_TOO_LARGE, "GetBuffer gave wrong error: %08x\n", hr);

    hr = IAudioRenderClient_ReleaseBuffer(arc, psize, 0);
    ok(hr == AUDCLNT_E_OUT_OF_ORDER, "ReleaseBuffer gave wrong error: %08x\n", hr);

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding failed: %08x\n", hr);
    ok(pad == written, "GetCurrentPadding returned %u, should be %u\n", pad, written);

    CoTaskMemFree(pwfx);

    IAudioRenderClient_Release(arc);
    IAudioClient_Release(ac);
}
Example #6
static gboolean
gst_wasapi_sink_prepare (GstAudioSink * asink, GstAudioRingBufferSpec * spec)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);
  gboolean res = FALSE;
  HRESULT hr;
  REFERENCE_TIME latency_rt, def_period, min_period;
  WAVEFORMATEXTENSIBLE format;
  IAudioRenderClient *render_client = NULL;

  hr = IAudioClient_GetDevicePeriod (self->client, &def_period, &min_period);
  if (hr != S_OK) {
    GST_ERROR_OBJECT (self, "IAudioClient::GetDevicePeriod () failed");
    goto beach;
  }

  gst_wasapi_util_audio_info_to_waveformatex (&spec->info, &format);
  self->info = spec->info;

  hr = IAudioClient_Initialize (self->client, AUDCLNT_SHAREMODE_SHARED,
      AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
      spec->buffer_time / 100, 0, (WAVEFORMATEX *) & format, NULL);
  if (hr != S_OK) {
    GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ, (NULL),
        ("IAudioClient::Initialize () failed: %s",
            gst_wasapi_util_hresult_to_string (hr)));
    goto beach;
  }

  hr = IAudioClient_GetStreamLatency (self->client, &latency_rt);
  if (hr != S_OK) {
    GST_ERROR_OBJECT (self, "IAudioClient::GetStreamLatency () failed");
    goto beach;
  }

  GST_INFO_OBJECT (self, "default period: %d (%d ms), "
      "minimum period: %d (%d ms), "
      "latency: %d (%d ms)",
      (guint32) def_period, (guint32) def_period / 10000,
      (guint32) min_period, (guint32) min_period / 10000,
      (guint32) latency_rt, (guint32) latency_rt / 10000);

  /* FIXME: What to do with the latency? */

  hr = IAudioClient_SetEventHandle (self->client, self->event_handle);
  if (hr != S_OK) {
    GST_ERROR_OBJECT (self, "IAudioClient::SetEventHandle () failed");
    goto beach;
  }

  if (!gst_wasapi_util_get_render_client (GST_ELEMENT (self), self->client,
          &render_client)) {
    goto beach;
  }

  hr = IAudioClient_Start (self->client);
  if (hr != S_OK) {
    GST_ERROR_OBJECT (self, "IAudioClient::Start failed");
    goto beach;
  }

  self->render_client = render_client;
  render_client = NULL;

  res = TRUE;

beach:
  if (render_client != NULL)
    IUnknown_Release (render_client);

  return res;
}
Example #7
File: mmdevapi.c  Project: jims/openal-soft
static HRESULT DoReset(ALCdevice *device)
{
    MMDevApiData *data = device->ExtraData;
    WAVEFORMATEXTENSIBLE OutputType;
    WAVEFORMATEX *wfx = NULL;
    REFERENCE_TIME min_per, buf_time;
    UINT32 buffer_len, min_len;
    HRESULT hr;

    hr = IAudioClient_GetMixFormat(data->client, &wfx);
    if(FAILED(hr))
    {
        ERR("Failed to get mix format: 0x%08lx\n", hr);
        return hr;
    }

    if(!MakeExtensible(&OutputType, wfx))
    {
        CoTaskMemFree(wfx);
        return E_FAIL;
    }
    CoTaskMemFree(wfx);
    wfx = NULL;

    buf_time = ((REFERENCE_TIME)device->UpdateSize*device->NumUpdates*10000000 +
                                device->Frequency-1) / device->Frequency;

    if(!(device->Flags&DEVICE_FREQUENCY_REQUEST))
        device->Frequency = OutputType.Format.nSamplesPerSec;
    if(!(device->Flags&DEVICE_CHANNELS_REQUEST))
    {
        if(OutputType.Format.nChannels == 1 && OutputType.dwChannelMask == MONO)
            device->FmtChans = DevFmtMono;
        else if(OutputType.Format.nChannels == 2 && OutputType.dwChannelMask == STEREO)
            device->FmtChans = DevFmtStereo;
        else if(OutputType.Format.nChannels == 4 && OutputType.dwChannelMask == QUAD)
            device->FmtChans = DevFmtQuad;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1)
            device->FmtChans = DevFmtX51;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1SIDE)
            device->FmtChans = DevFmtX51Side;
        else if(OutputType.Format.nChannels == 7 && OutputType.dwChannelMask == X6DOT1)
            device->FmtChans = DevFmtX61;
        else if(OutputType.Format.nChannels == 8 && OutputType.dwChannelMask == X7DOT1)
            device->FmtChans = DevFmtX71;
        else
            ERR("Unhandled channel config: %d -- 0x%08lx\n", OutputType.Format.nChannels, OutputType.dwChannelMask);
    }

    switch(device->FmtChans)
    {
        case DevFmtMono:
            OutputType.Format.nChannels = 1;
            OutputType.dwChannelMask = MONO;
            break;
        case DevFmtStereo:
            OutputType.Format.nChannels = 2;
            OutputType.dwChannelMask = STEREO;
            break;
        case DevFmtQuad:
            OutputType.Format.nChannels = 4;
            OutputType.dwChannelMask = QUAD;
            break;
        case DevFmtX51:
            OutputType.Format.nChannels = 6;
            OutputType.dwChannelMask = X5DOT1;
            break;
        case DevFmtX51Side:
            OutputType.Format.nChannels = 6;
            OutputType.dwChannelMask = X5DOT1SIDE;
            break;
        case DevFmtX61:
            OutputType.Format.nChannels = 7;
            OutputType.dwChannelMask = X6DOT1;
            break;
        case DevFmtX71:
            OutputType.Format.nChannels = 8;
            OutputType.dwChannelMask = X7DOT1;
            break;
    }
    switch(device->FmtType)
    {
        case DevFmtByte:
            device->FmtType = DevFmtUByte;
            /* fall-through */
        case DevFmtUByte:
            OutputType.Format.wBitsPerSample = 8;
            OutputType.Samples.wValidBitsPerSample = 8;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            break;
        case DevFmtUShort:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            OutputType.Format.wBitsPerSample = 16;
            OutputType.Samples.wValidBitsPerSample = 16;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            break;
        case DevFmtUInt:
            device->FmtType = DevFmtInt;
            /* fall-through */
        case DevFmtInt:
            OutputType.Format.wBitsPerSample = 32;
            OutputType.Samples.wValidBitsPerSample = 32;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            break;
        case DevFmtFloat:
            OutputType.Format.wBitsPerSample = 32;
            OutputType.Samples.wValidBitsPerSample = 32;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
            break;
    }
    OutputType.Format.nSamplesPerSec = device->Frequency;

    OutputType.Format.nBlockAlign = OutputType.Format.nChannels *
                                    OutputType.Format.wBitsPerSample / 8;
    OutputType.Format.nAvgBytesPerSec = OutputType.Format.nSamplesPerSec *
                                        OutputType.Format.nBlockAlign;

    hr = IAudioClient_IsFormatSupported(data->client, AUDCLNT_SHAREMODE_SHARED, &OutputType.Format, &wfx);
    if(FAILED(hr))
    {
        ERR("Failed to check format support: 0x%08lx\n", hr);
        hr = IAudioClient_GetMixFormat(data->client, &wfx);
    }
    if(FAILED(hr))
    {
        ERR("Failed to find a supported format: 0x%08lx\n", hr);
        return hr;
    }

    if(wfx != NULL)
    {
        if(!MakeExtensible(&OutputType, wfx))
        {
            CoTaskMemFree(wfx);
            return E_FAIL;
        }
        CoTaskMemFree(wfx);
        wfx = NULL;

        device->Frequency = OutputType.Format.nSamplesPerSec;
        if(OutputType.Format.nChannels == 1 && OutputType.dwChannelMask == MONO)
            device->FmtChans = DevFmtMono;
        else if(OutputType.Format.nChannels == 2 && OutputType.dwChannelMask == STEREO)
            device->FmtChans = DevFmtStereo;
        else if(OutputType.Format.nChannels == 4 && OutputType.dwChannelMask == QUAD)
            device->FmtChans = DevFmtQuad;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1)
            device->FmtChans = DevFmtX51;
        else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1SIDE)
            device->FmtChans = DevFmtX51Side;
        else if(OutputType.Format.nChannels == 7 && OutputType.dwChannelMask == X6DOT1)
            device->FmtChans = DevFmtX61;
        else if(OutputType.Format.nChannels == 8 && OutputType.dwChannelMask == X7DOT1)
            device->FmtChans = DevFmtX71;
        else
        {
            ERR("Unhandled extensible channels: %d -- 0x%08lx\n", OutputType.Format.nChannels, OutputType.dwChannelMask);
            device->FmtChans = DevFmtStereo;
            OutputType.Format.nChannels = 2;
            OutputType.dwChannelMask = STEREO;
        }

        if(IsEqualGUID(&OutputType.SubFormat, &KSDATAFORMAT_SUBTYPE_PCM))
        {
            if(OutputType.Format.wBitsPerSample == 8)
                device->FmtType = DevFmtUByte;
            else if(OutputType.Format.wBitsPerSample == 16)
                device->FmtType = DevFmtShort;
            else if(OutputType.Format.wBitsPerSample == 32)
                device->FmtType = DevFmtInt;
            else
            {
                device->FmtType = DevFmtShort;
                OutputType.Format.wBitsPerSample = 16;
            }
        }
        else if(IsEqualGUID(&OutputType.SubFormat, &KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
        {
            device->FmtType = DevFmtFloat;
            OutputType.Format.wBitsPerSample = 32;
        }
        else
        {
            ERR("Unhandled format sub-type\n");
            device->FmtType = DevFmtShort;
            OutputType.Format.wBitsPerSample = 16;
            OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        }
        OutputType.Samples.wValidBitsPerSample = OutputType.Format.wBitsPerSample;
    }

    SetDefaultWFXChannelOrder(device);

    hr = IAudioClient_Initialize(data->client, AUDCLNT_SHAREMODE_SHARED,
                                 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                 buf_time, 0, &OutputType.Format, NULL);
    if(FAILED(hr))
    {
        ERR("Failed to initialize audio client: 0x%08lx\n", hr);
        return hr;
    }

    hr = IAudioClient_GetDevicePeriod(data->client, &min_per, NULL);
    if(SUCCEEDED(hr))
    {
        min_len = (UINT32)((min_per*device->Frequency + 10000000-1) / 10000000);
        /* Find the nearest multiple of the period size to the update size */
        if(min_len < device->UpdateSize)
            min_len *= (device->UpdateSize + min_len/2)/min_len;
        hr = IAudioClient_GetBufferSize(data->client, &buffer_len);
    }
    if(FAILED(hr))
    {
        ERR("Failed to get audio buffer info: 0x%08lx\n", hr);
        return hr;
    }

    device->UpdateSize = min_len;
    device->NumUpdates = buffer_len / device->UpdateSize;
    if(device->NumUpdates <= 1)
    {
        ERR("Audio client returned buffer_len < period*2; expect break up\n");
        device->NumUpdates = 2;
        device->UpdateSize = buffer_len / device->NumUpdates;
    }

    return hr;
}
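DoReset() rounds the requested update size to the nearest whole multiple of the device's minimum period. The following standalone sketch restates that rounding with illustrative names (it is not OpenAL Soft code):

#include <windows.h>
#include <audioclient.h> /* REFERENCE_TIME */

/* Illustrative restatement of the rounding in DoReset(): convert the minimum
 * period (100-ns units) to frames, then snap the requested update size to the
 * nearest whole multiple of that period. */
static UINT32 snap_update_to_period(REFERENCE_TIME min_period,
                                    UINT32 sample_rate, UINT32 requested_frames)
{
    /* frames per minimum period, rounded up */
    UINT32 period_frames =
        (UINT32)((min_period * sample_rate + 10000000 - 1) / 10000000);

    if (period_frames == 0)
        return requested_frames;
    if (period_frames >= requested_frames)
        return period_frames;

    /* nearest integer multiple of the period size */
    return period_frames * ((requested_frames + period_frames / 2) / period_frames);
}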
Example #8
File: capture.c  Project: hejin/wine
static void test_capture(IAudioClient *ac, HANDLE handle, WAVEFORMATEX *wfx)
{
    IAudioCaptureClient *acc;
    HRESULT hr;
    UINT32 frames, next, pad, sum = 0;
    BYTE *data;
    DWORD flags;
    UINT64 pos, qpc;
    REFERENCE_TIME period;

    hr = IAudioClient_GetService(ac, &IID_IAudioCaptureClient, (void**)&acc);
    ok(hr == S_OK, "IAudioClient_GetService(IID_IAudioCaptureClient) returns %08x\n", hr);
    if (hr != S_OK)
        return;

    frames = 0xabadcafe;
    data = (void*)0xdeadf00d;
    flags = 0xabadcafe;
    pos = qpc = 0xdeadbeef;
    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == AUDCLNT_S_BUFFER_EMPTY, "Initial IAudioCaptureClient_GetBuffer returns %08x\n", hr);

    /* should be empty right after start. Otherwise consume one packet */
    if(hr == S_OK){
        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
        ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);
        sum += frames;

        frames = 0xabadcafe;
        data = (void*)0xdeadf00d;
        flags = 0xabadcafe;
        pos = qpc = 0xdeadbeef;
        hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
        ok(hr == AUDCLNT_S_BUFFER_EMPTY, "Initial IAudioCaptureClient_GetBuffer returns %08x\n", hr);
    }

    if(hr == AUDCLNT_S_BUFFER_EMPTY){
        ok(!frames, "frames changed to %u\n", frames);
        ok(data == (void*)0xdeadf00d, "data changed to %p\n", data);
        ok(flags == 0xabadcafe, "flags changed to %x\n", flags);
        ok(pos == 0xdeadbeef, "position changed to %u\n", (UINT)pos);
        ok(qpc == 0xdeadbeef, "timer changed to %u\n", (UINT)qpc);

        /* GetNextPacketSize yields 0 if no data is yet available
         * it is not constantly period_size * SamplesPerSec */
        hr = IAudioCaptureClient_GetNextPacketSize(acc, &next);
        ok(hr == S_OK, "IAudioCaptureClient_GetNextPacketSize returns %08x\n", hr);
        ok(!next, "GetNextPacketSize %u\n", next);
    }

    hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
    ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);
    sum += frames;

    ok(ResetEvent(handle), "ResetEvent\n");

    hr = IAudioCaptureClient_GetNextPacketSize(acc, &next);
    ok(hr == S_OK, "IAudioCaptureClient_GetNextPacketSize returns %08x\n", hr);

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);
    ok(next == pad, "GetNextPacketSize %u vs. GCP %u\n", next, pad);
    /* later GCP will grow, while GNPS is 0 or period size */

    hr = IAudioCaptureClient_GetNextPacketSize(acc, NULL);
    ok(hr == E_POINTER, "IAudioCaptureClient_GetNextPacketSize(NULL) returns %08x\n", hr);

    data = (void*)0xdeadf00d;
    frames = 0xdeadbeef;
    flags = 0xabadcafe;
    hr = IAudioCaptureClient_GetBuffer(acc, &data, NULL, NULL, NULL, NULL);
    ok(hr == E_POINTER, "IAudioCaptureClient_GetBuffer(data, NULL, NULL) returns %08x\n", hr);

    hr = IAudioCaptureClient_GetBuffer(acc, NULL, &frames, NULL, NULL, NULL);
    ok(hr == E_POINTER, "IAudioCaptureClient_GetBuffer(NULL, &frames, NULL) returns %08x\n", hr);

    hr = IAudioCaptureClient_GetBuffer(acc, NULL, NULL, &flags, NULL, NULL);
    ok(hr == E_POINTER, "IAudioCaptureClient_GetBuffer(NULL, NULL, &flags) returns %08x\n", hr);

    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, NULL, NULL, NULL);
    ok(hr == E_POINTER, "IAudioCaptureClient_GetBuffer(&data, &frames, NULL) returns %08x\n", hr);
    ok((DWORD_PTR)data == 0xdeadf00d, "data is reset to %p\n", data);
    ok(frames == 0xdeadbeef, "frames is reset to %08x\n", frames);
    ok(flags == 0xabadcafe, "flags is reset to %08x\n", flags);

    hr = IAudioClient_GetDevicePeriod(ac, &period, NULL);
    ok(hr == S_OK, "GetDevicePeriod failed: %08x\n", hr);
    period = MulDiv(period, wfx->nSamplesPerSec, 10000000); /* as in render.c */

    ok(WaitForSingleObject(handle, 1000) == WAIT_OBJECT_0, "Waiting on event handle failed!\n");

    data = (void*)0xdeadf00d;
    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == S_OK || hr == AUDCLNT_S_BUFFER_EMPTY, "Valid IAudioCaptureClient_GetBuffer returns %08x\n", hr);
    if (hr == S_OK){
        ok(frames, "Amount of frames locked is 0!\n");
        /* broken: some w7 machines return pad == 0 and DATA_DISCONTINUITY here,
         * AUDCLNT_S_BUFFER_EMPTY above, yet pos == 1-2 * period rather than 0 */
        ok(pos == sum || broken(pos == period || pos == 2*period),
           "Position %u expected %u\n", (UINT)pos, sum);
        sum = pos;
    }else if (hr == AUDCLNT_S_BUFFER_EMPTY){
        ok(!frames, "Amount of frames locked with empty buffer is %u!\n", frames);
        ok(data == (void*)0xdeadf00d, "No data changed to %p\n", data);
    }

    trace("Wait'ed position %d pad %u flags %x, amount of frames locked: %u\n",
          hr==S_OK ? (UINT)pos : -1, pad, flags, frames);

    hr = IAudioCaptureClient_GetNextPacketSize(acc, &next);
    ok(hr == S_OK, "IAudioCaptureClient_GetNextPacketSize returns %08x\n", hr);
    ok(next == frames, "GetNextPacketSize %u vs. GetBuffer %u\n", next, frames);

    hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
    ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);

    hr = IAudioCaptureClient_ReleaseBuffer(acc, 0);
    ok(hr == S_OK, "Releasing 0 returns %08x\n", hr);

    hr = IAudioCaptureClient_GetNextPacketSize(acc, &next);
    ok(hr == S_OK, "IAudioCaptureClient_GetNextPacketSize returns %08x\n", hr);

    if (frames) {
        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
        ok(hr == AUDCLNT_E_OUT_OF_ORDER, "Releasing buffer twice returns %08x\n", hr);
        sum += frames;
    }

    Sleep(350); /* for sure there's data now */

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);

    /** GetNextPacketSize
     * returns either 0 or one period worth of frames
     * whereas GetCurrentPadding grows when input is not consumed. */
    hr = IAudioCaptureClient_GetNextPacketSize(acc, &next);
    ok(hr == S_OK, "IAudioCaptureClient_GetNextPacketSize returns %08x\n", hr);
    ok(next <  pad, "GetNextPacketSize %u vs. GCP %u\n", next, pad);

    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == S_OK, "Valid IAudioCaptureClient_GetBuffer returns %08x\n", hr);
    ok(next == frames, "GetNextPacketSize %u vs. GetBuffer %u\n", next, frames);

    if(hr == S_OK){
        UINT32 frames2 = frames;
        UINT64 pos2, qpc2;
        ok(frames, "Amount of frames locked is 0!\n");
        ok(pos == sum, "Position %u expected %u\n", (UINT)pos, sum);

        hr = IAudioCaptureClient_ReleaseBuffer(acc, 0);
        ok(hr == S_OK, "Releasing 0 returns %08x\n", hr);

        /* GCP did not decrement, no data consumed */
        hr = IAudioClient_GetCurrentPadding(ac, &frames);
        ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);
        ok(frames == pad || frames == pad + next /* concurrent feeder */,
           "GCP %u past ReleaseBuffer(0) initially %u\n", frames, pad);

        /* should re-get the same data */
        hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos2, &qpc2);
        ok(hr == S_OK, "Valid IAudioCaptureClient_GetBuffer returns %08x\n", hr);
        ok(frames2 == frames, "GetBuffer after ReleaseBuffer(0) %u/%u\n", frames2, frames);
        ok(pos2 == pos, "Position after ReleaseBuffer(0) %u/%u\n", (UINT)pos2, (UINT)pos);
        todo_wine ok(qpc2 == qpc, "HPC after ReleaseBuffer(0) %u vs. %u\n", (UINT)qpc2, (UINT)qpc);
    }

    /* trace after the GCP test because log output to MS-DOS console disturbs timing */
    trace("Sleep.1 position %d pad %u flags %x, amount of frames locked: %u\n",
          hr==S_OK ? (UINT)pos : -1, pad, flags, frames);

    if(hr == S_OK){
        UINT32 frames2 = 0xabadcafe;
        BYTE *data2 = (void*)0xdeadf00d;
        flags = 0xabadcafe;

        ok(pos == sum, "Position %u expected %u\n", (UINT)pos, sum);

        pos = qpc = 0xdeadbeef;
        hr = IAudioCaptureClient_GetBuffer(acc, &data2, &frames2, &flags, &pos, &qpc);
        ok(hr == AUDCLNT_E_OUT_OF_ORDER, "Out of order IAudioCaptureClient_GetBuffer returns %08x\n", hr);
        ok(frames2 == 0xabadcafe, "Out of order frames changed to %x\n", frames2);
        ok(data2 == (void*)0xdeadf00d, "Out of order data changed to %p\n", data2);
        ok(flags == 0xabadcafe, "Out of order flags changed to %x\n", flags);
        ok(pos == 0xdeadbeef, "Out of order position changed to %x\n", (UINT)pos);
        ok(qpc == 0xdeadbeef, "Out of order timer changed to %x\n", (UINT)qpc);

        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames+1);
        ok(hr == AUDCLNT_E_INVALID_SIZE, "Releasing buffer+1 returns %08x\n", hr);

        hr = IAudioCaptureClient_ReleaseBuffer(acc, 1);
        ok(hr == AUDCLNT_E_INVALID_SIZE, "Releasing 1 returns %08x\n", hr);

        hr = IAudioClient_Reset(ac);
        ok(hr == AUDCLNT_E_NOT_STOPPED, "Reset failed: %08x\n", hr);
    }

    hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
    ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);

    if (frames) {
        sum += frames;
        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
        ok(hr == AUDCLNT_E_OUT_OF_ORDER, "Releasing buffer twice returns %08x\n", hr);
    }

    frames = period;
    ok(next == frames, "GetNextPacketSize %u vs. GetDevicePeriod %u\n", next, frames);

    /* GetBufferSize is not a multiple of the period size! */
    hr = IAudioClient_GetBufferSize(ac, &next);
    ok(hr == S_OK, "GetBufferSize failed: %08x\n", hr);
    trace("GetBufferSize %u period size %u\n", next, frames);

    Sleep(400); /* overrun */

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);

    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == S_OK, "Valid IAudioCaptureClient_GetBuffer returns %08x\n", hr);

    trace("Overrun position %d pad %u flags %x, amount of frames locked: %u\n",
          hr==S_OK ? (UINT)pos : -1, pad, flags, frames);

    if(hr == S_OK){
        /* The discontinuity is reported here, but is this an old or new packet? */
        todo_wine ok(flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY, "expect DISCONTINUITY %x\n", flags);
        ok(pad == next, "GCP %u vs. BufferSize %u\n", (UINT32)pad, next);

        /* Native's position is one period further than what we read.
         * Perhaps that's precisely the meaning of DATA_DISCONTINUITY:
         * signal when the position jump left a gap. */
        todo_wine ok(pos == sum + frames, "Position %u gap %d\n",
                     (UINT)pos, (UINT)pos - sum);
        if(flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)
            sum = pos;
    }

    hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
    ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);
    sum += frames;

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);

    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == S_OK, "Valid IAudioCaptureClient_GetBuffer returns %08x\n", hr);

    trace("Cont'ed position %d pad %u flags %x, amount of frames locked: %u\n",
          hr==S_OK ? (UINT)pos : -1, pad, flags, frames);

    if(hr == S_OK){
        ok(pos == sum, "Position %u expected %u\n", (UINT)pos, sum);
        ok(!flags, "flags %u\n", flags);

        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
        ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);
        sum += frames;
    }

    hr = IAudioClient_Stop(ac);
    ok(hr == S_OK, "Stop on a started stream returns %08x\n", hr);

    hr = IAudioClient_Start(ac);
    ok(hr == S_OK, "Start on a stopped stream returns %08x\n", hr);

    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == S_OK, "Valid IAudioCaptureClient_GetBuffer returns %08x\n", hr);

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);

    trace("Restart position %d pad %u flags %x, amount of frames locked: %u\n",
          hr==S_OK ? (UINT)pos : -1, pad, flags, frames);
    ok(pad > sum, "restarted GCP %u\n", pad); /* GCP is still near buffer size */

    if(frames){
        ok(pos == sum, "Position %u expected %u\n", (UINT)pos, sum);
        ok(!flags, "flags %u\n", flags);

        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
        ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);
        sum += frames;
    }

    hr = IAudioClient_Stop(ac);
    ok(hr == S_OK, "Stop on a started stream returns %08x\n", hr);

    hr = IAudioClient_Reset(ac);
    ok(hr == S_OK, "Reset on a stopped stream returns %08x\n", hr);
    sum += pad - frames;

    hr = IAudioClient_Start(ac);
    ok(hr == S_OK, "Start on a stopped stream returns %08x\n", hr);

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);

    flags = 0xabadcafe;
    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == AUDCLNT_S_BUFFER_EMPTY || /*PulseAudio*/hr == S_OK,
       "Initial IAudioCaptureClient_GetBuffer returns %08x\n", hr);

    trace("Reset   position %d pad %u flags %x, amount of frames locked: %u\n",
          hr==S_OK ? (UINT)pos : -1, pad, flags, frames);

    if(hr == S_OK){
        /* Only PulseAudio goes here; despite snd_pcm_drop it manages
         * to fill GetBufferSize with a single snd_pcm_read */
        trace("Test marked todo: only PulseAudio gets here\n");
        todo_wine ok(flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY, "expect DISCONTINUITY %x\n", flags);
        /* Reset zeroes padding, not the position */
        ok(pos >= sum, "Position %u last %u\n", (UINT)pos, sum);
        /*sum = pos; check after next GetBuffer */

        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
        ok(hr == S_OK, "Releasing buffer returns %08x\n", hr);
        sum += frames;
    }
    else if(hr == AUDCLNT_S_BUFFER_EMPTY){
        ok(!pad, "resetted GCP %u\n", pad);
        Sleep(180);
    }

    hr = IAudioClient_GetCurrentPadding(ac, &pad);
    ok(hr == S_OK, "GetCurrentPadding call returns %08x\n", hr);

    hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags, &pos, &qpc);
    ok(hr == S_OK, "Valid IAudioCaptureClient_GetBuffer returns %08x\n", hr);
    trace("Running position %d pad %u flags %x, amount of frames locked: %u\n",
          hr==S_OK ? (UINT)pos : -1, pad, flags, frames);

    if(hr == S_OK){
        /* Some w7 machines signal DATA_DISCONTINUITY here following the
         * previous AUDCLNT_S_BUFFER_EMPTY, others not.  What logic? */
        ok(pos >= sum, "Position %u gap %d\n", (UINT)pos, (UINT)pos - sum);
        IAudioCaptureClient_ReleaseBuffer(acc, frames);
    }

    IAudioCaptureClient_Release(acc);
}
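For contrast with the test harness above, this is a hedged sketch of the plain capture pump the test exercises: drain whatever packets are currently available, with one GetNextPacketSize/GetBuffer/ReleaseBuffer cycle per packet. The function name is hypothetical and the consume step is left as a comment:

#define COBJMACROS
#include <windows.h>
#include <audioclient.h>

/* Hypothetical capture pump: read and release every packet that is currently
 * available.  Assumes a started, shared-mode capture stream. */
static HRESULT drain_capture(IAudioCaptureClient *acc)
{
    UINT32 packet = 0;
    HRESULT hr = IAudioCaptureClient_GetNextPacketSize(acc, &packet);

    while (hr == S_OK && packet != 0)
    {
        BYTE *data;
        UINT32 frames;
        DWORD flags;

        hr = IAudioCaptureClient_GetBuffer(acc, &data, &frames, &flags,
                                           NULL, NULL);
        if (hr != S_OK)
            break;

        /* ... consume `frames` frames from `data` here;
         * AUDCLNT_BUFFERFLAGS_SILENT means the packet should be treated
         * as silence regardless of the buffer contents ... */

        hr = IAudioCaptureClient_ReleaseBuffer(acc, frames);
        if (FAILED(hr))
            break;

        hr = IAudioCaptureClient_GetNextPacketSize(acc, &packet);
    }
    return hr;
}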