Example #1
static GstClockTime
gst_wasapi_src_get_time (GstClock * clock, gpointer user_data)
{
  GstWasapiSrc *self = GST_WASAPI_SRC (user_data);
  HRESULT hr;
  guint64 devpos;
  GstClockTime result;

  if (G_UNLIKELY (self->client_clock == NULL))
    return GST_CLOCK_TIME_NONE;

  hr = IAudioClock_GetPosition (self->client_clock, &devpos, NULL);
  if (G_UNLIKELY (hr != S_OK))
    return GST_CLOCK_TIME_NONE;

  result = gst_util_uint64_scale_int (devpos, GST_SECOND,
      self->client_clock_freq);

  /*
     GST_DEBUG_OBJECT (self, "devpos = %" G_GUINT64_FORMAT
     " frequency = %" G_GUINT64_FORMAT
     " result = %" G_GUINT64_FORMAT " ms",
     devpos, self->client_clock_freq, GST_TIME_AS_MSECONDS (result));
   */

  return result;
}
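
Note: Example #1 assumes self->client_clock_freq was cached earlier via IAudioClock_GetFrequency(). A minimal stand-alone sketch of the same conversion, querying the frequency on each call, could look like the following; the helper name, its error convention, and the double-based scaling are illustrative assumptions, not code from the project above:

#define COBJMACROS
#include <windows.h>
#include <audioclient.h>

/* Hypothetical helper: read the current device position from an IAudioClock
 * and convert it to 100-ns units. Returns (UINT64)-1 on failure. */
static UINT64 device_position_hns(IAudioClock *clock)
{
    UINT64 freq = 0, pos = 0, qpc_pos = 0;
    HRESULT hr;

    /* The clock frequency is in device-specific ticks per second, so a raw
     * position is meaningless until it is scaled by this value. */
    hr = IAudioClock_GetFrequency(clock, &freq);
    if (FAILED(hr) || freq == 0)
        return (UINT64)-1;

    /* S_FALSE means the value may be slightly stale but is still usable. */
    hr = IAudioClock_GetPosition(clock, &pos, &qpc_pos);
    if (FAILED(hr))
        return (UINT64)-1;

    /* pos / freq seconds, expressed in 100-ns units; going through double
     * avoids 64-bit overflow at the cost of a little precision. */
    return (UINT64)((double)pos * 10000000.0 / (double)freq);
}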
Example #2
static double get_device_delay(struct wasapi_state *state) {
    UINT64 sample_count = atomic_load(&state->sample_count);
    UINT64 position, qpc_position;
    HRESULT hr;

    switch (hr = IAudioClock_GetPosition(state->pAudioClock, &position, &qpc_position)) {
        case S_OK: case S_FALSE:
            break;
        default:
            MP_ERR(state, "IAudioClock::GetPosition returned %s\n", wasapi_explain_err(hr));
            /* position was not written on failure; bail out rather than use it */
            return 0.0;
    }

    LARGE_INTEGER qpc_count;
    QueryPerformanceCounter(&qpc_count);
    double qpc_diff = (qpc_count.QuadPart * 1e7 / state->qpc_frequency.QuadPart) - qpc_position;

    position += state->clock_frequency * (uint64_t)(qpc_diff / 1e7);

    /* convert position to the same base as sample_count */
    position = position * state->format.Format.nSamplesPerSec / state->clock_frequency;

    double diff = sample_count - position;
    double delay = diff / state->format.Format.nSamplesPerSec;

    MP_TRACE(state, "device delay: %g samples (%g ms)\n", diff, delay * 1000);

    return delay;
}
Example #3
File: ao_wasapi.c Project: BILIHUBSU/mpv
static HRESULT get_device_delay(struct wasapi_state *state, double *delay) {
    UINT64 sample_count = atomic_load(&state->sample_count);
    UINT64 position, qpc_position;
    HRESULT hr;

    hr = IAudioClock_GetPosition(state->pAudioClock, &position, &qpc_position);
    /* GetPosition succeeded, but the result may be inaccurate due to the length of the call */
    /* http://msdn.microsoft.com/en-us/library/windows/desktop/dd370889%28v=vs.85%29.aspx */
    if (hr == S_FALSE) {
        MP_DBG(state, "Possibly inaccurate device position.\n");
        hr = S_OK;
    }
    EXIT_ON_ERROR(hr);

    LARGE_INTEGER qpc_count;
    QueryPerformanceCounter(&qpc_count);
    double qpc_diff = (qpc_count.QuadPart * 1e7 / state->qpc_frequency.QuadPart) - qpc_position;

    position += state->clock_frequency * (uint64_t) (qpc_diff / 1e7);

    /* convert position to the same base as sample_count */
    position = position * state->format.Format.nSamplesPerSec / state->clock_frequency;

    double diff = sample_count - position;
    *delay = diff / state->format.Format.nSamplesPerSec;

    MP_TRACE(state, "Device delay: %g samples (%g ms)\n", diff, *delay * 1000);

    return S_OK;
exit_label:
    MP_ERR(state, "Error getting device delay: %s\n", mp_HRESULT_to_str(hr));
    return hr;
}
Example #4
File: ao_wasapi.c Project: ThreeGe/mpv
static HRESULT get_device_delay(struct wasapi_state *state, double *delay_us) {
    UINT64 sample_count = atomic_load(&state->sample_count);
    UINT64 position, qpc_position;
    HRESULT hr;

    hr = IAudioClock_GetPosition(state->pAudioClock, &position, &qpc_position);
    // GetPosition succeeded, but the result may be
    // inaccurate due to the length of the call
    // http://msdn.microsoft.com/en-us/library/windows/desktop/dd370889%28v=vs.85%29.aspx
    if (hr == S_FALSE) {
        MP_VERBOSE(state, "Possibly inaccurate device position.\n");
        hr = S_OK;
    }
    EXIT_ON_ERROR(hr);

    // convert position to a number of samples, taking care to avoid overflow
    UINT64 sample_position = uint64_scale(position,
                                          state->format.Format.nSamplesPerSec,
                                          state->clock_frequency);
    INT64 diff = sample_count - sample_position;
    *delay_us = diff * 1e6 / state->format.Format.nSamplesPerSec;

    // Correct for any delay in IAudioClock_GetPosition above.
    // This should normally be very small (<1 us), but just in case. . .
    LARGE_INTEGER qpc;
    QueryPerformanceCounter(&qpc);
    INT64 qpc_diff = av_rescale(qpc.QuadPart, 10000000, state->qpc_frequency.QuadPart)
                     - qpc_position;
    // ignore the above calculation if it yields more than 10 seconds (due to
    // possible overflow inside IAudioClock_GetPosition)
    if (qpc_diff < 10 * 10000000) {
        *delay_us -= qpc_diff / 10.0; // convert to us
    } else {
        MP_VERBOSE(state, "Insane qpc delay correction of %g seconds. "
                   "Ignoring it.\n", qpc_diff / 10000000.0);
    }

    MP_TRACE(state, "Device delay: %g us\n", *delay_us);

    return S_OK;
exit_label:
    MP_ERR(state, "Error getting device delay: %s\n", mp_HRESULT_to_str(hr));
    return hr;
}
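
The QPC correction in Examples #2-#4 rests on two unit conventions: the qpc_position reported by IAudioClock::GetPosition is already in 100-ns units, while QueryPerformanceCounter() itself ticks at qpc_frequency counts per second. A small stand-alone sketch of just that correction step follows; the helper name is illustrative, not taken from mpv:

#include <windows.h>

/* Hypothetical helper: how old, in seconds, a qpc_position snapshot returned
 * by IAudioClock_GetPosition() is by now. qpc_position is in 100-ns units. */
static double qpc_position_age_sec(UINT64 qpc_position, LARGE_INTEGER qpc_freq)
{
    LARGE_INTEGER now;
    QueryPerformanceCounter(&now);

    /* Convert the current counter reading to 100-ns units, then subtract the
     * snapshot taken inside GetPosition(). */
    double now_hns = (double)now.QuadPart * 1e7 / (double)qpc_freq.QuadPart;
    return (now_hns - (double)qpc_position) / 1e7;
}

Example #4 subtracts this age (converted to microseconds) from the reported delay, and discards the correction entirely if it comes out implausibly large.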
Example #5
File: render.c Project: diosmosis/wine
static void test_clock(void)
{
    HRESULT hr;
    IAudioClient *ac;
    IAudioClock *acl;
    IAudioRenderClient *arc;
    UINT64 freq, pos, pcpos, last;
    BYTE *data;
    WAVEFORMATEX *pwfx;

    hr = IMMDevice_Activate(dev, &IID_IAudioClient, CLSCTX_INPROC_SERVER,
            NULL, (void**)&ac);
    ok(hr == S_OK, "Activation failed with %08x\n", hr);
    if(hr != S_OK)
        return;

    hr = IAudioClient_GetMixFormat(ac, &pwfx);
    ok(hr == S_OK, "GetMixFormat failed: %08x\n", hr);
    if(hr != S_OK)
        return;

    hr = IAudioClient_Initialize(ac, AUDCLNT_SHAREMODE_SHARED,
            0, 5000000, 0, pwfx, NULL);
    ok(hr == S_OK, "Initialize failed: %08x\n", hr);

    hr = IAudioClient_GetService(ac, &IID_IAudioClock, (void**)&acl);
    ok(hr == S_OK, "GetService(IAudioClock) failed: %08x\n", hr);

    hr = IAudioClock_GetFrequency(acl, &freq);
    ok(hr == S_OK, "GetFrequency failed: %08x\n", hr);

    hr = IAudioClock_GetPosition(acl, NULL, NULL);
    ok(hr == E_POINTER, "GetPosition wrong error: %08x\n", hr);

    pcpos = 0;
    hr = IAudioClock_GetPosition(acl, &pos, &pcpos);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos == 0, "GetPosition returned non-zero pos before being started\n");
    ok(pcpos != 0, "GetPosition returned zero pcpos\n");
    last = pos;

    hr = IAudioClient_GetService(ac, &IID_IAudioRenderClient, (void**)&arc);
    ok(hr == S_OK, "GetService(IAudioRenderClient) failed: %08x\n", hr);

    hr = IAudioRenderClient_GetBuffer(arc, pwfx->nSamplesPerSec / 2., &data);
    ok(hr == S_OK, "GetBuffer failed: %08x\n", hr);

    hr = IAudioRenderClient_ReleaseBuffer(arc, pwfx->nSamplesPerSec / 2., AUDCLNT_BUFFERFLAGS_SILENT);
    ok(hr == S_OK, "ReleaseBuffer failed: %08x\n", hr);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos == 0, "GetPosition returned non-zero pos before being started\n");

    hr = IAudioClient_Start(ac);
    ok(hr == S_OK, "Start failed: %08x\n", hr);

    Sleep(100);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos > 0, "Position should have been further along...\n");
    last = pos;

    hr = IAudioClient_Stop(ac);
    ok(hr == S_OK, "Stop failed: %08x\n", hr);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos >= last, "Position should have been further along...\n");
    last = pos;

    hr = IAudioClient_Start(ac);
    ok(hr == S_OK, "Start failed: %08x\n", hr);

    Sleep(100);

    hr = IAudioClient_Stop(ac);
    ok(hr == S_OK, "Stop failed: %08x\n", hr);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos >= last, "Position should have been further along...\n");
    last = pos;

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos == last, "Position should not have changed while stopped\n");

    hr = IAudioClient_Reset(ac);
    ok(hr == S_OK, "Reset failed: %08x\n", hr);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos == 0, "GetPosition returned non-zero pos after Reset\n");
    last = pos;

    hr = IAudioRenderClient_GetBuffer(arc, pwfx->nSamplesPerSec / 2., &data);
    ok(hr == S_OK, "GetBuffer failed: %08x\n", hr);

    hr = IAudioRenderClient_ReleaseBuffer(arc, pwfx->nSamplesPerSec / 2., AUDCLNT_BUFFERFLAGS_SILENT);
    ok(hr == S_OK, "ReleaseBuffer failed: %08x\n", hr);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos == 0, "GetPosition returned non-zero pos after Reset\n");
    last = pos;

    hr = IAudioClient_Start(ac);
    ok(hr == S_OK, "Start failed: %08x\n", hr);

    Sleep(100);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos > last, "Position should have been further along...\n");

    hr = IAudioClient_Stop(ac);
    ok(hr == S_OK, "Stop failed: %08x\n", hr);

    hr = IAudioClock_GetPosition(acl, &pos, NULL);
    ok(hr == S_OK, "GetPosition failed: %08x\n", hr);
    ok(pos >= last, "Position should have been further along...\n");

    IAudioClock_Release(acl);
    IAudioClient_Release(ac);
}
Example #6
File: wasapi.c Project: eduardovra/vlc
static void Play(audio_output_t *aout, block_t *block)
{
    aout_sys_t *sys = aout->sys;
    HRESULT hr;

    Enter();
    if (likely(sys->clock != NULL))
    {
        UINT64 pos, qpcpos;

        IAudioClock_GetPosition(sys->clock, &pos, &qpcpos);
        qpcpos = (qpcpos + 5) / 10; /* 100ns -> 1µs */
        /* NOTE: this assumes mdate() uses QPC() (which it currently does). */
        aout_TimeReport(aout, qpcpos);
    }

    for (;;)
    {
        UINT32 frames;
        hr = IAudioClient_GetCurrentPadding(sys->client, &frames);
        if (FAILED(hr))
        {
            msg_Err(aout, "cannot get current padding (error 0x%lx)", hr);
            break;
        }

        assert(frames <= sys->frames);
        frames = sys->frames - frames;
        if (frames > block->i_nb_samples)
            frames = block->i_nb_samples;

        BYTE *dst;
        hr = IAudioRenderClient_GetBuffer(sys->render, frames, &dst);
        if (FAILED(hr))
        {
            msg_Err(aout, "cannot get buffer (error 0x%lx)", hr);
            break;
        }

        const size_t copy = frames * (size_t)aout->format.i_bytes_per_frame;

        memcpy(dst, block->p_buffer, copy);
        hr = IAudioRenderClient_ReleaseBuffer(sys->render, frames, 0);
        if (FAILED(hr))
        {
            msg_Err(aout, "cannot release buffer (error 0x%lx)", hr);
            break;
        }
        IAudioClient_Start(sys->client);

        block->p_buffer += copy;
        block->i_buffer -= copy;
        block->i_nb_samples -= frames;
        if (block->i_nb_samples == 0)
            break; /* done */

        /* Out of buffer space, sleep */
        msleep(AOUT_MIN_PREPARE_TIME
             + block->i_nb_samples * CLOCK_FREQ / aout->format.i_rate);
    }

    Leave();
    block_Release(block);
}
Example #7
File: mixer.c Project: AmesianX/RosWine
/**
 * Perform mixing for a Direct Sound device. That is, go through all the
 * secondary buffers (the sound bites currently playing) and mix them in
 * to the primary buffer (the device buffer).
 */
static void DSOUND_PerformMix(DirectSoundDevice *device)
{
	UINT64 clock_pos, clock_freq, pos_bytes;
	UINT delta_frags;
	HRESULT hr;

	TRACE("(%p)\n", device);

	/* **** */
	EnterCriticalSection(&device->mixlock);

	hr = IAudioClock_GetFrequency(device->clock, &clock_freq);
	if(FAILED(hr)){
		WARN("GetFrequency failed: %08x\n", hr);
		LeaveCriticalSection(&device->mixlock);
		return;
	}

	hr = IAudioClock_GetPosition(device->clock, &clock_pos, NULL);
	if(FAILED(hr)){
		WARN("GetPosition failed: %08x\n", hr);
		LeaveCriticalSection(&device->mixlock);
		return;
	}

	pos_bytes = (clock_pos * device->pwfx->nSamplesPerSec * device->pwfx->nBlockAlign) / clock_freq;

	delta_frags = (pos_bytes - device->last_pos_bytes) / device->fraglen;
	if(delta_frags > 0){
		device->pwplay += delta_frags;
		device->pwplay %= device->helfrags;
		device->pwqueue -= delta_frags;
		device->last_pos_bytes = pos_bytes - (pos_bytes % device->fraglen);
	}

	if (device->priolevel != DSSCL_WRITEPRIMARY) {
		BOOL recover = FALSE, all_stopped = FALSE;
		DWORD playpos, writepos, writelead, maxq, frag, prebuff_max, prebuff_left, size1, size2, mixplaypos, mixplaypos2;
		LPVOID buf1, buf2;
		int nfiller;

		/* the sound of silence */
		nfiller = device->pwfx->wBitsPerSample == 8 ? 128 : 0;

		/* get the position in the primary buffer */
		if (DSOUND_PrimaryGetPosition(device, &playpos, &writepos) != 0){
			LeaveCriticalSection(&(device->mixlock));
			return;
		}

		TRACE("primary playpos=%d, writepos=%d, clrpos=%d, mixpos=%d, buflen=%d\n",
			playpos,writepos,device->playpos,device->mixpos,device->buflen);
		assert(device->playpos < device->buflen);

		mixplaypos = DSOUND_bufpos_to_mixpos(device, device->playpos);
		mixplaypos2 = DSOUND_bufpos_to_mixpos(device, playpos);

		/* calc maximum prebuff */
		prebuff_max = (device->prebuf * device->fraglen);
		if (playpos + prebuff_max >= device->helfrags * device->fraglen)
			prebuff_max += device->buflen - device->helfrags * device->fraglen;

		/* check how close we are to an underrun. It occurs when the writepos overtakes the mixpos */
		prebuff_left = DSOUND_BufPtrDiff(device->buflen, device->mixpos, playpos);
		writelead = DSOUND_BufPtrDiff(device->buflen, writepos, playpos);

		/* check for underrun. underrun occurs when the write position passes the mix position
		 * also wipe out just-played sound data */
		if((prebuff_left > prebuff_max) || (device->state == STATE_STOPPED) || (device->state == STATE_STARTING)){
			if (device->state == STATE_STOPPING || device->state == STATE_PLAYING)
				WARN("Probable buffer underrun\n");
			else TRACE("Buffer starting or buffer underrun\n");

			/* recover mixing for all buffers */
			recover = TRUE;

			/* reset mix position to write position */
			device->mixpos = writepos;

			ZeroMemory(device->mix_buffer, device->mix_buffer_len);
			ZeroMemory(device->buffer, device->buflen);
		} else if (playpos < device->playpos) {
			buf1 = device->buffer + device->playpos;
			buf2 = device->buffer;
			size1 = device->buflen - device->playpos;
			size2 = playpos;
			FillMemory(device->mix_buffer + mixplaypos, device->mix_buffer_len - mixplaypos, 0);
			FillMemory(device->mix_buffer, mixplaypos2, 0);
			FillMemory(buf1, size1, nfiller);
			if (playpos && (!buf2 || !size2))
				FIXME("%d: (%d, %d)=>(%d, %d) There should be an additional buffer here!!\n", __LINE__, device->playpos, device->mixpos, playpos, writepos);
			FillMemory(buf2, size2, nfiller);
		} else {
			buf1 = device->buffer + device->playpos;
			buf2 = NULL;
			size1 = playpos - device->playpos;
			size2 = 0;
			FillMemory(device->mix_buffer + mixplaypos, mixplaypos2 - mixplaypos, 0);
			FillMemory(buf1, size1, nfiller);
		}
		device->playpos = playpos;

		/* find the maximum we can prebuffer from current write position */
		maxq = (writelead < prebuff_max) ? (prebuff_max - writelead) : 0;

		TRACE("prebuff_left = %d, prebuff_max = %dx%d=%d, writelead=%d\n",
			prebuff_left, device->prebuf, device->fraglen, prebuff_max, writelead);

		/* do the mixing */
		frag = DSOUND_MixToPrimary(device, writepos, maxq, recover, &all_stopped);

		if (frag + writepos > device->buflen)
		{
			DWORD todo = device->buflen - writepos;
			device->normfunction(device->mix_buffer + DSOUND_bufpos_to_mixpos(device, writepos), device->buffer + writepos, todo);
			device->normfunction(device->mix_buffer, device->buffer, frag - todo);
		}
		else
			device->normfunction(device->mix_buffer + DSOUND_bufpos_to_mixpos(device, writepos), device->buffer + writepos, frag);

		/* update the mix position, taking wrap-around into account */
		device->mixpos = writepos + frag;
		device->mixpos %= device->buflen;

		/* update prebuff left */
		prebuff_left = DSOUND_BufPtrDiff(device->buflen, device->mixpos, playpos);

		/* check if have a whole fragment */
		if (prebuff_left >= device->fraglen){

			/* update the wave queue */
			DSOUND_WaveQueue(device, FALSE);

			/* buffers are full. start playing if applicable */
			if(device->state == STATE_STARTING){
				TRACE("started primary buffer\n");
				if(DSOUND_PrimaryPlay(device) != DS_OK){
					WARN("DSOUND_PrimaryPlay failed\n");
				}
				else{
					/* we are playing now */
					device->state = STATE_PLAYING;
				}
			}

			/* buffers are full. start stopping if applicable */
			if(device->state == STATE_STOPPED){
				TRACE("restarting primary buffer\n");
				if(DSOUND_PrimaryPlay(device) != DS_OK){
					WARN("DSOUND_PrimaryPlay failed\n");
				}
				else{
					/* start stopping again. as soon as there is no more data, it will stop */
					device->state = STATE_STOPPING;
				}
			}
		}

		/* if the device was stopping, it is certainly stopped once all buffers have stopped */
		else if((all_stopped == TRUE) && (device->state == STATE_STOPPING)){
			TRACE("All buffers have stopped. Stopping primary buffer\n");
			device->state = STATE_STOPPED;

			/* stop the primary buffer now */
			DSOUND_PrimaryStop(device);
		}

	} else {

		DSOUND_WaveQueue(device, TRUE);

		/* in the DSSCL_WRITEPRIMARY mode, the app is totally in charge... */
		if (device->state == STATE_STARTING) {
			if (DSOUND_PrimaryPlay(device) != DS_OK)
				WARN("DSOUND_PrimaryPlay failed\n");
			else
				device->state = STATE_PLAYING;
		}
		else if (device->state == STATE_STOPPING) {
			if (DSOUND_PrimaryStop(device) != DS_OK)
				WARN("DSOUND_PrimaryStop failed\n");
			else
				device->state = STATE_STOPPED;
		}
	}

	LeaveCriticalSection(&(device->mixlock));
	/* **** */
}