/* Called from I/O thread context.
 *
 * Handles latency and volume messages for the Solaris audio-device
 * source; everything else is delegated to the generic handler. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;
    int err;
    audio_info_t info;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Use the same "device is open" test as the other cases below:
             * the original "if (u->fd)" wrongly treated fd 0 as closed and
             * any negative fd as open. */
            if (u->fd >= 0) {
                err = ioctl(u->fd, AUDIO_GETINFO, &info);
                pa_assert(err >= 0);

                /* Latency = what the device captured minus what we already
                 * consumed. */
                r += pa_bytes_to_usec(info.record.samples * u->frame_size, &PA_SOURCE(o)->sample_spec);
                r -= pa_bytes_to_usec(u->read_bytes, &PA_SOURCE(o)->sample_spec);
            }

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_VOLUME:
            if (u->fd >= 0) {
                AUDIO_INITINFO(&info);

                info.record.gain = pa_cvolume_avg((pa_cvolume*) data) * AUDIO_MAX_GAIN / PA_VOLUME_NORM;
                /* pa_assert for consistency with the rest of this function
                 * (plain assert() compiles away under NDEBUG). */
                pa_assert(info.record.gain <= AUDIO_MAX_GAIN);

                if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0) {
                    if (errno == EINVAL)
                        pa_log("AUDIO_SETINFO: Unsupported volume.");
                    else
                        pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
                } else {
                    return 0;
                }
            }
            break;

        case PA_SOURCE_MESSAGE_GET_VOLUME:
            if (u->fd >= 0) {
                err = ioctl(u->fd, AUDIO_GETINFO, &info);
                pa_assert(err >= 0);

                pa_cvolume_set((pa_cvolume*) data, ((pa_cvolume*) data)->channels,
                               info.record.gain * PA_VOLUME_NORM / AUDIO_MAX_GAIN);
                return 0;
            }
            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
// Return the current latency in seconds
static float get_delay(struct ao *ao)
{
    /* This code basically does what pa_stream_get_latency() _should_
     * do, but doesn't due to multiple known bugs in PulseAudio (at
     * PulseAudio version 2.1). In particular, the timing interpolation
     * mode (PA_STREAM_INTERPOLATE_TIMING) can return completely bogus
     * values, and the non-interpolating code has a bug causing too
     * large results at end of stream (so a stream never seems to finish).
     * This code can still return wrong values in some cases due to known
     * PulseAudio bugs that can not be worked around on the client side.
     *
     * We always query the server for latest timing info. This may take
     * too long to work well with remote audio servers, but at least
     * this should be enough to fix the normal local playback case.
     */
    struct priv *priv = ao->priv;

    pa_threaded_mainloop_lock(priv->mainloop);
    if (!waitop(priv, pa_stream_update_timing_info(priv->stream, NULL, NULL))) {
        /* NOTE(review): waitop() presumably releases the mainloop lock on
         * both success and failure — otherwise this early return leaks the
         * lock. Confirm against waitop()'s definition. */
        GENERIC_ERR_MSG(priv->context, "pa_stream_update_timing_info() failed");
        return 0;
    }

    /* Re-acquire the lock released by waitop() before touching the stream. */
    pa_threaded_mainloop_lock(priv->mainloop);

    const pa_timing_info *ti = pa_stream_get_timing_info(priv->stream);
    if (!ti) {
        pa_threaded_mainloop_unlock(priv->mainloop);
        GENERIC_ERR_MSG(priv->context, "pa_stream_get_timing_info() failed");
        return 0;
    }

    const struct pa_sample_spec *ss = pa_stream_get_sample_spec(priv->stream);
    if (!ss) {
        pa_threaded_mainloop_unlock(priv->mainloop);
        GENERIC_ERR_MSG(priv->context, "pa_stream_get_sample_spec() failed");
        return 0;
    }

    // data left in PulseAudio's main buffers (not written to sink yet)
    int64_t latency = pa_bytes_to_usec(ti->write_index - ti->read_index, ss);
    // since this info may be from a while ago, playback has progressed since
    latency -= ti->transport_usec;
    // data already moved from buffers to sink, but not played yet
    int64_t sink_latency = ti->sink_usec;
    if (!ti->playing)
        /* At the end of a stream, part of the data "left" in the sink may
         * be padding silence after the end; that should be subtracted to
         * get the amount of real audio from our stream. This adjustment
         * is missing from Pulseaudio's own get_latency calculations
         * (as of PulseAudio 2.1). */
        sink_latency -= pa_bytes_to_usec(ti->since_underrun, ss);
    if (sink_latency > 0)
        latency += sink_latency;
    if (latency < 0)
        latency = 0;

    pa_threaded_mainloop_unlock(priv->mainloop);

    // convert microseconds to seconds
    return latency / 1e6;
}
/* Return the playback latency: audio still buffered in the device plus
 * anything queued in our partially-consumed memchunk. */
static pa_usec_t sink_get_latency(struct userdata *u, pa_sample_spec *ss) {
    pa_usec_t latency = 0;

    pa_assert(u);
    pa_assert(ss);

    if (u->fd >= 0) {
        latency = pa_bytes_to_usec(get_playback_buffered_bytes(u), ss);

        if (u->memchunk.memblock)
            latency += pa_bytes_to_usec(u->memchunk.length, ss);
    }

    return latency;
}
/* Return the capture latency: audio the device has recorded but we have
 * not yet read. */
static pa_usec_t source_get_latency(struct userdata *u, pa_sample_spec *ss) {
    pa_usec_t r = 0;
    audio_info_t info;

    pa_assert(u);
    pa_assert(ss);

    /* Was "if (u->fd)": fd 0 is a valid descriptor and a negative fd is
     * not; use the same open-check as the sibling sink_get_latency(). */
    if (u->fd >= 0) {
        pa_usec_t recorded, consumed;
        int err = ioctl(u->fd, AUDIO_GETINFO, &info);
        pa_assert(err >= 0);

        recorded = pa_bytes_to_usec(get_recorded_bytes(u), ss);
        consumed = pa_bytes_to_usec(u->read_bytes, ss);

        /* pa_usec_t is unsigned: clamp instead of underflowing if the two
         * counters are momentarily inconsistent. */
        r = recorded > consumed ? recorded - consumed : 0;
    }

    return r;
}
/* Return the number of bytes written to the device but not yet played,
 * i.e. data still buffered in the hardware/driver.
 *
 * The device exposes only a 32-bit played-samples counter, so a software
 * high word (play_samples_msw) extends it to 64 bits. Also feeds the
 * smoother with the current playback position as a side effect. */
static uint64_t get_playback_buffered_bytes(struct userdata *u) {
    audio_info_t info;
    uint64_t played_bytes;
    int err;

    pa_assert(u->sink);

    err = ioctl(u->fd, AUDIO_GETINFO, &info);
    pa_assert(err >= 0);

    /* Handle wrap-around of the device's sample counter, which is a uint_32. */
    if (u->prev_playback_samples > info.play.samples) {
        /*
         * Unfortunately info.play.samples can sometimes go backwards, even before it wraps!
         * The bug seems to be absent on Solaris x86 nv117 with audio810 driver, at least on this (UP) machine.
         * The bug is present on a different (SMP) machine running Solaris x86 nv103 with audioens driver.
         * An earlier revision of this file mentions the same bug independently (unknown configuration).
         */
        if (u->prev_playback_samples + info.play.samples < 240000) {
            /* Small combined value: treat as a genuine 32-bit wrap and bump
             * the software high word. (240000 is a heuristic threshold that
             * separates a wrap from the backwards-jump bug above.) */
            ++u->play_samples_msw;
        } else {
            /* NOTE(review): the message says "bytes" but the value is a
             * sample-count difference printed with %d on an unsigned
             * expression — confirm and consider fixing the wording. */
            pa_log_debug("play.samples went backwards %d bytes", u->prev_playback_samples - info.play.samples);
        }
    }
    u->prev_playback_samples = info.play.samples;

    /* 64-bit played sample count (high word + hardware counter) in bytes. */
    played_bytes = (((uint64_t)u->play_samples_msw << 32) + info.play.samples) * u->frame_size;

    /* Keep the latency smoother up to date with the playback position. */
    pa_smoother_put(u->smoother, pa_rtclock_now(), pa_bytes_to_usec(played_bytes, &u->sink->sample_spec));

    /* written - played = still buffered; clamp at 0 in case the counters
     * are momentarily inconsistent. */
    if (u->written_bytes > played_bytes)
        return u->written_bytes - played_bytes;
    else
        return 0;
}
/* PulseAudio stream-state callback for the Qt capture stream.
 *
 * Mostly informational: logs buffer metrics when the stream becomes
 * ready (debug builds only) and wakes up any thread blocked on the
 * threaded mainloop when the stream fails. */
static void inputStreamStateCallback(pa_stream *stream, void *userdata)
{
    Q_UNUSED(userdata);

    pa_stream_state_t state = pa_stream_get_state(stream);
#ifdef DEBUG_PULSE
    qDebug() << "Stream state: " << QPulseAudioInternal::stateToQString(state);
#endif
    switch (state) {
        case PA_STREAM_CREATING:
            break;
        case PA_STREAM_READY: {
#ifdef DEBUG_PULSE
            QPulseAudioInput *audioInput = static_cast<QPulseAudioInput*>(userdata);
            const pa_buffer_attr *buffer_attr = pa_stream_get_buffer_attr(stream);
            qDebug() << "*** maxlength: " << buffer_attr->maxlength;
            qDebug() << "*** prebuf: " << buffer_attr->prebuf;
            qDebug() << "*** fragsize: " << buffer_attr->fragsize;
            qDebug() << "*** minreq: " << buffer_attr->minreq;
            qDebug() << "*** tlength: " << buffer_attr->tlength;
            pa_sample_spec spec = QPulseAudioInternal::audioFormatToSampleSpec(audioInput->format());
            qDebug() << "*** bytes_to_usec: " << pa_bytes_to_usec(buffer_attr->fragsize, &spec);
#endif
        }
            break;
        case PA_STREAM_TERMINATED:
            break;
        case PA_STREAM_FAILED:
        default:
            qWarning() << QString("Stream error: %1").arg(pa_strerror(pa_context_errno(pa_stream_get_context(stream))));
            /* Unblock any thread waiting on the mainloop so it can observe
             * the failure. */
            QPulseAudioEngine *pulseEngine = QPulseAudioEngine::instance();
            pa_threaded_mainloop_signal(pulseEngine->mainloop(), 0);
            break;
    }
}
/* Total microseconds of audio processed so far, derived from the running
 * byte counter and the stream's sample spec. */
qint64 QPulseAudioInput::processedUSecs() const
{
    const pa_sample_spec spec =
        QPulseAudioInternal::audioFormatToSampleSpec(m_format);

    return pa_bytes_to_usec(m_totalTimeValue, &spec);
}
/* Handle messages sent to the sink from other threads.
 *
 * GET_LATENCY: write position (offset + pending memchunk) minus the
 * smoother's estimated playback position.
 * SINK_MESSAGE_PASS_SOCKET: adopt the connection fd into our rtpoll. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {
        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t w, r;

            /* r: smoothed playback position; w: write position. */
            r = pa_smoother_get(u->smoother, pa_rtclock_now());
            w = pa_bytes_to_usec((uint64_t) u->offset + u->memchunk.length, &u->sink->sample_spec);

            /* Stored through an int64_t pointer — the cast suggests a
             * negative latency is intended to be representable here. */
            *((int64_t*) data) = (int64_t)w - r;
            return 0;
        }

        case SINK_MESSAGE_PASS_SOCKET: {
            struct pollfd *pollfd;

            pa_assert(!u->rtpoll_item);

            /* One pollfd slot; polled "never" until events are enabled
             * elsewhere. */
            u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);

            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
            pollfd->fd = u->fd;
            pollfd->events = pollfd->revents = 0;
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Called from I/O thread context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    if (code == PA_SINK_MESSAGE_GET_LATENCY) {
        pa_usec_t *latency = data;

        /* The sink is _put() before the sink input is, so let's
         * make sure we don't access it yet */
        if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
            !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
            *latency = 0;
            return 0;
        }

        /* Master sink latency plus the data queued in our sink input's
         * render queue. */
        *latency = pa_sink_get_latency_within_thread(u->sink_input->sink) +
            pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq),
                             &u->sink_input->sink->sample_spec);
        return 0;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Handle sink messages: log cork/state transitions after the default
 * handler runs, and report our memchunk backlog as the latency. */
static int sink_process_msg(pa_msgobject * o, int code, void *data, int64_t offset, pa_memchunk * chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {
        case PA_SINK_MESSAGE_SET_STATE: {
            int requested = PA_PTR_TO_UINT(data);
            int r = pa_sink_process_msg(o, code, data, offset, chunk);

            if (r >= 0)
                pa_log("sink cork req state =%d, now state=%d\n", requested, (int) (u->sink->state));

            return r;
        }

        case PA_SINK_MESSAGE_GET_LATENCY: {
            /* Latency is just the data still sitting in our memchunk. */
            *((pa_usec_t *) data) = pa_bytes_to_usec(u->memchunk_sink.length, &u->sink->sample_spec);
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Report the bytes pending on the capture fd (via FIONREAD, where
 * available) as the source latency; delegate everything else. */
static int source_process_msg( pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    if (code == PA_SOURCE_MESSAGE_GET_LATENCY) {
        size_t pending = 0;

#ifdef FIONREAD
        int avail;

        /* Ask the kernel how much input is waiting on the fd. */
        if (ioctl(u->fd, FIONREAD, &avail) >= 0 && avail > 0)
            pending = (size_t) avail;
#endif

        *((int64_t*) data) = pa_bytes_to_usec(pending, &u->source->sample_spec);
        return 0;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Render audio from the sink and ship it out until we are one block of
 * time ahead of 'now'. Each request is capped at 16 KiB so a huge
 * max_request cannot produce pathologically large chunks. */
static void process_render(struct userdata *u, pa_usec_t now) {
    pa_memchunk chunk;
    size_t request_bytes; /* size_t: matches thread_info.max_request, no narrowing */

    pa_assert(u);

    /* With a very large configured latency max_request gets huge; the
     * got_max_latency flag short-circuits rendering entirely. */
    if (u->got_max_latency) {
        return;
    }

    while (u->timestamp < now + u->block_usec) {
        request_bytes = u->sink->thread_info.max_request;
        request_bytes = MIN(request_bytes, 16 * 1024);

        pa_sink_render(u->sink, request_bytes, &chunk);

        data_send(u, &chunk);
        pa_memblock_unref(chunk.memblock);

        /* Advance the virtual clock by what we actually rendered. */
        u->timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);
    }
}
/* Called from I/O thread context */
static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    if (code == PA_SOURCE_MESSAGE_GET_LATENCY) {
        pa_usec_t *latency = data;

        /* The source is _put() before the source output is, so let's
         * make sure we don't access it in that time. Also, the
         * source output is first shut down, the source second. */
        if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
            !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
            *latency = 0;
            return 0;
        }

        /* Master source latency plus the data queued in our source
         * output's delay queue. */
        *latency = pa_source_get_latency_within_thread(u->source_output->source) +
            pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq),
                             &u->source_output->source->sample_spec);
        return 0;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Rewind the sink by the requested number of bytes, limited by how much
 * audio is still "in flight" (rendered ahead of the current time). If no
 * rewind is possible, complete the request with a zero-byte rewind. */
static void process_rewind(struct userdata *u, pa_usec_t now) {
    size_t rewind_nbytes, in_buffer;
    pa_usec_t delay;

    pa_assert(u);

    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    /* size_t is unsigned, so the original "<= 0" was really "== 0";
     * say what we mean. */
    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state) || rewind_nbytes == 0)
        goto do_nothing;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    /* Nothing rendered ahead of now — nothing to take back. */
    if (u->timestamp <= now)
        goto do_nothing;

    delay = u->timestamp - now;
    in_buffer = pa_usec_to_bytes(delay, &u->sink->sample_spec);

    if (in_buffer == 0)
        goto do_nothing;

    /* Cannot rewind more than what is still buffered. */
    if (rewind_nbytes > in_buffer)
        rewind_nbytes = in_buffer;

    pa_sink_process_rewind(u->sink, rewind_nbytes);
    u->timestamp -= pa_bytes_to_usec(rewind_nbytes, &u->sink->sample_spec);

    pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
    return;

do_nothing:
    pa_sink_process_rewind(u->sink, 0);
}
/* Called from I/O thread context.
 *
 * VOICE_SOURCE_SET_UL_DEADLINE: record the uplink deadline (passed in
 * 'offset').
 * GET_LATENCY: master source latency plus our uplink queue backlog. */
static int voip_source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case VOICE_SOURCE_SET_UL_DEADLINE: {
            u->ul_deadline = offset;
            /* Cast explicitly: "%lld" requires long long, and int64_t is
             * not guaranteed to be long long on every platform. */
            pa_log_debug("Uplink deadline set to %lld (%lld usec from now)",
                         (long long) u->ul_deadline,
                         (long long) (u->ul_deadline - pa_rtclock_now()));
            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t usec = 0;

            /* Start from the master source's latency... */
            if (PA_MSGOBJECT(u->master_source)->process_msg(
                    PA_MSGOBJECT(u->master_source), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
                usec = 0;

            /* ...and add the data queued in the uplink memblockq. */
            usec += pa_bytes_to_usec(pa_memblockq_get_length(u->ul_memblockq), &u->aep_sample_spec);
            *((pa_usec_t*) data) = usec;
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Render data from connected sink inputs and discard it, advancing the
 * virtual clock until we are one block ahead of 'now'. */
static void process_render(struct userdata *u, pa_usec_t now) {
    size_t consumed = 0;

    pa_assert(u);

    /* This is the configured latency. Sink inputs connected to us might not
     * have a single frame more than the maxrequest value queued. Hence: at
     * maximum read this many bytes from the sink inputs. */

    /* Fill the buffer up the latency size */
    while (u->timestamp < now + u->block_usec) {
        pa_memchunk chunk;

        pa_sink_render(u->sink, u->sink->thread_info.max_request, &chunk);
        pa_memblock_unref(chunk.memblock);

        u->timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);

        consumed += chunk.length;
        if (consumed >= u->sink->thread_info.max_request)
            break;
    }
}
static int thread_read(struct userdata *u) { void *p; ssize_t readd; pa_memchunk chunk; chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) u->buffer_size); p = pa_memblock_acquire(chunk.memblock); readd = u->stream->read(u->stream, (uint8_t*) p, pa_memblock_get_length(chunk.memblock)); pa_memblock_release(chunk.memblock); if (readd < 0) { pa_log("Failed to read from stream. (err %i)", readd); goto end; } u->timestamp += pa_bytes_to_usec(readd, &u->source->sample_spec); chunk.index = 0; chunk.length = readd; if (chunk.length > 0) pa_source_post(u->source, &chunk); end: pa_memblock_unref(chunk.memblock); return 0; }
/* Called from main context */ static void adjust_rates(struct userdata *u) { size_t buffer, fs; uint32_t old_rate, base_rate, new_rate; pa_usec_t buffer_latency; pa_assert(u); pa_assert_ctl_context(); pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL); pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL); buffer = u->latency_snapshot.sink_input_buffer + u->latency_snapshot.source_output_buffer; if (u->latency_snapshot.recv_counter <= u->latency_snapshot.send_counter) buffer += (size_t) (u->latency_snapshot.send_counter - u->latency_snapshot.recv_counter); else buffer += PA_CLIP_SUB(buffer, (size_t) (u->latency_snapshot.recv_counter - u->latency_snapshot.send_counter)); buffer_latency = pa_bytes_to_usec(buffer, &u->sink_input->sample_spec); pa_log_debug("Loopback overall latency is %0.2f ms + %0.2f ms + %0.2f ms = %0.2f ms", (double) u->latency_snapshot.sink_latency / PA_USEC_PER_MSEC, (double) buffer_latency / PA_USEC_PER_MSEC, (double) u->latency_snapshot.source_latency / PA_USEC_PER_MSEC, ((double) u->latency_snapshot.sink_latency + buffer_latency + u->latency_snapshot.source_latency) / PA_USEC_PER_MSEC); pa_log_debug("Should buffer %zu bytes, buffered at minimum %zu bytes", u->latency_snapshot.max_request*2, u->latency_snapshot.min_memblockq_length); fs = pa_frame_size(&u->sink_input->sample_spec); old_rate = u->sink_input->sample_spec.rate; base_rate = u->source_output->sample_spec.rate; if (u->latency_snapshot.min_memblockq_length < u->latency_snapshot.max_request*2) new_rate = base_rate - (((u->latency_snapshot.max_request*2 - u->latency_snapshot.min_memblockq_length) / fs) *PA_USEC_PER_SEC)/u->adjust_time; else new_rate = base_rate + (((u->latency_snapshot.min_memblockq_length - u->latency_snapshot.max_request*2) / fs) *PA_USEC_PER_SEC)/u->adjust_time; if (new_rate < (uint32_t) 
(base_rate*0.8) || new_rate > (uint32_t) (base_rate*1.25)) { pa_log_warn("Sample rates too different, not adjusting (%u vs. %u).", base_rate, new_rate); new_rate = base_rate; } else { if (base_rate < new_rate + 20 && new_rate < base_rate + 20) new_rate = base_rate; /* Do the adjustment in small steps; 2‰ can be considered inaudible */ if (new_rate < (uint32_t) (old_rate*0.998) || new_rate > (uint32_t) (old_rate*1.002)) { pa_log_info("New rate of %u Hz not within 2‰ of %u Hz, forcing smaller adjustment", new_rate, old_rate); new_rate = PA_CLAMP(new_rate, (uint32_t) (old_rate*0.998), (uint32_t) (old_rate*1.002)); } } pa_sink_input_set_rate(u->sink_input, new_rate); pa_log_debug("[%s] Updated sampling rate to %lu Hz.", u->sink_input->sink->name, (unsigned long) new_rate); pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time); }
/* Return the sink latency for an OSS device.
 *
 * Preferred path: SNDCTL_DSP_GETODELAY (or AUDIO_GETBUFINFO on NetBSD
 * lacking GETODELAY); fallback: estimate from SNDCTL_DSP_GETOSPACE.
 * A failing ioctl disables the corresponding method for future calls. */
static pa_usec_t io_sink_get_latency(struct userdata *u) {
    pa_usec_t r = 0;

    pa_assert(u);

    if (u->use_getodelay) {
        int arg;

#if defined(__NetBSD__) && !defined(SNDCTL_DSP_GETODELAY)
#if defined(AUDIO_GETBUFINFO)
        struct audio_info info;

        if (syscall(SYS_ioctl, u->fd, AUDIO_GETBUFINFO, &info) < 0) {
            pa_log_info("Device doesn't support AUDIO_GETBUFINFO: %s", pa_cstrerror(errno));
            u->use_getodelay = 0;
        } else {
            /* Unplayed bytes plus half a block as a fudge factor. */
            arg = info.play.seek + info.blocksize / 2;
            r = pa_bytes_to_usec((size_t) arg, &u->sink->sample_spec);
        }
#else
        pa_log_info("System doesn't support AUDIO_GETBUFINFO");
        u->use_getodelay = 0;
#endif
#else
        if (ioctl(u->fd, SNDCTL_DSP_GETODELAY, &arg) < 0) {
            pa_log_info("Device doesn't support SNDCTL_DSP_GETODELAY: %s", pa_cstrerror(errno));
            u->use_getodelay = 0;
        } else
            r = pa_bytes_to_usec((size_t) arg, &u->sink->sample_spec);
#endif
    }

    if (!u->use_getodelay && u->use_getospace) {
        struct audio_buf_info info;

        if (ioctl(u->fd, SNDCTL_DSP_GETOSPACE, &info) < 0) {
            pa_log_info("Device doesn't support SNDCTL_DSP_GETOSPACE: %s", pa_cstrerror(errno));
            u->use_getospace = 0;
        } else
            /* NOTE(review): GETOSPACE's info.bytes reports *free* output
             * space; treating it directly as queued data looks
             * questionable — confirm against the fragment accounting. */
            r = pa_bytes_to_usec((size_t) info.bytes, &u->sink->sample_spec);
    }

    /* Data still pending in our partial memchunk counts as latency too. */
    if (u->memchunk.memblock)
        r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return r;
}
static pa_usec_t sink_get_latency(struct userdata *u) { uint32_t free_frags; MMTIME mmt; pa_assert(u); pa_assert(u->sink); memset(&mmt, 0, sizeof(mmt)); mmt.wType = TIME_BYTES; if (waveOutGetPosition(u->hwo, &mmt, sizeof(mmt)) == MMSYSERR_NOERROR) return pa_bytes_to_usec(u->written_bytes - mmt.u.cb, &u->sink->sample_spec); else { EnterCriticalSection(&u->crit); free_frags = u->free_ofrags; LeaveCriticalSection(&u->crit); return pa_bytes_to_usec((u->fragments - free_frags) * u->fragment_size, &u->sink->sample_spec); } }
/* Called from main context */
static pa_usec_t source_output_get_latency_cb(pa_source_output *o) {
    connection *conn;

    pa_source_output_assert_ref(o);

    conn = CONNECTION(o->userdata);
    pa_assert(conn);

    /* Latency is whatever is queued in the connection's output queue. */
    return pa_bytes_to_usec(pa_memblockq_get_length(conn->output_memblockq), &conn->source_output->sample_spec);
}
/* Handle sink messages.
 *
 * SET_STATE: pause/resume the playback smoother across suspend
 * transitions so latency interpolation stays accurate.
 * GET_LATENCY: write position minus smoothed playback position,
 * clamped at 0.
 * SINK_MESSAGE_PASS_SOCKET: adopt the connection fd into our rtpoll. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {
        case PA_SINK_MESSAGE_SET_STATE:
            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
                    pa_smoother_pause(u->smoother, pa_rtclock_now());
                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:
                    /* Only resume when actually coming out of suspend. */
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED)
                        pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    /* Nothing to do for these states. */
                    ;
            }
            break;

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t w, r;

            /* r: smoothed playback position; w: write position. */
            r = pa_smoother_get(u->smoother, pa_rtclock_now());
            w = pa_bytes_to_usec((uint64_t) u->offset + u->memchunk.length, &u->sink->sample_spec);

            /* Clamp at 0: pa_usec_t is unsigned. */
            *((pa_usec_t*) data) = w > r ? w - r : 0;
            return 0;
        }

        case SINK_MESSAGE_PASS_SOCKET: {
            struct pollfd *pollfd;

            pa_assert(!u->rtpoll_item);

            /* One pollfd slot; polled "never" until events are enabled
             * elsewhere. */
            u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);

            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
            pollfd->fd = u->fd;
            pollfd->events = pollfd->revents = 0;
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Return the capture latency estimated from the input fragment counters. */
static pa_usec_t source_get_latency(struct userdata *u) {
    uint32_t free_ifrags;

    pa_assert(u);
    pa_assert(u->source);

    EnterCriticalSection(&u->crit);
    free_ifrags = u->free_ifrags;
    LeaveCriticalSection(&u->crit);

    /* NOTE(review): using the *free* fragment count (+1) here looks
     * suspicious — captured-but-unread data would normally be the used
     * fragments. Behavior preserved as-is; verify against the capture
     * loop's fragment accounting. */
    return pa_bytes_to_usec((free_ifrags + 1) * u->fragment_size, &u->source->sample_spec);
}
/* Called from I/O thread context */
static int sink_input_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct session *s = PA_SINK_INPUT(o)->userdata;

    if (code == PA_SINK_INPUT_MESSAGE_GET_LATENCY) {
        /* Report our queued data; the default handler invoked below adds
         * the extra latency introduced by the resampler on top. */
        *((pa_usec_t*) data) = pa_bytes_to_usec(pa_memblockq_get_length(s->memblockq), &s->sink_input->sample_spec);
    }

    return pa_sink_input_process_msg(o, code, data, offset, chunk);
}
/* Called from I/O thread context */
static int source_output_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u;

    pa_assert_se(u = PA_SOURCE_OUTPUT(o)->userdata);

    if (code == PA_SOURCE_OUTPUT_MESSAGE_GET_LATENCY) {
        /* Report our queued data; the default handler invoked below adds
         * the extra latency introduced by the resampler on top. */
        *((pa_usec_t*) data) = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->source_output->sample_spec);
    }

    return pa_source_output_process_msg(o, code, data, offset, chunk);
}
/* Called from main context.
 *
 * Computes the current loopback latency from the most recent latency
 * snapshot, derives the latency we would have at the base rate, and
 * hands the difference to rate_controller() to pick a new sample rate
 * for the sink input. */
static void adjust_rates(struct userdata *u) {
    size_t buffer;
    uint32_t old_rate, base_rate, new_rate;
    int32_t latency_difference;
    pa_usec_t current_buffer_latency, snapshot_delay, current_source_sink_latency, current_latency, latency_at_optimum_rate;
    pa_usec_t final_latency;

    pa_assert(u);
    pa_assert_ctl_context();

    /* Rates and latencies*/
    old_rate = u->sink_input->sample_spec.rate;
    base_rate = u->source_output->sample_spec.rate;
    buffer = u->latency_snapshot.sink_input_buffer;

    /* Add data sent but not yet received; if the receive counter is ahead,
     * clamp-subtract the overshoot instead. */
    if (u->latency_snapshot.recv_counter <= u->latency_snapshot.send_counter)
        buffer += (size_t) (u->latency_snapshot.send_counter - u->latency_snapshot.recv_counter);
    else
        buffer = PA_CLIP_SUB(buffer, (size_t) (u->latency_snapshot.recv_counter - u->latency_snapshot.send_counter));

    current_buffer_latency = pa_bytes_to_usec(buffer, &u->sink_input->sample_spec);
    /* The two snapshots were taken at slightly different times; correct
     * the combined sink+source latency by that skew. */
    snapshot_delay = u->latency_snapshot.source_timestamp - u->latency_snapshot.sink_timestamp;
    current_source_sink_latency = u->latency_snapshot.sink_latency + u->latency_snapshot.source_latency - snapshot_delay;

    /* Current latency */
    current_latency = current_source_sink_latency + current_buffer_latency;

    /* Latency at base rate */
    latency_at_optimum_rate = current_source_sink_latency + current_buffer_latency * old_rate / base_rate;

    final_latency = u->latency;
    latency_difference = (int32_t)((int64_t)latency_at_optimum_rate - final_latency);

    pa_log_debug("Loopback overall latency is %0.2f ms + %0.2f ms + %0.2f ms = %0.2f ms",
                 (double) u->latency_snapshot.sink_latency / PA_USEC_PER_MSEC,
                 (double) current_buffer_latency / PA_USEC_PER_MSEC,
                 (double) u->latency_snapshot.source_latency / PA_USEC_PER_MSEC,
                 (double) current_latency / PA_USEC_PER_MSEC);

    pa_log_debug("Loopback latency at base rate is %0.2f ms", (double)latency_at_optimum_rate / PA_USEC_PER_MSEC);

    /* Calculate new rate */
    new_rate = rate_controller(base_rate, u->adjust_time, latency_difference);

    /* Set rate */
    pa_sink_input_set_rate(u->sink_input, new_rate);
    pa_log_debug("[%s] Updated sampling rate to %lu Hz.", u->sink_input->sink->name, (unsigned long) new_rate);
}
/* Handle control messages for the Qubes vchan-backed source.
 *
 * SET_STATE: run the default handler, then tell the remote end (via the
 * rec_ctrl vchan) to start or stop recording when the state crosses the
 * RUNNING boundary.
 * GET_LATENCY: report the data pending in our partial memchunk. */
static int source_process_msg(pa_msgobject * o, int code, void *data, int64_t offset, pa_memchunk * chunk) {
    int r;
    struct userdata *u = PA_SOURCE(o)->userdata;
    int state;

    switch (code) {
        case PA_SOURCE_MESSAGE_SET_STATE:
            state = PA_PTR_TO_UINT(data);
            r = pa_source_process_msg(o, code, data, offset, chunk);
            if (r >= 0) {
                pa_log("source cork req state =%d, now state=%d\n", state, (int) (u->source->state));
                uint32_t cmd = 0;
                /* Entering RUNNING -> start capture; leaving RUNNING ->
                 * stop capture. Other transitions need no vchan command. */
                if (u->source->state != PA_SOURCE_RUNNING && state == PA_SOURCE_RUNNING)
                    cmd = QUBES_PA_SOURCE_START_CMD;
                else if (u->source->state == PA_SOURCE_RUNNING && state != PA_SOURCE_RUNNING)
                    cmd = QUBES_PA_SOURCE_STOP_CMD;
                if (cmd != 0) {
                    if (libvchan_send(u->rec_ctrl, (char*)&cmd, sizeof(cmd)) < 0) {
                        pa_log("vchan: failed to send record cmd");
                        /* This is a problem in case of enabling recording, in case
                         * of QUBES_PA_SOURCE_STOP_CMD it can happen that remote end
                         * is already disconnected, so indeed will not send further data.
                         * This can happen for example when we terminate the
                         * process because of pacat in dom0 has disconnected. */
                        if (state == PA_SOURCE_RUNNING)
                            return -1;
                        else
                            return r;
                    }
                }
            }
            return r;

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            size_t n = 0;
            n += u->memchunk_source.length;
            *((pa_usec_t *) data) = pa_bytes_to_usec(n, &u->source->sample_spec);
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Called from main context */ static void adjust_rates(struct userdata *u) { size_t buffer, fs; uint32_t old_rate, base_rate, new_rate; pa_usec_t buffer_latency; pa_assert(u); pa_assert_ctl_context(); pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL); pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL); buffer = u->latency_snapshot.sink_input_buffer + u->latency_snapshot.source_output_buffer; if (u->latency_snapshot.recv_counter <= u->latency_snapshot.send_counter) buffer += (size_t) (u->latency_snapshot.send_counter - u->latency_snapshot.recv_counter); else buffer += PA_CLIP_SUB(buffer, (size_t) (u->latency_snapshot.recv_counter - u->latency_snapshot.send_counter)); buffer_latency = pa_bytes_to_usec(buffer, &u->sink_input->sample_spec); pa_log_info("Loopback overall latency is %0.2f ms + %0.2f ms + %0.2f ms = %0.2f ms", (double) u->latency_snapshot.sink_latency / PA_USEC_PER_MSEC, (double) buffer_latency / PA_USEC_PER_MSEC, (double) u->latency_snapshot.source_latency / PA_USEC_PER_MSEC, ((double) u->latency_snapshot.sink_latency + buffer_latency + u->latency_snapshot.source_latency) / PA_USEC_PER_MSEC); pa_log_info("Should buffer %zu bytes, buffered at minimum %zu bytes", u->latency_snapshot.max_request*2, u->latency_snapshot.min_memblockq_length); fs = pa_frame_size(&u->sink_input->sample_spec); old_rate = u->sink_input->sample_spec.rate; base_rate = u->source_output->sample_spec.rate; if (u->latency_snapshot.min_memblockq_length < u->latency_snapshot.max_request*2) new_rate = base_rate - (((u->latency_snapshot.max_request*2 - u->latency_snapshot.min_memblockq_length) / fs) *PA_USEC_PER_SEC)/u->adjust_time; else new_rate = base_rate + (((u->latency_snapshot.min_memblockq_length - u->latency_snapshot.max_request*2) / fs) *PA_USEC_PER_SEC)/u->adjust_time; pa_log_info("Old rate %lu Hz, new rate 
%lu Hz", (unsigned long) old_rate, (unsigned long) new_rate); pa_sink_input_set_rate(u->sink_input, new_rate); pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time); }
/* Worker thread for the null sink: renders and discards audio in real
 * time, sleeping on the rtpoll until the next block is due or a shutdown
 * request arrives through the thread message queue. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    pa_rtclock_get(&u->timestamp);

    for (;;) {
        int ret;

        /* Render some data and drop it immediately */
        if (u->sink->thread_info.state == PA_SINK_RUNNING) {
            struct timeval now;

            pa_rtclock_get(&now);

            /* Only consume a block once the virtual clock has caught up
             * with real time. */
            if (pa_timeval_cmp(&u->timestamp, &now) <= 0) {
                pa_sink_skip(u->sink, u->block_size);
                pa_timeval_add(&u->timestamp, pa_bytes_to_usec(u->block_size, &u->sink->sample_spec));
            }

            /* Sleep until the next block is due. */
            pa_rtpoll_set_timer_absolute(u->rtpoll, &u->timestamp);
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
            goto fail;

        /* ret == 0 means a clean shutdown was requested. */
        if (ret == 0)
            goto finish;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
static pa_usec_t io_source_get_latency(struct userdata *u) { pa_usec_t r = 0; pa_assert(u); if (u->use_getispace) { struct audio_buf_info info; if (ioctl(u->fd, SNDCTL_DSP_GETISPACE, &info) < 0) { pa_log_info("Device doesn't support SNDCTL_DSP_GETISPACE: %s", pa_cstrerror(errno)); u->use_getispace = 0; } else r = pa_bytes_to_usec((size_t) info.bytes, &u->source->sample_spec); } return r; }