/* Called from main context */
static void adjust_rates(struct userdata *u) {
    size_t buffer, fs;
    uint32_t old_rate, base_rate, new_rate;
    pa_usec_t buffer_latency;

    pa_assert(u);
    pa_assert_ctl_context();

    pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL);
    pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL);

    buffer = u->latency_snapshot.sink_input_buffer + u->latency_snapshot.source_output_buffer;

    if (u->latency_snapshot.recv_counter <= u->latency_snapshot.send_counter)
        buffer += (size_t) (u->latency_snapshot.send_counter - u->latency_snapshot.recv_counter);
    else
        buffer += PA_CLIP_SUB(buffer, (size_t) (u->latency_snapshot.recv_counter - u->latency_snapshot.send_counter));

    buffer_latency = pa_bytes_to_usec(buffer, &u->sink_input->sample_spec);

    pa_log_debug("Loopback overall latency is %0.2f ms + %0.2f ms + %0.2f ms = %0.2f ms",
                 (double) u->latency_snapshot.sink_latency / PA_USEC_PER_MSEC,
                 (double) buffer_latency / PA_USEC_PER_MSEC,
                 (double) u->latency_snapshot.source_latency / PA_USEC_PER_MSEC,
                 ((double) u->latency_snapshot.sink_latency + buffer_latency + u->latency_snapshot.source_latency) / PA_USEC_PER_MSEC);

    pa_log_debug("Should buffer %zu bytes, buffered at minimum %zu bytes",
                 u->latency_snapshot.max_request*2,
                 u->latency_snapshot.min_memblockq_length);

    fs = pa_frame_size(&u->sink_input->sample_spec);
    old_rate = u->sink_input->sample_spec.rate;
    base_rate = u->source_output->sample_spec.rate;

    if (u->latency_snapshot.min_memblockq_length < u->latency_snapshot.max_request*2)
        new_rate = base_rate - (((u->latency_snapshot.max_request*2 - u->latency_snapshot.min_memblockq_length) / fs) * PA_USEC_PER_SEC) / u->adjust_time;
    else
        new_rate = base_rate + (((u->latency_snapshot.min_memblockq_length - u->latency_snapshot.max_request*2) / fs) * PA_USEC_PER_SEC) / u->adjust_time;

    if (new_rate < (uint32_t) (base_rate*0.8) || new_rate > (uint32_t) (base_rate*1.25)) {
        pa_log_warn("Sample rates too different, not adjusting (%u vs. %u).", base_rate, new_rate);
        new_rate = base_rate;
    } else {
        if (base_rate < new_rate + 20 && new_rate < base_rate + 20)
            new_rate = base_rate;

        /* Do the adjustment in small steps; 2‰ can be considered inaudible */
        if (new_rate < (uint32_t) (old_rate*0.998) || new_rate > (uint32_t) (old_rate*1.002)) {
            pa_log_info("New rate of %u Hz not within 2‰ of %u Hz, forcing smaller adjustment", new_rate, old_rate);
            new_rate = PA_CLAMP(new_rate, (uint32_t) (old_rate*0.998), (uint32_t) (old_rate*1.002));
        }
    }

    pa_sink_input_set_rate(u->sink_input, new_rate);
    pa_log_debug("[%s] Updated sampling rate to %lu Hz.", u->sink_input->sink->name, (unsigned long) new_rate);

    pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
}

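/*
 * Worked example of the correction term above (a sketch with assumed numbers,
 * not values taken from the code): suppose the sample spec is 44100 Hz S16LE
 * stereo (fs = 4 bytes per frame), u->adjust_time is 10 * PA_USEC_PER_SEC and
 * the memblockq holds 3528 bytes (882 frames) less than the 2 * max_request
 * target. The first branch then gives
 *
 *     new_rate = 44100 - ((3528 / 4) * PA_USEC_PER_SEC) / 10000000
 *              = 44100 - 88 Hz,
 *
 * i.e. the sink input is resampled slightly below the base rate so that the
 * missing ~882 frames accumulate over the next 10 second adjustment cycle.
 */
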
/* Called from main context */
static void adjust_rates(struct userdata *u) {
    size_t buffer;
    uint32_t old_rate, base_rate, new_rate;
    int32_t latency_difference;
    pa_usec_t current_buffer_latency, snapshot_delay, current_source_sink_latency, current_latency, latency_at_optimum_rate;
    pa_usec_t final_latency;

    pa_assert(u);
    pa_assert_ctl_context();

    /* Rates and latencies */
    old_rate = u->sink_input->sample_spec.rate;
    base_rate = u->source_output->sample_spec.rate;

    buffer = u->latency_snapshot.sink_input_buffer;

    if (u->latency_snapshot.recv_counter <= u->latency_snapshot.send_counter)
        buffer += (size_t) (u->latency_snapshot.send_counter - u->latency_snapshot.recv_counter);
    else
        buffer = PA_CLIP_SUB(buffer, (size_t) (u->latency_snapshot.recv_counter - u->latency_snapshot.send_counter));

    current_buffer_latency = pa_bytes_to_usec(buffer, &u->sink_input->sample_spec);
    snapshot_delay = u->latency_snapshot.source_timestamp - u->latency_snapshot.sink_timestamp;
    current_source_sink_latency = u->latency_snapshot.sink_latency + u->latency_snapshot.source_latency - snapshot_delay;

    /* Current latency */
    current_latency = current_source_sink_latency + current_buffer_latency;

    /* Latency at base rate */
    latency_at_optimum_rate = current_source_sink_latency + current_buffer_latency * old_rate / base_rate;

    final_latency = u->latency;
    latency_difference = (int32_t) ((int64_t) latency_at_optimum_rate - final_latency);

    pa_log_debug("Loopback overall latency is %0.2f ms + %0.2f ms + %0.2f ms = %0.2f ms",
                 (double) u->latency_snapshot.sink_latency / PA_USEC_PER_MSEC,
                 (double) current_buffer_latency / PA_USEC_PER_MSEC,
                 (double) u->latency_snapshot.source_latency / PA_USEC_PER_MSEC,
                 (double) current_latency / PA_USEC_PER_MSEC);

    pa_log_debug("Loopback latency at base rate is %0.2f ms", (double) latency_at_optimum_rate / PA_USEC_PER_MSEC);

    /* Calculate new rate */
    new_rate = rate_controller(base_rate, u->adjust_time, latency_difference);

    /* Set rate */
    pa_sink_input_set_rate(u->sink_input, new_rate);
    pa_log_debug("[%s] Updated sampling rate to %lu Hz.", u->sink_input->sink->name, (unsigned long) new_rate);
}

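/*
 * rate_controller() is called above but not part of this excerpt. The sketch
 * below is only an assumption about its general shape: a simple proportional
 * controller that picks the rate which would cancel the latency error over
 * the next adjust_time interval, clamped so the deviation from the base rate
 * stays around 1 %. The real implementation may use a different control law;
 * the parameter names here are illustrative.
 */
static uint32_t rate_controller(uint32_t base_rate, pa_usec_t adjust_time, int32_t latency_difference_usec) {
    double new_rate;

    /* A positive latency_difference_usec means the measured latency is above
     * the target, so the sink input must consume faster than the base rate to
     * drain the excess; a negative value means it must consume slower so that
     * latency can build up again. */
    new_rate = (double) base_rate * (1.0 + (double) latency_difference_usec / (double) adjust_time);

    /* Never deviate by more than about 1 % from the base rate */
    new_rate = PA_CLAMP(new_rate, (double) base_rate * 0.99, (double) base_rate * 1.01);

    return (uint32_t) (new_rate + 0.5);
}
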
/* Called from output thread context */
static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    if (PA_SOURCE_OUTPUT_IS_LINKED(state) && o->thread_info.state == PA_SOURCE_OUTPUT_INIT) {
        u->skip = pa_usec_to_bytes(PA_CLIP_SUB(pa_source_get_latency_within_thread(o->source), u->latency), &o->sample_spec);
        pa_log_info("Skipping %lu bytes", (unsigned long) u->skip);
    }
}

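/*
 * Worked example for the skip calculation (assumed numbers): with a source
 * latency of 40 ms, a configured loopback latency u->latency of 25 ms and a
 * 44100 Hz S16LE stereo sample spec, PA_CLIP_SUB() leaves 15 ms, which
 * pa_usec_to_bytes() converts to about 661 frames (2644 bytes). That much
 * captured audio is dropped so the stream starts out close to the requested
 * latency. If the source latency is already below u->latency, PA_CLIP_SUB()
 * clamps the difference to zero and nothing is skipped.
 */
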
/* Called from main context */
static void adjust_rates(struct userdata *u) {
    size_t buffer, fs;
    uint32_t old_rate, base_rate, new_rate;
    pa_usec_t buffer_latency;

    pa_assert(u);
    pa_assert_ctl_context();

    pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL);
    pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, NULL, 0, NULL);

    buffer = u->latency_snapshot.sink_input_buffer + u->latency_snapshot.source_output_buffer;

    if (u->latency_snapshot.recv_counter <= u->latency_snapshot.send_counter)
        buffer += (size_t) (u->latency_snapshot.send_counter - u->latency_snapshot.recv_counter);
    else
        buffer += PA_CLIP_SUB(buffer, (size_t) (u->latency_snapshot.recv_counter - u->latency_snapshot.send_counter));

    buffer_latency = pa_bytes_to_usec(buffer, &u->sink_input->sample_spec);

    pa_log_info("Loopback overall latency is %0.2f ms + %0.2f ms + %0.2f ms = %0.2f ms",
                (double) u->latency_snapshot.sink_latency / PA_USEC_PER_MSEC,
                (double) buffer_latency / PA_USEC_PER_MSEC,
                (double) u->latency_snapshot.source_latency / PA_USEC_PER_MSEC,
                ((double) u->latency_snapshot.sink_latency + buffer_latency + u->latency_snapshot.source_latency) / PA_USEC_PER_MSEC);

    pa_log_info("Should buffer %zu bytes, buffered at minimum %zu bytes",
                u->latency_snapshot.max_request*2,
                u->latency_snapshot.min_memblockq_length);

    fs = pa_frame_size(&u->sink_input->sample_spec);
    old_rate = u->sink_input->sample_spec.rate;
    base_rate = u->source_output->sample_spec.rate;

    if (u->latency_snapshot.min_memblockq_length < u->latency_snapshot.max_request*2)
        new_rate = base_rate - (((u->latency_snapshot.max_request*2 - u->latency_snapshot.min_memblockq_length) / fs) * PA_USEC_PER_SEC) / u->adjust_time;
    else
        new_rate = base_rate + (((u->latency_snapshot.min_memblockq_length - u->latency_snapshot.max_request*2) / fs) * PA_USEC_PER_SEC) / u->adjust_time;

    pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);

    pa_sink_input_set_rate(u->sink_input, new_rate);

    pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
}

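/*
 * In this variant adjust_rates() re-arms u->time_event itself via
 * pa_core_rttime_restart(), so it only needs to be driven by a main-loop time
 * event. Below is a minimal sketch of such a callback, assuming the standard
 * pa_time_event_cb_t signature; the function name and body are assumptions of
 * this sketch, not part of the excerpt above.
 */
static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);
    pa_assert(a);
    pa_assert(u->time_event == e);

    /* Take new latency snapshots and correct the resampling rate */
    adjust_rates(u);
}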