/* Called from I/O thread context */
static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    if (code == PA_SINK_MESSAGE_GET_LATENCY) {
        pa_usec_t *latency = (pa_usec_t*) data;

        /* The sink is _put() before the sink input is, so let's
         * make sure we don't access it in that time. Also, the
         * sink input is first shut down, the sink second. */
        if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
            !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
            *latency = 0;
            return 0;
        }

        /* Latency of the master sink plus whatever is still buffered
         * inside our own sink input's render queue. */
        *latency = pa_sink_get_latency_within_thread(u->sink_input->sink) +
                   pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq),
                                    &u->sink_input->sink->sample_spec);
        return 0;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(state))
        return 0;

    if (state != PA_SINK_RUNNING)
        /* Nothing to do: if the sink becomes idle or suspended,
         * module-suspend-idle handles the sources later. */
        return 0;

    /* Need to wake up the source if it was suspended. */
    pa_source_suspend(u->source, FALSE, PA_SUSPEND_ALL);

    /* FIXME: if there's no client connected, the source will suspend
     * and playback will be stuck. You'd want to prevent the source
     * from sleeping when the uplink sink is active; even if the audio
     * is discarded at least the app isn't stuck. */
    return 0;
}
static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {
        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t remote_latency = 0;
            int negative;

            /* Report zero latency whenever the remote stream cannot be
             * queried: our sink not linked yet, no stream, stream not
             * ready, or the latency query itself failing. The checks
             * short-circuit in that order. */
            if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
                !u->stream ||
                pa_stream_get_state(u->stream) != PA_STREAM_READY ||
                pa_stream_get_latency(u->stream, &remote_latency, &negative) < 0)
                remote_latency = 0;

            *((pa_usec_t*) data) = remote_latency;
            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:
            /* Only (un)cork once the remote stream actually exists and is ready. */
            if (!u->stream || pa_stream_get_state(u->stream) != PA_STREAM_READY)
                break;

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
                case PA_SINK_SUSPENDED:
                    cork_stream(u, true);
                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:
                    cork_stream(u, false);
                    break;

                case PA_SINK_INVALID_STATE:
                case PA_SINK_INIT:
                case PA_SINK_UNLINKED:
                    break;
            }
            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Called from I/O thread context */
static void sink_input_detach_cb(pa_sink_input *i) {
    struct userdata *u;
    pa_sink *sink;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    sink = u->sink;

    /* Detach our sink from the I/O thread before clearing its rtpoll. */
    if (PA_SINK_IS_LINKED(sink->thread_info.state))
        pa_sink_detach_within_thread(sink);

    pa_sink_set_rtpoll(sink, NULL);
}
/* Called from main context */
static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* Forward the mute state only while both our sink and the
     * owning sink input are linked. */
    if (PA_SINK_IS_LINKED(pa_sink_get_state(s)) &&
        PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
        pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
}
/* Called from main context */
static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* Forward the volume only while both our sink and the
     * owning sink input are linked. */
    if (PA_SINK_IS_LINKED(pa_sink_get_state(s)) &&
        PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
        pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, TRUE);
}
/* Called from I/O thread context */
static void sink_request_rewind(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* Pass the rewind request on only while both ends are linked. */
    if (PA_SINK_IS_LINKED(u->sink->thread_info.state) &&
        PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
        pa_sink_input_request_rewind(u->sink_input, s->thread_info.rewind_nbytes, TRUE, FALSE, FALSE);
}
/* Called from main context */
static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* Cork the master's sink input while we are suspended,
     * but only if both ends are linked. */
    if (PA_SINK_IS_LINKED(state) &&
        PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
        pa_sink_input_cork(u->sink_input, state == PA_SINK_SUSPENDED);

    return 0;
}
/* Called from I/O thread context */
static void sink_request_rewind_cb(pa_sink *s) {
    struct userdata *u;
    size_t rewind_bytes;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
        !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
        return;

    /* Just hand this one over to the master sink, including whatever
     * we still have queued in our own memblockq. */
    rewind_bytes = s->thread_info.rewind_nbytes + pa_memblockq_get_length(u->memblockq);
    pa_sink_input_request_rewind(u->sink_input, rewind_bytes, TRUE, FALSE, FALSE);
}
/* Called from I/O thread context */
static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u;
    pa_usec_t requested;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
        !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
        return;

    /* Just hand this one over to the master sink */
    requested = pa_sink_get_requested_latency_within_thread(s);
    pa_sink_input_set_requested_latency_within_thread(u->sink_input, requested);
}
/* Called from I/O thread context */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state))
        return -1;

    /* Flush any rewind request that might be queued up before rendering. */
    pa_sink_process_rewind(u->sink, 0);

    pa_sink_render(u->sink, nbytes, chunk);
    return 0;
}
/* Called from I/O thread context */
static void sink_input_attach_cb(pa_sink_input *i) {
    struct userdata *u;
    pa_sink *master;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    master = i->sink;

    /* Mirror the master sink's I/O-thread parameters onto our sink. */
    pa_sink_set_rtpoll(u->sink, master->thread_info.rtpoll);
    pa_sink_set_latency_range_within_thread(u->sink, master->thread_info.min_latency, master->thread_info.max_latency);
    pa_sink_set_fixed_latency_within_thread(u->sink, master->thread_info.fixed_latency);
    pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));

    /* FIXME: Too small max_rewind:
     * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
    pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));

    if (PA_SINK_IS_LINKED(u->sink->thread_info.state))
        pa_sink_attach_within_thread(u->sink);
}
/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t rewind_amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    /* If the sink is not yet linked, there is nothing to rewind */
    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state))
        return;

    /* Rewind at most what our sink asked for, capped by what the
     * master actually rewound; then clear the pending request. */
    if (u->sink->thread_info.rewind_nbytes > 0) {
        rewind_amount = PA_MIN(u->sink->thread_info.rewind_nbytes, nbytes);
        u->sink->thread_info.rewind_nbytes = 0;
    }

    pa_sink_process_rewind(u->sink, rewind_amount);
}
/* Called from I/O thread context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            int64_t *latency = (int64_t*) data;

            /* The sink is _put() before the sink input is, so let's
             * make sure we don't access it yet */
            if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
                !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
                *latency = 0;
                return 0;
            }

            /* Latency of the master sink plus whatever is still buffered
             * inside our own sink input's render queue. */
            *latency = pa_sink_get_latency_within_thread(u->sink_input->sink, true) +
                       pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq),
                                        &u->sink_input->sink->sample_spec);
            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE: {
            pa_sink_state_t new_state = (pa_sink_state_t) PA_PTR_TO_UINT(data);

            /* When set to running or idle for the first time, request a rewind
             * of the master sink to make sure we are heard immediately */
            if ((new_state == PA_SINK_IDLE || new_state == PA_SINK_RUNNING) &&
                u->sink->thread_info.state == PA_SINK_INIT) {
                pa_log_debug("Requesting rewind due to state change.");
                pa_sink_input_request_rewind(u->sink_input, 0, false, true, true);
            }
            break;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Tunnel sink I/O thread: creates a libpulse context, connects it to the
 * remote server, and then pumps audio rendered from our local sink into the
 * remote stream until shutdown or failure. On failure it asks the main
 * thread to unload the module. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    pa_proplist *proplist;

    pa_assert(u);

    pa_log_debug("Thread starting up");
    pa_thread_mq_install(u->thread_mq);

    /* Build the libpulse context used to talk to the remote server. */
    proplist = tunnel_new_proplist(u);
    u->context = pa_context_new_with_proplist(u->thread_mainloop_api, "PulseAudio", proplist);
    pa_proplist_free(proplist);

    if (!u->context) {
        pa_log("Failed to create libpulse context");
        goto fail;
    }

    /* Optional authentication cookie for the remote server. */
    if (u->cookie_file && pa_context_load_cookie_from_file(u->context, u->cookie_file) != 0) {
        pa_log_error("Can not load cookie file!");
        goto fail;
    }

    pa_context_set_state_callback(u->context, context_state_cb, u);
    if (pa_context_connect(u->context, u->remote_server, PA_CONTEXT_NOAUTOSPAWN, NULL) < 0) {
        pa_log("Failed to connect libpulse context");
        goto fail;
    }

    for (;;) {
        int ret;

        /* Drive the thread-local mainloop. A negative return means quit()
         * was called: ret == 0 is a clean shutdown, anything else a failure. */
        if (pa_mainloop_iterate(u->thread_mainloop, 1, &ret) < 0) {
            if (ret == 0)
                goto finish;
            else
                goto fail;
        }

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
            pa_sink_process_rewind(u->sink, 0);

        /* Only push audio once the remote stream is ready and our sink is linked. */
        if (u->connected &&
            pa_stream_get_state(u->stream) == PA_STREAM_READY &&
            PA_SINK_IS_LINKED(u->sink->thread_info.state)) {
            size_t writable;

            writable = pa_stream_writable_size(u->stream);
            if (writable > 0) {
                if (u->transcode.encoding != -1) {
                    /* Compressed path: render exactly one codec frame worth
                     * of PCM, encode it, and ship the encoded bytes. */
                    pa_memchunk memchunk;
                    const void *p;
                    size_t nbBytes;
                    unsigned char *cbits;

                    pa_sink_render_full(u->sink, u->transcode.frame_size*u->transcode.channels*u->transcode.sample_size, &memchunk);

                    pa_assert(memchunk.length > 0);
                    pa_assert(memchunk.length >= u->transcode.frame_size*u->transcode.channels);

                    pa_log_debug("received memchunk length: %zu bytes", memchunk.length );

                    /* we have new data to write */
                    p = pa_memblock_acquire(memchunk.memblock);

                    /* NOTE(review): cbits appears to be allocated by the
                     * encoder and owned by the caller afterwards — freed
                     * below when nbBytes > 0. Confirm against
                     * pa_transcode_encode's contract. */
                    nbBytes = pa_transcode_encode(&u->transcode, (uint8_t*) p + memchunk.index, &cbits);
                    pa_log_debug("encoded length: %zu bytes", nbBytes);

                    /* TODO: Use pa_stream_begin_write() to reduce copying. */
                    ret = pa_stream_write_compressed(u->stream,
                                                    (uint8_t*) cbits,
                                                    nbBytes,
                                                    NULL,     /**< A cleanup routine for the data or NULL to request an internal copy */
                                                    0,        /** offset */
                                                    PA_SEEK_RELATIVE,
                                                    u->transcode.frame_size*u->transcode.channels*u->transcode.sample_size);
                    pa_memblock_release(memchunk.memblock);
                    pa_memblock_unref(memchunk.memblock);

                    if (nbBytes > 0)
                        free(cbits);

                    if (ret != 0) {
                        pa_log_error("Could not write data into the stream ... ret = %i", ret);
                        u->thread_mainloop_api->quit(u->thread_mainloop_api, TUNNEL_THREAD_FAILED_MAINLOOP);
                    }
                } else {
                    /* PCM path: render as much as the remote stream can take. */
                    pa_memchunk memchunk;
                    const void *p;

                    pa_sink_render_full(u->sink, writable, &memchunk);

                    pa_assert(memchunk.length > 0);

                    /* we have new data to write */
                    p = pa_memblock_acquire(memchunk.memblock);

                    /* TODO: Use pa_stream_begin_write() to reduce copying. */
                    ret = pa_stream_write(u->stream,
                                          (uint8_t*) p + memchunk.index,
                                          memchunk.length,
                                          NULL,     /**< A cleanup routine for the data or NULL to request an internal copy */
                                          0,        /** offset */
                                          PA_SEEK_RELATIVE);
                    pa_memblock_release(memchunk.memblock);
                    pa_memblock_unref(memchunk.memblock);

                    if (ret != 0) {
                        pa_log_error("Could not write data into the stream ... ret = %i", ret);
                        u->thread_mainloop_api->quit(u->thread_mainloop_api, TUNNEL_THREAD_FAILED_MAINLOOP);
                    }
                }
            }
        }
    }

fail:
    /* Ask the main thread to unload this module, then wait for shutdown. */
    pa_asyncmsgq_post(u->thread_mq->outq, PA_MSGOBJECT(u->module->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq->inq, PA_MESSAGE_SHUTDOWN);

finish:
    /* Tear down stream and context in that order. */
    if (u->stream) {
        pa_stream_disconnect(u->stream);
        pa_stream_unref(u->stream);
        u->stream = NULL;
    }

    if (u->context) {
        pa_context_disconnect(u->context);
        pa_context_unref(u->context);
        u->context = NULL;
    }

    pa_log_debug("Thread shutting down");
}
/* Render sink audio into free waveOut fragments and queue them to the
 * device. u->free_ofrags is shared with the waveOut callback thread,
 * so reads/writes of it are guarded by u->crit. */
static void do_write(struct userdata *u) {
    uint32_t free_frags;
    pa_memchunk memchunk;
    WAVEHDR *hdr;
    MMRESULT res;
    void *p;

    if (!u->sink)
        return;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return;

    /* Snapshot the free-fragment count under the lock. */
    EnterCriticalSection(&u->crit);
    free_frags = u->free_ofrags;
    LeaveCriticalSection(&u->crit);

    /* All fragments free while we weren't already in underflow means
     * the device drained everything before we refilled it. */
    if (!u->sink_underflow && (free_frags == u->fragments))
        pa_log_debug("WaveOut underflow!");

    while (free_frags) {
        hdr = &u->ohdrs[u->cur_ohdr];
        /* A previously queued header must be unprepared before reuse. */
        if (hdr->dwFlags & WHDR_PREPARED)
            waveOutUnprepareHeader(u->hwo, hdr, sizeof(WAVEHDR));

        hdr->dwBufferLength = 0;
        /* Fill the fragment from the sink, possibly across several
         * rendered chunks. */
        while (hdr->dwBufferLength < u->fragment_size) {
            size_t len;

            len = u->fragment_size - hdr->dwBufferLength;

            pa_sink_render(u->sink, len, &memchunk);

            pa_assert(memchunk.memblock);
            pa_assert(memchunk.length);

            /* The sink may return less than requested. */
            if (memchunk.length < len)
                len = memchunk.length;

            p = pa_memblock_acquire(memchunk.memblock);
            memcpy(hdr->lpData + hdr->dwBufferLength, (char*) p + memchunk.index, len);
            pa_memblock_release(memchunk.memblock);

            hdr->dwBufferLength += len;

            pa_memblock_unref(memchunk.memblock);
            memchunk.memblock = NULL;
        }

        /* Underflow detection */
        if (hdr->dwBufferLength == 0) {
            u->sink_underflow = 1;
            break;
        }
        u->sink_underflow = 0;

        res = waveOutPrepareHeader(u->hwo, hdr, sizeof(WAVEHDR));
        if (res != MMSYSERR_NOERROR)
            pa_log_error("Unable to prepare waveOut block: %d", res);

        res = waveOutWrite(u->hwo, hdr, sizeof(WAVEHDR));
        if (res != MMSYSERR_NOERROR)
            pa_log_error("Unable to write waveOut block: %d", res);

        u->written_bytes += hdr->dwBufferLength;

        /* One fewer fragment available; the device callback gives it back. */
        EnterCriticalSection(&u->crit);
        u->free_ofrags--;
        LeaveCriticalSection(&u->crit);

        free_frags--;
        u->cur_ohdr++;
        u->cur_ohdr %= u->fragments;
    }
}