/* Called from the IO thread: handle messages posted to the sink. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_SET_STATE:
            /* Re-arm the render timestamp whenever we leave SUSPENDED/INIT
             * for an opened state, so latency is measured from "now". */
            if (pa_sink_get_state(u->sink) == PA_SINK_SUSPENDED ||
                pa_sink_get_state(u->sink) == PA_SINK_INIT) {
                if (PA_PTR_TO_UINT(data) == PA_SINK_RUNNING ||
                    PA_PTR_TO_UINT(data) == PA_SINK_IDLE)
                    u->timestamp = pa_rtclock_now();
            }
            break;

        case PA_SINK_MESSAGE_GET_LATENCY: {
            /* Signed distance from now to the next render point; may be
             * negative when we are running behind schedule. */
            pa_usec_t now = pa_rtclock_now();

            *((int64_t*) data) = (int64_t) u->timestamp - (int64_t) now;
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* IO-thread sink message handler: logs cork/uncork transitions and
 * reports the bytes still parked in our staging memchunk as latency. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_SET_STATE: {
            /* Let the core perform the state change first, then log the
             * requested vs. resulting state. */
            int requested = PA_PTR_TO_UINT(data);
            int r = pa_sink_process_msg(o, code, data, offset, chunk);

            if (r >= 0)
                pa_log("sink cork req state =%d, now state=%d\n", requested, (int) (u->sink->state));

            return r;
        }

        case PA_SINK_MESSAGE_GET_LATENCY: {
            size_t pending = u->memchunk_sink.length;

            *((pa_usec_t *) data) = pa_bytes_to_usec(pending, &u->sink->sample_spec);
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* IO-thread sink message handler using struct timeval bookkeeping. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_SET_STATE:
            /* Restart the reference timestamp when we begin running. */
            if (PA_PTR_TO_UINT(data) == PA_SINK_RUNNING)
                pa_rtclock_get(&u->timestamp);
            break;

        case PA_SINK_MESSAGE_GET_LATENCY: {
            struct timeval tv;

            pa_rtclock_get(&tv);

            /* A reference timestamp in the future means nothing is
             * buffered yet: report zero latency. */
            if (pa_timeval_cmp(&u->timestamp, &tv) > 0)
                *((pa_usec_t*) data) = 0;
            else
                *((pa_usec_t*) data) = pa_timeval_diff(&u->timestamp, &tv);

            break;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* IO-thread source message handler (clocked/null style source). */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_SET_STATE:
            if (PA_PTR_TO_UINT(data) == PA_SOURCE_RUNNING)
                u->timestamp = pa_rtclock_now();
            break;

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t now = pa_rtclock_now();

            /* Time remaining until the next scheduled capture point... */
            pa_usec_t left_to_fill = u->timestamp > now ? u->timestamp - now : 0ULL;

            /* ...so latency is the portion of the block interval that has
             * already elapsed. */
            *((pa_usec_t*) data) = u->block_usec > left_to_fill ? u->block_usec - left_to_fill : 0ULL;
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* IO-thread sink message handler: drives the latency smoother across
 * suspend/resume and accepts the connection socket via PASS_SOCKET. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_SET_STATE:
            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
                    /* Freeze the smoother while suspended. */
                    pa_smoother_pause(u->smoother, pa_rtclock_now());
                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED)
                        pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }
            break;

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t written, played;

            played = pa_smoother_get(u->smoother, pa_rtclock_now());
            written = pa_bytes_to_usec((uint64_t) u->offset + u->memchunk.length, &u->sink->sample_spec);

            /* Clamp to zero: never report a negative latency. */
            *((pa_usec_t*) data) = written > played ? written - played : 0;
            return 0;
        }

        case SINK_MESSAGE_PASS_SOCKET: {
            struct pollfd *pollfd;

            pa_assert(!u->rtpoll_item);

            /* Register the freshly handed-over fd with our rtpoll. */
            u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
            pollfd->fd = u->fd;
            pollfd->events = pollfd->revents = 0;
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Called from IO thread context, except when it is not */
int pa_source_output_process_msg(pa_msgobject *mo, int code, void *userdata, int64_t offset, pa_memchunk* chunk) {
    pa_source_output *o = PA_SOURCE_OUTPUT(mo);

    pa_source_output_assert_ref(o);

    switch (code) {

        case PA_SOURCE_OUTPUT_MESSAGE_GET_LATENCY: {
            /* r[0] accumulates our delay-queue latency, r[1] the source's. */
            pa_usec_t *r = userdata;

            r[0] += pa_bytes_to_usec(pa_memblockq_get_length(o->thread_info.delay_memblockq), &o->source->sample_spec);
            r[1] += pa_source_get_latency_within_thread(o->source);
            return 0;
        }

        case PA_SOURCE_OUTPUT_MESSAGE_SET_RATE: {
            uint32_t rate = PA_PTR_TO_UINT(userdata);

            o->thread_info.sample_spec.rate = rate;
            pa_resampler_set_output_rate(o->thread_info.resampler, rate);
            return 0;
        }

        case PA_SOURCE_OUTPUT_MESSAGE_SET_STATE:
            pa_source_output_set_state_within_thread(o, PA_PTR_TO_UINT(userdata));
            return 0;

        case PA_SOURCE_OUTPUT_MESSAGE_SET_REQUESTED_LATENCY: {
            /* In/out parameter: caller passes the wish, we return what
             * was actually granted. */
            pa_usec_t *usec = userdata;

            *usec = pa_source_output_set_requested_latency_within_thread(o, *usec);
            return 0;
        }

        case PA_SOURCE_OUTPUT_MESSAGE_GET_REQUESTED_LATENCY: {
            pa_usec_t *r = userdata;

            *r = o->thread_info.requested_source_latency;
            return 0;
        }
    }

    return -PA_ERR_NOTIMPLEMENTED;
}
/* IO-thread sink message handler for the tunnel sink: latency comes from
 * the remote stream, cork state follows our local sink state. */
static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t remote_latency = 0;
            int negative;

            /* Until the sink is linked and the tunnel stream is READY we
             * can only report zero; pa_stream_get_latency() may also fail
             * transiently, in which case we report zero as well. */
            if (PA_SINK_IS_LINKED(u->sink->thread_info.state) &&
                u->stream &&
                pa_stream_get_state(u->stream) == PA_STREAM_READY &&
                pa_stream_get_latency(u->stream, &remote_latency, &negative) >= 0)
                *((pa_usec_t*) data) = remote_latency;
            else
                *((pa_usec_t*) data) = 0;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_STATE:
            /* Only forward cork/uncork while the remote stream exists
             * and is ready. */
            if (!u->stream || pa_stream_get_state(u->stream) != PA_STREAM_READY)
                break;

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    cork_stream(u, true);
                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:
                    cork_stream(u, false);
                    break;

                case PA_SINK_INVALID_STATE:
                case PA_SINK_INIT:
                case PA_SINK_UNLINKED:
                    break;
            }
            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
static void work(void *p) { pa_log_notice("CPU%i: Created thread.", PA_PTR_TO_UINT(p)); pa_make_realtime(12); #ifdef HAVE_PTHREAD_SETAFFINITY_NP { #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) cpuset_t mask; #else cpu_set_t mask; #endif CPU_ZERO(&mask); CPU_SET((size_t) PA_PTR_TO_UINT(p), &mask); pa_assert_se(pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) == 0); } #endif for (;;) { struct timeval now, end; uint64_t usec; pa_log_notice("CPU%i: Sleeping for 1s", PA_PTR_TO_UINT(p)); pa_msleep(1000); usec = (uint64_t) ((((double) rand())*(double)(msec_upper-msec_lower)*PA_USEC_PER_MSEC)/RAND_MAX) + (uint64_t) ((uint64_t) msec_lower*PA_USEC_PER_MSEC); pa_log_notice("CPU%i: Freezing for %ims", PA_PTR_TO_UINT(p), (int) (usec/PA_USEC_PER_MSEC)); pa_rtclock_get(&end); pa_timeval_add(&end, usec); do { pa_rtclock_get(&now); } while (pa_timeval_cmp(&now, &end) < 0); } }
/* IO-thread sink message handler for a shared playback/capture device:
 * the device is only closed when the source half is absent or suspended. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY:
            *((pa_usec_t*) data) = sink_get_latency(u, &PA_SINK(o)->sample_spec);
            return 0;

        case PA_SINK_MESSAGE_SET_STATE:
            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
                    pa_smoother_pause(u->smoother, pa_rtclock_now());

                    /* Close the device only if the source half isn't using it. */
                    if (!u->source || u->source_suspended)
                        if (suspend(u) < 0)
                            return -1;

                    u->sink_suspended = true;
                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                        if (!u->source || u->source_suspended) {
                            if (unsuspend(u) < 0)
                                return -1;

                            /* Refresh volume/mute from the reopened device. */
                            u->sink->get_volume(u->sink);
                            u->sink->get_mute(u->sink);
                        }

                        u->sink_suspended = false;
                    }
                    break;

                case PA_SINK_INVALID_STATE:
                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                    ;
            }
            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* IO-thread source message handler: forwards start/stop commands over
 * the vchan control channel when crossing the RUNNING boundary. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_SET_STATE: {
            /* Let the core switch state first, then react to the result. */
            int requested = PA_PTR_TO_UINT(data);
            int r = pa_source_process_msg(o, code, data, offset, chunk);

            if (r >= 0) {
                uint32_t cmd = 0;

                pa_log("source cork req state =%d, now state=%d\n", requested, (int) (u->source->state));

                if (u->source->state != PA_SOURCE_RUNNING && requested == PA_SOURCE_RUNNING)
                    cmd = QUBES_PA_SOURCE_START_CMD;
                else if (u->source->state == PA_SOURCE_RUNNING && requested != PA_SOURCE_RUNNING)
                    cmd = QUBES_PA_SOURCE_STOP_CMD;

                if (cmd != 0 && libvchan_send(u->rec_ctrl, (char*)&cmd, sizeof(cmd)) < 0) {
                    pa_log("vchan: failed to send record cmd");
                    /* This is a problem in case of enabling recording, in case
                     * of QUBES_PA_SOURCE_STOP_CMD it can happen that remote end
                     * is already disconnected, so indeed will not send further data.
                     * This can happen for example when we terminate the
                     * process because of pacat in dom0 has disconnected. */
                    if (requested == PA_SOURCE_RUNNING)
                        return -1;
                }
            }

            return r;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            size_t pending = u->memchunk_source.length;

            *((pa_usec_t *) data) = pa_bytes_to_usec(pending, &u->source->sample_spec);
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* IO-thread sink message handler: tracks the render timestamp to report
 * latency, and tears down the outgoing connection when leaving RUNNING. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct context *context = PA_SINK(o)->userdata;
    pa_usec_t now;
    pa_usec_t latency;

    pa_log_debug("sink_process_msg: code %d", code);

    switch (code) {

        case PA_SINK_MESSAGE_SET_VOLUME: /* 3 */
            break;

        case PA_SINK_MESSAGE_SET_MUTE: /* 6 */
            break;

        case PA_SINK_MESSAGE_GET_LATENCY: /* 7 */
            now = pa_rtclock_now();
            /* BUGFIX: 'latency' was a plain 'long', truncating the 64-bit
             * unsigned pa_usec_t on 32-bit platforms, and was logged with
             * %ld — a format/argument type mismatch (undefined behavior).
             * Keep the value in pa_usec_t and log it with a matching
             * conversion. */
            latency = context->timestamp > now ? context->timestamp - now : 0ULL;
            pa_log_debug("sink_process_msg: latency %llu", (unsigned long long) latency);
            *((pa_usec_t*) data) = latency;
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: /* 8 */
            break;

        case PA_SINK_MESSAGE_SET_STATE: /* 9 */
            if (PA_PTR_TO_UINT(data) == PA_SINK_RUNNING) /* 0 */ {
                pa_log("sink_process_msg: running");
                /* Restart latency accounting from "now". */
                context->timestamp = pa_rtclock_now();
            } else {
                pa_log("sink_process_msg: not running");
                /* Any non-running state closes the outgoing connection. */
                close_send(context);
            }
            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
static void cmtspeech_dl_sideinfo_forward(struct userdata *u) { unsigned int spc_flags = 0; pa_assert(u); if (NULL == u->voice_sideinfoq) return; spc_flags = PA_PTR_TO_UINT(pa_queue_pop(u->local_sideinfoq)); if (spc_flags == 0) { pa_log_warn("Local sideinfo queue empty."); spc_flags = VOICE_SIDEINFO_FLAG_BAD|VOICE_SIDEINFO_FLAG_BOGUS; } else if (!u->continuous_dl_stream) spc_flags |= VOICE_SIDEINFO_FLAG_BAD; u->continuous_dl_stream = TRUE; pa_queue_push(u->voice_sideinfoq, PA_UINT_TO_PTR(spc_flags)); }
/* Called from I/O thread context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY:
            /* The sink is _put() before the sink input is, so let's
             * make sure we don't access it yet */
            if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
                !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
                *((int64_t*) data) = 0;
                return 0;
            }

            /* Master sink latency plus whatever sits in our sink input's
             * render queue. */
            *((int64_t*) data) =
                pa_sink_get_latency_within_thread(u->sink_input->sink, true) +
                pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq),
                                 &u->sink_input->sink->sample_spec);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {
            pa_sink_state_t new_state = (pa_sink_state_t) PA_PTR_TO_UINT(data);

            /* When set to running or idle for the first time, request a rewind
             * of the master sink to make sure we are heard immediately */
            if ((new_state == PA_SINK_IDLE || new_state == PA_SINK_RUNNING) &&
                u->sink->thread_info.state == PA_SINK_INIT) {
                pa_log_debug("Requesting rewind due to state change.");
                pa_sink_input_request_rewind(u->sink_input, 0, false, true, true);
            }

            break;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* Called from IO context */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    if (code == PA_SOURCE_MESSAGE_SET_STATE) {

        switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

            case PA_SOURCE_SUSPENDED: {
                int r;

                pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                if ((r = suspend(u)) < 0)
                    return r;

                break;
            }

            case PA_SOURCE_IDLE:
                break;

            case PA_SOURCE_RUNNING:
                pa_log_info("Resuming...");
                /* Restart capture timing from "now". */
                u->timestamp = pa_rtclock_now();
                break;

            /* not needed */
            case PA_SOURCE_UNLINKED:
            case PA_SOURCE_INIT:
            case PA_SOURCE_INVALID_STATE:
                ;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* IO-thread source message handler for the OSS source half of a shared
 * duplex device.  Suspend/resume may need to close/reopen the device
 * (only when the sink half isn't using it), and a device "trigger" must
 * be issued AFTER the core has processed the state change — hence the
 * do_trigger/quick flags collected in the switch and acted on at the end. */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;
    int ret;
    /* quick==TRUE asks trigger() for the cheap path; cleared whenever the
     * device was fully reopened. */
    int do_trigger = FALSE, quick = TRUE;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            /* Only query the device while it is actually open. */
            if (u->fd >= 0) {
                if (u->use_mmap)
                    r = mmap_source_get_latency(u);
                else
                    r = io_source_get_latency(u);
            }

            *((pa_usec_t*) data) = r;
            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:

            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SOURCE_SUSPENDED:
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    /* Close the device only if the sink half isn't using it. */
                    if (!u->sink || u->sink_suspended) {
                        if (suspend(u) < 0)
                            return -1;
                    }

                    do_trigger = TRUE;
                    u->source_suspended = TRUE;
                    break;

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING:

                    /* First transition out of INIT: trigger; the quick path
                     * is allowed only if the sink half already has the
                     * device open. */
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        do_trigger = TRUE;
                        quick = u->sink && PA_SINK_IS_OPENED(u->sink->thread_info.state);
                    }

                    /* Waking from suspend: possibly reopen the device and
                     * reset the mmap bookkeeping. */
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {

                        if (!u->sink || u->sink_suspended) {
                            if (unsuspend(u) < 0)
                                return -1;
                            quick = FALSE;
                        }

                        do_trigger = TRUE;

                        u->in_mmap_current = 0;
                        u->in_mmap_saved_nfrags = 0;

                        u->source_suspended = FALSE;
                    }
                    break;

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }
            break;
    }

    /* Process the message first; trigger only after a successful state
     * change, so the device sees the final state. */
    ret = pa_source_process_msg(o, code, data, offset, chunk);

    if (ret >= 0 && do_trigger) {
        if (trigger(u, quick) < 0)
            return -1;
    }

    return ret;
}
/* Trivial hash for idxsets keyed on pointer identity: the pointer's
 * integer value is used directly as its own hash. */
unsigned pa_idxset_trivial_hash_func(const void *p) { return PA_PTR_TO_UINT(p); }
/* IO-thread sink message handler for a Solaris-style /dev/audio device:
 * latency and volume/mute are queried and set via AUDIO_GETINFO /
 * AUDIO_SETINFO ioctls on the device fd. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;
    int err;
    audio_info_t info;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->fd >= 0) {
                pa_usec_t written, played;

                err = ioctl(u->fd, AUDIO_GETINFO, &info);
                pa_assert(err >= 0);

                written = pa_bytes_to_usec(u->written_bytes, &PA_SINK(o)->sample_spec);
                played = pa_bytes_to_usec(info.play.samples * u->frame_size, &PA_SINK(o)->sample_spec);

                /* BUGFIX: the old code computed r = written - played on the
                 * unsigned pa_usec_t; if the device reported more samples
                 * played than we accounted as written the subtraction
                 * wrapped around and produced a huge bogus latency.  Clamp
                 * at zero instead. */
                r = written > played ? written - played : 0;

                /* Add whatever is still queued in our staging memchunk. */
                if (u->memchunk.memblock)
                    r += pa_bytes_to_usec(u->memchunk.length, &PA_SINK(o)->sample_spec);
            }

            *((pa_usec_t*) data) = r;
            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME:
            if (u->fd >= 0) {
                AUDIO_INITINFO(&info);

                /* Map PulseAudio's volume range onto the device's gain range. */
                info.play.gain = pa_cvolume_avg((pa_cvolume*)data) * AUDIO_MAX_GAIN / PA_VOLUME_NORM;
                /* Consistency fix: use pa_assert() like the rest of this
                 * function, not the bare libc assert(). */
                pa_assert(info.play.gain <= AUDIO_MAX_GAIN);

                if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0) {
                    if (errno == EINVAL)
                        pa_log("AUDIO_SETINFO: Unsupported volume.");
                    else
                        pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
                } else {
                    return 0;
                }
            }
            break;

        case PA_SINK_MESSAGE_GET_VOLUME:
            if (u->fd >= 0) {
                err = ioctl(u->fd, AUDIO_GETINFO, &info);
                pa_assert(err >= 0);

                pa_cvolume_set((pa_cvolume*) data, ((pa_cvolume*) data)->channels,
                               info.play.gain * PA_VOLUME_NORM / AUDIO_MAX_GAIN);
                return 0;
            }
            break;

        case PA_SINK_MESSAGE_SET_MUTE:
            if (u->fd >= 0) {
                AUDIO_INITINFO(&info);

                info.output_muted = !!PA_PTR_TO_UINT(data);

                if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0)
                    pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
                else
                    return 0;
            }
            break;

        case PA_SINK_MESSAGE_GET_MUTE:
            if (u->fd >= 0) {
                err = ioctl(u->fd, AUDIO_GETINFO, &info);
                pa_assert(err >= 0);

                *(int*)data = !!info.output_muted;
                return 0;
            }
            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
/* IO-thread sink message handler for the RAOP sink: manages the smoother
 * and the RAOP/RTSP connection across suspend/resume, and accepts or
 * tears down the connection socket via the private PASS/RIP messages. */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_SET_STATE:
            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));
                    pa_smoother_pause(u->smoother, pa_rtclock_now());

                    /* Issue a FLUSH if we are connected */
                    if (u->fd >= 0) {
                        pa_raop_flush(u->raop);
                    }
                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:
                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                        /* The connection can be closed when idle, so check to
                         * see if we need to reestablish it */
                        if (u->fd < 0)
                            pa_raop_connect(u->raop);
                        else
                            pa_raop_flush(u->raop);
                    }
                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }
            break;

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t w, r;

            r = pa_smoother_get(u->smoother, pa_rtclock_now());
            /* Subtract protocol overhead and scale the still-encoded chunk
             * back to raw-audio time. */
            w = pa_bytes_to_usec((u->offset - u->encoding_overhead + (u->encoded_memchunk.length / u->encoding_ratio)),
                                 &u->sink->sample_spec);

            *((pa_usec_t*) data) = w > r ? w - r : 0;
            return 0;
        }

        case SINK_MESSAGE_PASS_SOCKET: {
            struct pollfd *pollfd;

            pa_assert(!u->rtpoll_item);

            u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
            pollfd->fd = u->fd;
            pollfd->events = POLLOUT;
            pollfd->revents = 0;

            if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                /* Our stream has been suspended so we just flush it.... */
                pa_raop_flush(u->raop);
            }
            return 0;
        }

        case SINK_MESSAGE_RIP_SOCKET: {
            if (u->fd >= 0) {
                pa_close(u->fd);
                u->fd = -1;
            } else
                /* FIXME */
                pa_log("We should not get to this state. Cannot rip socket if not connected.");

            if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                /* BUGFIX: this log message was previously a single string
                 * literal broken across a physical newline, which is not
                 * valid C.  Rejoined into one literal. */
                pa_log_debug("RTSP control connection closed, but we're suspended so let's not worry about it... we'll open it again later");

                if (u->rtpoll_item)
                    pa_rtpoll_item_free(u->rtpoll_item);
                u->rtpoll_item = NULL;
            } else {
                /* Question: is this valid here: or should we do some sort of:
                 * return pa_sink_process_msg(PA_MSGOBJECT(u->core),
                 *     PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL); ?? */
                pa_module_unload_request(u->module, true);
            }
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}