static struct service *get_service(struct userdata *u, pa_object *device) {
    struct service *s;
    char *hn, *un;
    const char *n;

    pa_assert(u);
    pa_object_assert_ref(device);

    if ((s = pa_hashmap_get(u->services, device)))
        return s;

    s = pa_xnew0(struct service, 1);
    s->userdata = u;
    s->device = device;

    if (pa_sink_isinstance(device)) {
        if (!(n = pa_proplist_gets(PA_SINK(device)->proplist, PA_PROP_DEVICE_DESCRIPTION)))
            n = PA_SINK(device)->name;
    } else {
        if (!(n = pa_proplist_gets(PA_SOURCE(device)->proplist, PA_PROP_DEVICE_DESCRIPTION)))
            n = PA_SOURCE(device)->name;
    }

    hn = pa_get_host_name_malloc();
    un = pa_get_user_name_malloc();

    s->service_name = pa_truncate_utf8(pa_sprintf_malloc("%s@%s: %s", un, hn, n), kDNSServiceMaxDomainName-1);

    pa_xfree(un);
    pa_xfree(hn);

    pa_hashmap_put(u->services, s->device, s);

    return s;
}
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;
    int err;
    audio_info_t info;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->fd >= 0) {
                err = ioctl(u->fd, AUDIO_GETINFO, &info);
                pa_assert(err >= 0);

                r += pa_bytes_to_usec(info.record.samples * u->frame_size, &PA_SOURCE(o)->sample_spec);
                r -= pa_bytes_to_usec(u->read_bytes, &PA_SOURCE(o)->sample_spec);
            }

            *((pa_usec_t*) data) = r;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_VOLUME:
            if (u->fd >= 0) {
                AUDIO_INITINFO(&info);

                info.record.gain = pa_cvolume_avg((pa_cvolume*) data) * AUDIO_MAX_GAIN / PA_VOLUME_NORM;
                pa_assert(info.record.gain <= AUDIO_MAX_GAIN);

                if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0) {
                    if (errno == EINVAL)
                        pa_log("AUDIO_SETINFO: Unsupported volume.");
                    else
                        pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
                } else {
                    return 0;
                }
            }
            break;

        case PA_SOURCE_MESSAGE_GET_VOLUME:
            if (u->fd >= 0) {
                err = ioctl(u->fd, AUDIO_GETINFO, &info);
                pa_assert(err >= 0);

                pa_cvolume_set((pa_cvolume*) data, ((pa_cvolume*) data)->channels,
                               info.record.gain * PA_VOLUME_NORM / AUDIO_MAX_GAIN);
                return 0;
            }
            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static bool shall_ignore(pa_object *o) {
    pa_object_assert_ref(o);

    if (pa_sink_isinstance(o))
        return !!(PA_SINK(o)->flags & PA_SINK_NETWORK);

    if (pa_source_isinstance(o))
        return PA_SOURCE(o)->monitor_of || (PA_SOURCE(o)->flags & PA_SOURCE_NETWORK);

    pa_assert_not_reached();
}
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY:
            *((pa_usec_t*) data) = source_get_latency(u, &PA_SOURCE(o)->sample_spec);
            return 0;

        case PA_SOURCE_MESSAGE_SET_STATE:
            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
                case PA_SOURCE_SUSPENDED:
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if (!u->sink || u->sink_suspended) {
                        if (suspend(u) < 0)
                            return -1;
                    }

                    u->source_suspended = true;
                    break;

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING:
                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if (!u->sink || u->sink_suspended) {
                            if (unsuspend(u) < 0)
                                return -1;
                            u->source->get_volume(u->source);
                        }

                        u->source_suspended = false;
                    }
                    break;

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }
            break;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static void get_service_data(struct service *s, pa_sample_spec *ret_ss, pa_channel_map *ret_map, const char **ret_name, pa_proplist **ret_proplist, enum service_subtype *ret_subtype) {
    pa_assert(s);
    pa_assert(ret_ss);
    pa_assert(ret_map);
    pa_assert(ret_name);
    pa_assert(ret_proplist);
    pa_assert(ret_subtype);

    if (pa_sink_isinstance(s->device)) {
        pa_sink *sink = PA_SINK(s->device);

        *ret_ss = sink->sample_spec;
        *ret_map = sink->channel_map;
        *ret_name = sink->name;
        *ret_proplist = sink->proplist;
        *ret_subtype = sink->flags & PA_SINK_HARDWARE ? SUBTYPE_HARDWARE : SUBTYPE_VIRTUAL;

    } else if (pa_source_isinstance(s->device)) {
        pa_source *source = PA_SOURCE(s->device);

        *ret_ss = source->sample_spec;
        *ret_map = source->channel_map;
        *ret_name = source->name;
        *ret_proplist = source->proplist;
        *ret_subtype = source->monitor_of ? SUBTYPE_MONITOR : (source->flags & PA_SOURCE_HARDWARE ? SUBTYPE_HARDWARE : SUBTYPE_VIRTUAL);

    } else
        pa_assert_not_reached();
}
static int source_process_msg(
        pa_msgobject *o,
        int code,
        void *data,
        int64_t offset,
        pa_memchunk *chunk) {

    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            size_t n = 0;

#ifdef FIONREAD
            int l;

            if (ioctl(u->fd, FIONREAD, &l) >= 0 && l > 0)
                n = (size_t) l;
#endif

            *((int64_t*) data) = pa_bytes_to_usec(n, &u->source->sample_spec);
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static int publish_all_services(struct userdata *u) {
    pa_sink *sink;
    pa_source *source;
    int r = -1;
    uint32_t idx;

    pa_assert(u);

    pa_log_debug("Publishing services in Zeroconf");

    for (sink = PA_SINK(pa_idxset_first(u->core->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(u->core->sinks, &idx)))
        if (!shall_ignore(PA_OBJECT(sink)))
            publish_service(get_service(u, PA_OBJECT(sink)));

    for (source = PA_SOURCE(pa_idxset_first(u->core->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(u->core->sources, &idx)))
        if (!shall_ignore(PA_OBJECT(source)))
            publish_service(get_service(u, PA_OBJECT(source)));

    if (publish_main_service(u) < 0)
        goto fail;

    r = 0;

fail:
    return r;
}
static int source_process_msg(
        pa_msgobject *o,
        int code,
        void *data,
        int64_t offset,
        pa_memchunk *chunk) {

    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_SET_STATE:
            if (PA_PTR_TO_UINT(data) == PA_SOURCE_RUNNING)
                u->timestamp = pa_rtclock_now();
            break;

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t now, left_to_fill;

            now = pa_rtclock_now();
            left_to_fill = u->timestamp > now ? u->timestamp - now : 0ULL;

            *((pa_usec_t*) data) = u->block_usec > left_to_fill ? u->block_usec - left_to_fill : 0ULL;
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Called from I/O thread context */
static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY:
            /* The source is _put() before the source output is, so let's
             * make sure we don't access it in that time. Also, the
             * source output is first shut down, the source second. */
            if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
                !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
                *((pa_usec_t*) data) = 0;
                return 0;
            }

            *((pa_usec_t*) data) =
                /* Get the latency of the master source */
                pa_source_get_latency_within_thread(u->source_output->source) +
                /* Add the latency internal to our source output on top */
                pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec);

            return 0;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static pa_hook_result_t device_state_changed_hook_cb(pa_core *c, pa_object *o, struct userdata *u) {
    struct device_info *d;

    pa_assert(c);
    pa_object_assert_ref(o);
    pa_assert(u);

    if (!(d = pa_hashmap_get(u->device_infos, o)))
        return PA_HOOK_OK;

    if (pa_sink_isinstance(o)) {
        pa_sink *s = PA_SINK(o);
        pa_sink_state_t state = pa_sink_get_state(s);

        if (pa_sink_check_suspend(s) <= 0)
            if (PA_SINK_IS_OPENED(state))
                restart(d);

    } else if (pa_source_isinstance(o)) {
        pa_source *s = PA_SOURCE(o);
        pa_source_state_t state = pa_source_get_state(s);

        if (pa_source_check_suspend(s) <= 0)
            if (PA_SOURCE_IS_OPENED(state))
                restart(d);
    }

    return PA_HOOK_OK;
}
/* Called from I/O thread context */
static int voip_source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case VOICE_SOURCE_SET_UL_DEADLINE: {
            u->ul_deadline = offset;
            pa_log_debug("Uplink deadline set to %lld (%lld usec from now)",
                         (long long) u->ul_deadline,
                         (long long) (u->ul_deadline - pa_rtclock_now()));
            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t usec = 0;

            if (PA_MSGOBJECT(u->master_source)->process_msg(
                    PA_MSGOBJECT(u->master_source), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
                usec = 0;

            usec += pa_bytes_to_usec(pa_memblockq_get_length(u->ul_memblockq), &u->aep_sample_spec);

            *((pa_usec_t*) data) = usec;
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Runs in PA mainloop context */
static void get_service_data(struct service *s, pa_object *device) {
    pa_assert(s);

    if (pa_sink_isinstance(device)) {
        pa_sink *sink = PA_SINK(device);

        s->is_sink = true;
        s->service_type = SERVICE_TYPE_SINK;
        s->ss = sink->sample_spec;
        s->cm = sink->channel_map;
        s->name = pa_xstrdup(sink->name);
        s->proplist = pa_proplist_copy(sink->proplist);
        s->subtype = sink->flags & PA_SINK_HARDWARE ? SUBTYPE_HARDWARE : SUBTYPE_VIRTUAL;

    } else if (pa_source_isinstance(device)) {
        pa_source *source = PA_SOURCE(device);

        s->is_sink = false;
        s->service_type = SERVICE_TYPE_SOURCE;
        s->ss = source->sample_spec;
        s->cm = source->channel_map;
        s->name = pa_xstrdup(source->name);
        s->proplist = pa_proplist_copy(source->proplist);
        s->subtype = source->monitor_of ? SUBTYPE_MONITOR : (source->flags & PA_SOURCE_HARDWARE ? SUBTYPE_HARDWARE : SUBTYPE_VIRTUAL);

    } else
        pa_assert_not_reached();
}
static pa_hook_result_t device_new_hook_cb(pa_core *c, pa_object *o, struct userdata *u) {
    struct device_info *d;
    pa_source *source;
    pa_sink *sink;

    pa_assert(c);
    pa_object_assert_ref(o);
    pa_assert(u);

    source = pa_source_isinstance(o) ? PA_SOURCE(o) : NULL;
    sink = pa_sink_isinstance(o) ? PA_SINK(o) : NULL;

    /* Never suspend monitors */
    if (source && source->monitor_of)
        return PA_HOOK_OK;

    pa_assert(source || sink);

    d = pa_xnew(struct device_info, 1);
    d->userdata = u;
    d->source = source ? pa_source_ref(source) : NULL;
    d->sink = sink ? pa_sink_ref(sink) : NULL;
    d->time_event = pa_core_rttime_new(c, PA_USEC_INVALID, timeout_cb, d);
    pa_hashmap_put(u->device_infos, o, d);

    if ((d->sink && pa_sink_check_suspend(d->sink) <= 0) ||
        (d->source && pa_source_check_suspend(d->source) <= 0))
        restart(d);

    return PA_HOOK_OK;
}
static pa_hook_result_t device_new_hook_cb(pa_core *c, pa_object *o, struct userdata *u) {
    struct device_info *d;
    pa_source *source;
    pa_sink *sink;
    const char *timeout_str;
    int32_t timeout;
    bool timeout_valid;

    pa_assert(c);
    pa_object_assert_ref(o);
    pa_assert(u);

    source = pa_source_isinstance(o) ? PA_SOURCE(o) : NULL;
    sink = pa_sink_isinstance(o) ? PA_SINK(o) : NULL;

    /* Never suspend monitors */
    if (source && source->monitor_of)
        return PA_HOOK_OK;

    pa_assert(source || sink);

    timeout_str = pa_proplist_gets(sink ? sink->proplist : source->proplist, "module-suspend-on-idle.timeout");
    if (timeout_str && pa_atoi(timeout_str, &timeout) >= 0)
        timeout_valid = true;
    else
        timeout_valid = false;

    if (timeout_valid && timeout < 0)
        return PA_HOOK_OK;

    d = pa_xnew(struct device_info, 1);
    d->userdata = u;
    d->source = source ? pa_source_ref(source) : NULL;
    d->sink = sink ? pa_sink_ref(sink) : NULL;
    d->time_event = pa_core_rttime_new(c, PA_USEC_INVALID, timeout_cb, d);

    if (timeout_valid)
        d->timeout = timeout * PA_USEC_PER_SEC;
    else
        d->timeout = d->userdata->timeout;

    pa_hashmap_put(u->device_infos, o, d);

    if ((d->sink && pa_sink_check_suspend(d->sink) <= 0) ||
        (d->source && pa_source_check_suspend(d->source) <= 0))
        restart(d);

    return PA_HOOK_OK;
}
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    int r;
    struct userdata *u = PA_SOURCE(o)->userdata;
    int state;

    switch (code) {
        case PA_SOURCE_MESSAGE_SET_STATE:
            state = PA_PTR_TO_UINT(data);
            r = pa_source_process_msg(o, code, data, offset, chunk);

            if (r >= 0) {
                uint32_t cmd = 0;

                pa_log("source cork req state=%d, now state=%d", state, (int) u->source->state);

                if (u->source->state != PA_SOURCE_RUNNING && state == PA_SOURCE_RUNNING)
                    cmd = QUBES_PA_SOURCE_START_CMD;
                else if (u->source->state == PA_SOURCE_RUNNING && state != PA_SOURCE_RUNNING)
                    cmd = QUBES_PA_SOURCE_STOP_CMD;

                if (cmd != 0) {
                    if (libvchan_send(u->rec_ctrl, (char*) &cmd, sizeof(cmd)) < 0) {
                        pa_log("vchan: failed to send record cmd");
                        /* This is a problem when enabling recording; for
                         * QUBES_PA_SOURCE_STOP_CMD it can happen that the remote end
                         * is already disconnected and so will not send further data anyway.
                         * This can happen for example when we terminate the
                         * process because pacat in dom0 has disconnected. */
                        if (state == PA_SOURCE_RUNNING)
                            return -1;
                        else
                            return r;
                    }
                }
            }
            return r;

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            size_t n = 0;

            n += u->memchunk_source.length;

            *((pa_usec_t*) data) = pa_bytes_to_usec(n, &u->source->sample_spec);
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t now;

            now = pa_rtclock_now();
            *((int64_t*) data) = (int64_t) now - (int64_t) u->timestamp;

            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static int publish_all_services(struct userdata *u) {
    pa_sink *sink;
    pa_source *source;
    uint32_t idx;

    pa_assert(u);

    pa_log_debug("Publishing services in Bonjour");

    for (sink = PA_SINK(pa_idxset_first(u->core->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(u->core->sinks, &idx)))
        if (!shall_ignore(PA_OBJECT(sink)))
            publish_service(get_service(u, PA_OBJECT(sink)));

    for (source = PA_SOURCE(pa_idxset_first(u->core->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(u->core->sources, &idx)))
        if (!shall_ignore(PA_OBJECT(source)))
            publish_service(get_service(u, PA_OBJECT(source)));

    return publish_main_service(u);
}
/* Called from I/O thread context */
static int raw_source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t usec = 0;

            if (PA_MSGOBJECT(u->master_source)->process_msg(
                    PA_MSGOBJECT(u->master_source), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
                usec = 0;

            *((pa_usec_t*) data) = usec +
                pa_bytes_to_usec(pa_memblockq_get_length(u->hw_source_memblockq), &u->raw_source->sample_spec);

            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static int process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u;

    if (pa_sink_isinstance(o)) {
        u = PA_SINK(o)->userdata;

        switch (code) {
            case PA_SINK_MESSAGE_GET_LATENCY: {
                pa_usec_t r = 0;

                if (u->hwo)
                    r = sink_get_latency(u);

                *((pa_usec_t*) data) = r;
                return 0;
            }
        }

        return pa_sink_process_msg(o, code, data, offset, chunk);
    }

    if (pa_source_isinstance(o)) {
        u = PA_SOURCE(o)->userdata;

        switch (code) {
            case PA_SOURCE_MESSAGE_GET_LATENCY: {
                pa_usec_t r = 0;

                if (u->hwi)
                    r = source_get_latency(u);

                *((pa_usec_t*) data) = r;
                return 0;
            }
        }

        return pa_source_process_msg(o, code, data, offset, chunk);
    }

    return -1;
}
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->fd >= 0) {
                if (u->use_mmap)
                    r = mmap_source_get_latency(u);
                else
                    r = io_source_get_latency(u);
            }

            *((int64_t*) data) = (int64_t) r;
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
/* Called from IO context */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {
        case PA_SOURCE_MESSAGE_SET_STATE: {
            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
                case PA_SOURCE_SUSPENDED: {
                    int r;

                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if ((r = suspend(u)) < 0)
                        return r;

                    break;
                }

                case PA_SOURCE_IDLE:
                    break;

                case PA_SOURCE_RUNNING: {
                    pa_log_info("Resuming...");
                    u->timestamp = pa_rtclock_now();
                    break;
                }

                /* not needed */
                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }

            break;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;
    int ret;
    int do_trigger = FALSE, quick = TRUE;

    switch (code) {
        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t r = 0;

            if (u->fd >= 0) {
                if (u->use_mmap)
                    r = mmap_source_get_latency(u);
                else
                    r = io_source_get_latency(u);
            }

            *((pa_usec_t*) data) = r;
            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_STATE:
            switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
                case PA_SOURCE_SUSPENDED:
                    pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));

                    if (!u->sink || u->sink_suspended) {
                        if (suspend(u) < 0)
                            return -1;
                    }

                    do_trigger = TRUE;
                    u->source_suspended = TRUE;
                    break;

                case PA_SOURCE_IDLE:
                case PA_SOURCE_RUNNING:
                    if (u->source->thread_info.state == PA_SOURCE_INIT) {
                        do_trigger = TRUE;
                        quick = u->sink && PA_SINK_IS_OPENED(u->sink->thread_info.state);
                    }

                    if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
                        if (!u->sink || u->sink_suspended) {
                            if (unsuspend(u) < 0)
                                return -1;
                            quick = FALSE;
                        }

                        do_trigger = TRUE;

                        u->in_mmap_current = 0;
                        u->in_mmap_saved_nfrags = 0;

                        u->source_suspended = FALSE;
                    }
                    break;

                case PA_SOURCE_UNLINKED:
                case PA_SOURCE_INIT:
                case PA_SOURCE_INVALID_STATE:
                    ;
            }
            break;
    }

    ret = pa_source_process_msg(o, code, data, offset, chunk);

    if (ret >= 0 && do_trigger) {
        if (trigger(u, quick) < 0)
            return -1;
    }

    return ret;
}