static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    int write_type = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    pa_thread_mq_install(&u->thread_mq);

    pa_smoother_set_time_offset(u->smoother, pa_rtclock_now());

    for (;;) {
        int ret;

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
            pa_sink_process_rewind(u->sink, 0);

        if (u->rtpoll_item) {
            struct pollfd *pollfd;
            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

            /* Render some data and write it to the fifo */
            if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && pollfd->revents) {
                pa_usec_t usec;
                int64_t n;

                for (;;) {
                    ssize_t l;
                    void *p;

                    if (u->memchunk.length <= 0)
                        pa_sink_render(u->sink, u->block_size, &u->memchunk);

                    pa_assert(u->memchunk.length > 0);

                    p = pa_memblock_acquire(u->memchunk.memblock);
                    l = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, u->memchunk.length, &write_type);
                    pa_memblock_release(u->memchunk.memblock);

                    pa_assert(l != 0);

                    if (l < 0) {

                        if (errno == EINTR)
                            continue;
                        else if (errno == EAGAIN) {

                            /* OK, we filled all socket buffers up
                             * now. */
                            goto filled_up;

                        } else {
                            pa_log("Failed to write data to FIFO: %s", pa_cstrerror(errno));
                            goto fail;
                        }

                    } else {
                        u->offset += l;

                        u->memchunk.index += (size_t) l;
                        u->memchunk.length -= (size_t) l;

                        if (u->memchunk.length <= 0) {
                            pa_memblock_unref(u->memchunk.memblock);
                            pa_memchunk_reset(&u->memchunk);
                        }

                        pollfd->revents = 0;

                        if (u->memchunk.length > 0)

                            /* OK, we wrote less than we asked for,
                             * hence we can assume that the socket
                             * buffers are full now */
                            goto filled_up;
                    }
                }

            filled_up:

                /* At this spot we know that the socket buffers are
                 * fully filled up. This is the best time to estimate
                 * the playback position of the server */

                n = u->offset;

#ifdef SIOCOUTQ
                {
                    int l;
                    if (ioctl(u->fd, SIOCOUTQ, &l) >= 0 && l > 0)
                        n -= l;
                }
#endif

                usec = pa_bytes_to_usec((uint64_t) n, &u->sink->sample_spec);

                if (usec > u->latency)
                    usec -= u->latency;
                else
                    usec = 0;

                pa_smoother_put(u->smoother, pa_rtclock_now(), usec);
            }

            /* Hmm, nothing to do. Let's sleep */
            pollfd->events = (short) (PA_SINK_IS_OPENED(u->sink->thread_info.state) ? POLLOUT : 0);
        }

        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        if (u->rtpoll_item) {
            struct pollfd *pollfd;

            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

            if (pollfd->revents & ~POLLOUT) {
                pa_log("FIFO shutdown.");
                goto fail;
            }
        }
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    int write_type = 0;
    pa_memchunk silence;
    uint32_t silence_overhead = 0;
    double silence_ratio = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    pa_thread_mq_install(&u->thread_mq);
    pa_smoother_set_time_offset(u->smoother, pa_rtclock_now());

    /* Create a chunk of memory that is our encoded silence sample. */
    pa_memchunk_reset(&silence);

    for (;;) {
        int ret;

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
            pa_sink_process_rewind(u->sink, 0);

        if (u->rtpoll_item) {
            struct pollfd *pollfd;
            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

            /* Render some data and write it to the fifo */
            if (/*PA_SINK_IS_OPENED(u->sink->thread_info.state) && */pollfd->revents) {
                pa_usec_t usec;
                int64_t n;
                void *p;

                if (!silence.memblock) {
                    pa_memchunk silence_tmp;

                    pa_memchunk_reset(&silence_tmp);
                    silence_tmp.memblock = pa_memblock_new(u->core->mempool, 4096);
                    silence_tmp.length = 4096;
                    p = pa_memblock_acquire(silence_tmp.memblock);
                    memset(p, 0, 4096);
                    pa_memblock_release(silence_tmp.memblock);
                    pa_raop_client_encode_sample(u->raop, &silence_tmp, &silence);
                    pa_assert(0 == silence_tmp.length);
                    /* Derive the encoding overhead and ratio from the encoded
                     * silence sample (the raw chunk has been fully consumed). */
                    silence_overhead = silence.length - 4096;
                    silence_ratio = silence.length / 4096;
                    pa_memblock_unref(silence_tmp.memblock);
                }

                for (;;) {
                    ssize_t l;

                    if (u->encoded_memchunk.length <= 0) {
                        if (u->encoded_memchunk.memblock)
                            pa_memblock_unref(u->encoded_memchunk.memblock);

                        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
                            size_t rl;

                            /* We render real data */
                            if (u->raw_memchunk.length <= 0) {
                                if (u->raw_memchunk.memblock)
                                    pa_memblock_unref(u->raw_memchunk.memblock);
                                pa_memchunk_reset(&u->raw_memchunk);

                                /* Grab unencoded data */
                                pa_sink_render(u->sink, u->block_size, &u->raw_memchunk);
                            }
                            pa_assert(u->raw_memchunk.length > 0);

                            /* Encode it */
                            rl = u->raw_memchunk.length;
                            u->encoding_overhead += u->next_encoding_overhead;
                            pa_raop_client_encode_sample(u->raop, &u->raw_memchunk, &u->encoded_memchunk);
                            u->next_encoding_overhead = (u->encoded_memchunk.length - (rl - u->raw_memchunk.length));
                            u->encoding_ratio = u->encoded_memchunk.length / (rl - u->raw_memchunk.length);
                        } else {
                            /* We render some silence into our memchunk */
                            memcpy(&u->encoded_memchunk, &silence, sizeof(pa_memchunk));
                            pa_memblock_ref(silence.memblock);

                            /* Calculate/store some values to be used with the smoother */
                            u->next_encoding_overhead = silence_overhead;
                            u->encoding_ratio = silence_ratio;
                        }
                    }
                    pa_assert(u->encoded_memchunk.length > 0);

                    p = pa_memblock_acquire(u->encoded_memchunk.memblock);
                    l = pa_write(u->fd, (uint8_t*) p + u->encoded_memchunk.index, u->encoded_memchunk.length, &write_type);
                    pa_memblock_release(u->encoded_memchunk.memblock);

                    pa_assert(l != 0);

                    if (l < 0) {

                        if (errno == EINTR)
                            continue;
                        else if (errno == EAGAIN) {

                            /* OK, we filled all socket buffers up
                             * now. */
                            goto filled_up;

                        } else {
                            pa_log("Failed to write data to FIFO: %s", pa_cstrerror(errno));
                            goto fail;
                        }

                    } else {
                        u->offset += l;

                        u->encoded_memchunk.index += l;
                        u->encoded_memchunk.length -= l;

                        pollfd->revents = 0;

                        if (u->encoded_memchunk.length > 0) {
                            /* The encoded chunk was only partially written, so
                             * account for its overhead before we estimate the
                             * playback position below */
                            u->encoding_overhead += u->next_encoding_overhead;

                            /* OK, we wrote less than we asked for,
                             * hence we can assume that the socket
                             * buffers are full now */
                            goto filled_up;
                        }
                    }
                }

            filled_up:

                /* At this spot we know that the socket buffers are
                 * fully filled up. This is the best time to estimate
                 * the playback position of the server */

                n = u->offset - u->encoding_overhead;

#ifdef SIOCOUTQ
                {
                    int l;
                    if (ioctl(u->fd, SIOCOUTQ, &l) >= 0 && l > 0)
                        n -= (l / u->encoding_ratio);
                }
#endif

                usec = pa_bytes_to_usec(n, &u->sink->sample_spec);

                if (usec > u->latency)
                    usec -= u->latency;
                else
                    usec = 0;

                pa_smoother_put(u->smoother, pa_rtclock_now(), usec);
            }

            /* Hmm, nothing to do. Let's sleep */
            pollfd->events = POLLOUT; /*PA_SINK_IS_OPENED(u->sink->thread_info.state) ? POLLOUT : 0;*/
        }

        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        if (u->rtpoll_item) {
            struct pollfd *pollfd;

            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

            if (pollfd->revents & ~POLLOUT) {
                if (u->sink->thread_info.state != PA_SINK_SUSPENDED) {
                    pa_log("FIFO shutdown.");
                    goto fail;
                }

                /* We expect this to happen on occasion if we are not sending data.
                 * It's perfectly natural and normal. */
                if (u->rtpoll_item)
                    pa_rtpoll_item_free(u->rtpoll_item);
                u->rtpoll_item = NULL;
            }
        }
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    if (silence.memblock)
        pa_memblock_unref(silence.memblock);

    pa_log_debug("Thread shutting down");
}
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    char buf[2048]; // max ring buffer size

    pa_assert(u);

    pa_log_debug("Thread starting up");

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        struct pollfd *play_pollfd;
        struct pollfd *rec_pollfd;
        int ret;

        play_pollfd = pa_rtpoll_item_get_pollfd(u->play_rtpoll_item, NULL);
        rec_pollfd = pa_rtpoll_item_get_pollfd(u->rec_rtpoll_item, NULL);

        if (play_pollfd->revents & POLLIN) {
            if (libvchan_wait(u->play_ctrl) < 0)
                goto fail;
            play_pollfd->revents = 0;
        }

        if (rec_pollfd->revents & POLLIN) {
            if (libvchan_wait(u->rec_ctrl) < 0)
                goto fail;
            rec_pollfd->revents = 0;
        }

        /* Render some data and write it to the fifo */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {

            if (u->sink->thread_info.rewind_requested)
                pa_sink_process_rewind(u->sink, 0);

            if (libvchan_buffer_space(u->play_ctrl)) {
                if (process_sink_render(u) < 0)
                    goto fail;
            }
        }

        if (u->source->thread_info.state == PA_SOURCE_RUNNING) {
            while (libvchan_data_ready(u->rec_ctrl)) {
                if (process_source_data(u) < 0)
                    goto fail;
            }
        } else {
            /* discard the data */
            if (libvchan_data_ready(u->rec_ctrl))
                if (libvchan_read(u->rec_ctrl, buf, sizeof(buf)) < 0)
                    goto fail;
        }

        /* Hmm, nothing to do. Let's sleep */
        play_pollfd->events = POLLIN;
        rec_pollfd->events = POLLIN;

#if PA_CHECK_VERSION(6,0,0)
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
#else
        if ((ret = pa_rtpoll_run(u->rtpoll, true)) < 0)
#endif
            goto fail;

        if (ret == 0)
            goto finish;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
/* Called from output thread context */
static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = data;

            pa_sink_input_assert_io_context(u->sink_input);

            *r = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
        }

        case SINK_INPUT_MESSAGE_POST:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_push_align(u->memblockq, chunk);
            else
                pa_memblockq_flush_write(u->memblockq, true);

            update_min_memblockq_length(u);

            /* Is this the end of an underrun? Then let's start things
             * right-away */
            if (!u->in_pop &&
                u->sink_input->thread_info.underrun_for > 0 &&
                pa_memblockq_is_readable(u->memblockq)) {

                pa_log_debug("Requesting rewind due to end of underrun.");
                pa_sink_input_request_rewind(u->sink_input,
                                             (size_t) (u->sink_input->thread_info.underrun_for == (size_t) -1 ? 0 : u->sink_input->thread_info.underrun_for),
                                             false, true, false);
            }

            u->recv_counter += (int64_t) chunk->length;

            return 0;

        case SINK_INPUT_MESSAGE_REWIND:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_seek(u->memblockq, -offset, PA_SEEK_RELATIVE, true);
            else
                pa_memblockq_flush_write(u->memblockq, true);

            u->recv_counter -= offset;

            update_min_memblockq_length(u);

            return 0;

        case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t length;

            update_min_memblockq_length(u);

            length = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);

            u->latency_snapshot.recv_counter = u->recv_counter;
            u->latency_snapshot.sink_input_buffer = pa_memblockq_get_length(u->memblockq) +
                (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, length) : length);
            u->latency_snapshot.sink_latency = pa_sink_get_latency_within_thread(u->sink_input->sink);

            u->latency_snapshot.max_request = pa_sink_input_get_max_request(u->sink_input);

            u->latency_snapshot.min_memblockq_length = u->min_memblockq_length;
            u->min_memblockq_length = (size_t) -1;

            return 0;
        }

        case SINK_INPUT_MESSAGE_MAX_REQUEST_CHANGED: {
            /* This message is sent from the IO thread to the main
             * thread! So don't be confused. All the user cases above
             * are executed in thread context, but this one is not! */

            pa_assert_ctl_context();

            if (u->time_event)
                adjust_rates(u);
            return 0;
        }
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_SET_STATE:

            switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {

                case PA_SINK_SUSPENDED:
                    pa_assert(PA_SINK_IS_OPENED(u->sink->thread_info.state));

                    pa_smoother_pause(u->smoother, pa_rtclock_now());

                    /* Issue a FLUSH if we are connected */
                    if (u->fd >= 0) {
                        pa_raop_flush(u->raop);
                    }
                    break;

                case PA_SINK_IDLE:
                case PA_SINK_RUNNING:

                    if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                        pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                        /* The connection can be closed when idle, so check to
                         * see if we need to reestablish it */
                        if (u->fd < 0)
                            pa_raop_connect(u->raop);
                        else
                            pa_raop_flush(u->raop);
                    }

                    break;

                case PA_SINK_UNLINKED:
                case PA_SINK_INIT:
                case PA_SINK_INVALID_STATE:
                    ;
            }

            break;

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t w, r;

            r = pa_smoother_get(u->smoother, pa_rtclock_now());
            w = pa_bytes_to_usec((u->offset - u->encoding_overhead + (u->encoded_memchunk.length / u->encoding_ratio)), &u->sink->sample_spec);

            *((pa_usec_t*) data) = w > r ? w - r : 0;
            return 0;
        }

        case SINK_MESSAGE_PASS_SOCKET: {
            struct pollfd *pollfd;

            pa_assert(!u->rtpoll_item);

            u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
            pollfd->fd = u->fd;
            pollfd->events = POLLOUT;
            /*pollfd->events = */pollfd->revents = 0;

            if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
                /* Our stream has been suspended so we just flush it.... */
                pa_raop_flush(u->raop);
            }
            return 0;
        }

        case SINK_MESSAGE_RIP_SOCKET: {
            if (u->fd >= 0) {
                pa_close(u->fd);
                u->fd = -1;
            } else
                /* FIXME */
                pa_log("We should not get to this state. Cannot rip socket if not connected.");

            if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {

                pa_log_debug("RTSP control connection closed, but we're suspended so let's not worry about it... we'll open it again later");

                if (u->rtpoll_item)
                    pa_rtpoll_item_free(u->rtpoll_item);
                u->rtpoll_item = NULL;
            } else {
                /* Question: is this valid here: or should we do some sort of:
                   return pa_sink_process_msg(PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL);
                   ?? */
                pa_module_unload_request(u->module, TRUE);
            }
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    int write_type = 0, read_type = 0;
    short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_thread_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;

        /* pa_log("loop"); */

        if (PA_UNLIKELY(u->sink && u->sink->thread_info.rewind_requested))
            pa_sink_process_rewind(u->sink, 0);

        /* Render some data and write it to the dsp */
        if (u->sink && PA_SINK_IS_OPENED(u->sink->thread_info.state) && ((revents & POLLOUT) || u->use_mmap || u->use_getospace)) {

            if (u->use_mmap) {

                if ((ret = mmap_write(u)) < 0)
                    goto fail;

                revents &= ~POLLOUT;

                if (ret > 0)
                    continue;

            } else {
                ssize_t l;
                bool loop = false, work_done = false;

                l = (ssize_t) u->out_fragment_size;

                if (u->use_getospace) {
                    audio_buf_info info;

                    if (ioctl(u->fd, SNDCTL_DSP_GETOSPACE, &info) < 0) {
                        pa_log_info("Device doesn't support SNDCTL_DSP_GETOSPACE: %s", pa_cstrerror(errno));
                        u->use_getospace = false;
                    } else {
                        l = info.bytes;

                        /* We loop only if GETOSPACE worked and we
                         * actually *know* that we can write more than
                         * one fragment at a time */
                        loop = true;
                    }
                }

                /* Round down to multiples of the fragment size,
                 * because OSS needs that (at least some versions
                 * do) */
                l = (l/(ssize_t) u->out_fragment_size) * (ssize_t) u->out_fragment_size;

                /* Hmm, so poll() signalled us that we can write
                 * something, but GETOSPACE told us there was nothing?
                 * Hmm, make the best of it, try to write some data, to
                 * avoid spinning forever. */
                if (l <= 0 && (revents & POLLOUT)) {
                    l = (ssize_t) u->out_fragment_size;
                    loop = false;
                }

                while (l > 0) {
                    void *p;
                    ssize_t t;

                    if (u->memchunk.length <= 0)
                        pa_sink_render(u->sink, (size_t) l, &u->memchunk);

                    pa_assert(u->memchunk.length > 0);

                    p = pa_memblock_acquire(u->memchunk.memblock);
                    t = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, u->memchunk.length, &write_type);
                    pa_memblock_release(u->memchunk.memblock);

                    /* pa_log("wrote %i bytes of %u", t, l); */

                    pa_assert(t != 0);

                    if (t < 0) {

                        if (errno == EINTR)
                            continue;

                        else if (errno == EAGAIN) {
                            pa_log_debug("EAGAIN");

                            revents &= ~POLLOUT;
                            break;

                        } else {
                            pa_log("Failed to write data to DSP: %s", pa_cstrerror(errno));
                            goto fail;
                        }

                    } else {

                        u->memchunk.index += (size_t) t;
                        u->memchunk.length -= (size_t) t;

                        if (u->memchunk.length <= 0) {
                            pa_memblock_unref(u->memchunk.memblock);
                            pa_memchunk_reset(&u->memchunk);
                        }

                        l -= t;

                        revents &= ~POLLOUT;
                        work_done = true;
                    }

                    if (!loop)
                        break;
                }

                if (work_done)
                    continue;
            }
        }

        /* Try to read some data and pass it on to the source driver. */
        if (u->source && PA_SOURCE_IS_OPENED(u->source->thread_info.state) && ((revents & POLLIN) || u->use_mmap || u->use_getispace)) {

            if (u->use_mmap) {

                if ((ret = mmap_read(u)) < 0)
                    goto fail;

                revents &= ~POLLIN;

                if (ret > 0)
                    continue;

            } else {

                void *p;
                ssize_t l;
                pa_memchunk memchunk;
                bool loop = false, work_done = false;

                l = (ssize_t) u->in_fragment_size;

                if (u->use_getispace) {
                    audio_buf_info info;

                    if (ioctl(u->fd, SNDCTL_DSP_GETISPACE, &info) < 0) {
                        pa_log_info("Device doesn't support SNDCTL_DSP_GETISPACE: %s", pa_cstrerror(errno));
                        u->use_getispace = false;
                    } else {
                        l = info.bytes;
                        loop = true;
                    }
                }

                l = (l/(ssize_t) u->in_fragment_size) * (ssize_t) u->in_fragment_size;

                if (l <= 0 && (revents & POLLIN)) {
                    l = (ssize_t) u->in_fragment_size;
                    loop = false;
                }

                while (l > 0) {
                    ssize_t t;
                    size_t k;

                    pa_assert(l > 0);

                    memchunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

                    k = pa_memblock_get_length(memchunk.memblock);

                    if (k > (size_t) l)
                        k = (size_t) l;

                    k = (k/u->frame_size)*u->frame_size;

                    p = pa_memblock_acquire(memchunk.memblock);
                    t = pa_read(u->fd, p, k, &read_type);
                    pa_memblock_release(memchunk.memblock);

                    pa_assert(t != 0); /* EOF cannot happen */

                    /* pa_log("read %i bytes of %u", t, l); */

                    if (t < 0) {
                        pa_memblock_unref(memchunk.memblock);

                        if (errno == EINTR)
                            continue;

                        else if (errno == EAGAIN) {
                            pa_log_debug("EAGAIN");

                            revents &= ~POLLIN;
                            break;

                        } else {
                            pa_log("Failed to read data from DSP: %s", pa_cstrerror(errno));
                            goto fail;
                        }

                    } else {

                        memchunk.index = 0;
                        memchunk.length = (size_t) t;

                        pa_source_post(u->source, &memchunk);
                        pa_memblock_unref(memchunk.memblock);

                        l -= t;

                        revents &= ~POLLIN;
                        work_done = true;
                    }

                    if (!loop)
                        break;
                }

                if (work_done)
                    continue;
            }
        }

        /* pa_log("loop2 revents=%i", revents); */

        if (u->rtpoll_item) {
            struct pollfd *pollfd;

            pa_assert(u->fd >= 0);

            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
            pollfd->events = (short)
                (((u->source && PA_SOURCE_IS_OPENED(u->source->thread_info.state)) ? POLLIN : 0) |
                 ((u->sink && PA_SINK_IS_OPENED(u->sink->thread_info.state)) ? POLLOUT : 0));
        }

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        if (u->rtpoll_item) {
            struct pollfd *pollfd;

            pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

            if (pollfd->revents & ~(POLLOUT|POLLIN)) {
                pa_log("DSP shutdown.");
                goto fail;
            }

            revents = pollfd->revents;
        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
/* Called from the IO thread. */
static int source_set_state_in_io_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    struct userdata *u;
    bool do_trigger = false;
    bool quick = true;

    pa_assert(s);
    pa_assert_se(u = s->userdata);

    /* It may be that only the suspend cause is changing, in which case there's
     * nothing to do. */
    if (new_state == s->thread_info.state)
        return 0;

    switch (new_state) {

        case PA_SOURCE_SUSPENDED:
            pa_assert(PA_SOURCE_IS_OPENED(s->thread_info.state));

            if (!u->sink || u->sink_suspended)
                suspend(u);

            do_trigger = true;

            u->source_suspended = true;
            break;

        case PA_SOURCE_IDLE:
        case PA_SOURCE_RUNNING:

            if (s->thread_info.state == PA_SOURCE_INIT) {
                do_trigger = true;
                quick = u->sink && PA_SINK_IS_OPENED(u->sink->thread_info.state);
            }

            if (s->thread_info.state == PA_SOURCE_SUSPENDED) {

                if (!u->sink || u->sink_suspended) {
                    if (unsuspend(u) < 0)
                        return -1;
                    quick = false;
                }

                do_trigger = true;

                u->in_mmap_current = 0;
                u->in_mmap_saved_nfrags = 0;

                u->source_suspended = false;
            }
            break;

        case PA_SOURCE_UNLINKED:
        case PA_SOURCE_INIT:
        case PA_SOURCE_INVALID_STATE:
            ;
    }

    if (do_trigger)
        trigger(u, u->sink ? u->sink->thread_info.state : PA_SINK_INVALID_STATE, new_state, quick);

    return 0;
}
/* Sink and source states are passed as arguments, because this is called
 * during state changes, and we need the new state, but thread_info.state
 * has not yet been updated. */
static void trigger(struct userdata *u, pa_sink_state_t sink_state, pa_source_state_t source_state, bool quick) {
    int enable_bits = 0, zero = 0;

    pa_assert(u);

    if (u->fd < 0)
        return;

    pa_log_debug("trigger");

    if (u->source && PA_SOURCE_IS_OPENED(source_state))
        enable_bits |= PCM_ENABLE_INPUT;

    if (u->sink && PA_SINK_IS_OPENED(sink_state))
        enable_bits |= PCM_ENABLE_OUTPUT;

    pa_log_debug("trigger: %i", enable_bits);

    if (u->use_mmap) {

        if (!quick)
            ioctl(u->fd, SNDCTL_DSP_SETTRIGGER, &zero);

#ifdef SNDCTL_DSP_HALT
        if (enable_bits == 0)
            if (ioctl(u->fd, SNDCTL_DSP_HALT, NULL) < 0)
                pa_log_warn("SNDCTL_DSP_HALT: %s", pa_cstrerror(errno));
#endif

        if (ioctl(u->fd, SNDCTL_DSP_SETTRIGGER, &enable_bits) < 0)
            pa_log_warn("SNDCTL_DSP_SETTRIGGER: %s", pa_cstrerror(errno));

        if (u->sink && !(enable_bits & PCM_ENABLE_OUTPUT)) {
            pa_log_debug("clearing playback buffer");
            pa_silence_memory(u->out_mmap, u->out_hwbuf_size, &u->sink->sample_spec);
        }

    } else {

        if (enable_bits)
            if (ioctl(u->fd, SNDCTL_DSP_POST, NULL) < 0)
                pa_log_warn("SNDCTL_DSP_POST: %s", pa_cstrerror(errno));

        if (!quick) {
            /*
             * Some crappy drivers do not start the recording until we
             * read something. Without this snippet, poll will never
             * register the fd as ready.
             */
            if (u->source && PA_SOURCE_IS_OPENED(source_state)) {
                uint8_t *buf = pa_xnew(uint8_t, u->in_fragment_size);

                /* XXX: Shouldn't this be done only when resuming the source?
                 * Currently this code path is executed also when resuming the
                 * sink while the source is already running. */
                if (pa_read(u->fd, buf, u->in_fragment_size, NULL) < 0)
                    pa_log("pa_read() failed: %s", pa_cstrerror(errno));

                pa_xfree(buf);
            }
        }
    }
}
/* Called from I/O thread context */
static int rtpoll_work_cb(pa_rtpoll_item *i) {
    pa_memchunk chunk;
    int64_t k, j, delta;
    struct timeval now = { 0, 0 };
    struct session *s;
    struct pollfd *p;

    pa_assert_se(s = pa_rtpoll_item_get_userdata(i));

    p = pa_rtpoll_item_get_pollfd(i, NULL);

    if (p->revents & (POLLERR|POLLNVAL|POLLHUP|POLLOUT)) {
        pa_log("poll() signalled bad revents.");
        return -1;
    }

    if ((p->revents & POLLIN) == 0)
        return 0;

    p->revents = 0;

    if (pa_rtp_recv(&s->rtp_context, &chunk, s->userdata->module->core->mempool, &now) < 0)
        return 0;

    if (s->sdp_info.payload != s->rtp_context.payload ||
        !PA_SINK_IS_OPENED(s->sink_input->sink->thread_info.state)) {
        pa_memblock_unref(chunk.memblock);
        return 0;
    }

    if (!s->first_packet) {
        s->first_packet = TRUE;

        s->ssrc = s->rtp_context.ssrc;
        s->offset = s->rtp_context.timestamp;

        if (s->ssrc == s->userdata->module->core->cookie)
            pa_log_warn("Detected RTP packet loop!");
    } else {
        if (s->ssrc != s->rtp_context.ssrc) {
            pa_memblock_unref(chunk.memblock);
            return 0;
        }
    }

    /* Check whether there was a timestamp overflow */
    k = (int64_t) s->rtp_context.timestamp - (int64_t) s->offset;
    j = (int64_t) 0x100000000LL - (int64_t) s->offset + (int64_t) s->rtp_context.timestamp;

    if ((k < 0 ? -k : k) < (j < 0 ? -j : j))
        delta = k;
    else
        delta = j;

    pa_memblockq_seek(s->memblockq, delta * (int64_t) s->rtp_context.frame_size, PA_SEEK_RELATIVE, TRUE);

    if (now.tv_sec == 0) {
        PA_ONCE_BEGIN {
            pa_log_warn("Using artificial time instead of timestamp");
        } PA_ONCE_END;
        pa_rtclock_get(&now);
    } else
/* Generic sink state change logic. Used by raw_sink and voip_sink */
int voice_sink_set_state(pa_sink *s, pa_sink *other, pa_sink_state_t state) {
    struct userdata *u;
    pa_sink *om_sink;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!other) {
        pa_log_debug("other sink not initialized or freed");
        return 0;
    }
    pa_sink_assert_ref(other);

    om_sink = u->master_sink;

    if (u->hw_sink_input && PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->hw_sink_input))) {
        if (pa_sink_input_get_state(u->hw_sink_input) == PA_SINK_INPUT_CORKED) {
            if (PA_SINK_IS_OPENED(state) ||
                PA_SINK_IS_OPENED(pa_sink_get_state(other)) ||
                pa_atomic_load(&u->cmt_connection.dl_state) == CMT_DL_ACTIVE) {
                pa_sink_input_cork(u->hw_sink_input, FALSE);
                pa_log_debug("hw_sink_input uncorked");
            }
        } else {
            if (state == PA_SINK_SUSPENDED &&
                pa_sink_get_state(other) == PA_SINK_SUSPENDED &&
                pa_atomic_load(&u->cmt_connection.dl_state) != CMT_DL_ACTIVE) {
                pa_sink_input_cork(u->hw_sink_input, TRUE);
                pa_log_debug("hw_sink_input corked");
            }
        }
    }

    if (om_sink == NULL) {
        pa_log_info("No master sink, assuming primary mixer tuning.");
        pa_atomic_store(&u->mixer_state, PROP_MIXER_TUNING_PRI);
    } else if (pa_atomic_load(&u->cmt_connection.dl_state) == CMT_DL_ACTIVE ||
               (pa_sink_get_state(u->voip_sink) <= PA_SINK_SUSPENDED && voice_voip_sink_used_by(u))) {
        if (pa_atomic_load(&u->mixer_state) == PROP_MIXER_TUNING_PRI) {
            pa_proplist *p = pa_proplist_new();
            pa_assert(p);
            pa_proplist_sets(p, PROP_MIXER_TUNING_MODE, PROP_MIXER_TUNING_ALT_S);
            pa_sink_update_proplist(om_sink, PA_UPDATE_REPLACE, p);
            pa_atomic_store(&u->mixer_state, PROP_MIXER_TUNING_ALT);
            pa_proplist_free(p);
            if (u->sidetone_enable)
                voice_enable_sidetone(u, 1);
        }
    } else {
        if (pa_atomic_load(&u->mixer_state) == PROP_MIXER_TUNING_ALT) {
            pa_proplist *p = pa_proplist_new();
            pa_assert(p);
            pa_proplist_sets(p, PROP_MIXER_TUNING_MODE, PROP_MIXER_TUNING_PRI_S);
            pa_sink_update_proplist(om_sink, PA_UPDATE_REPLACE, p);
            pa_atomic_store(&u->mixer_state, PROP_MIXER_TUNING_PRI);
            pa_proplist_free(p);
            voice_enable_sidetone(u, 0);
        }
    }

    return 0;
}