static void pstream_memblock_callback(pa_pstream *p, uint32_t channel, int64_t offset, pa_seek_mode_t seek, const pa_memchunk *chunk, void *userdata) {
    pa_context *c = userdata;
    pa_stream *s;

    pa_assert(p);
    pa_assert(chunk);
    pa_assert(chunk->length > 0);
    pa_assert(c);
    pa_assert(PA_REFCNT_VALUE(c) >= 1);

    pa_context_ref(c);

    if ((s = pa_hashmap_get(c->record_streams, PA_UINT32_TO_PTR(channel)))) {

        if (chunk->memblock) {
            pa_memblockq_seek(s->record_memblockq, offset, seek, TRUE);
            pa_memblockq_push_align(s->record_memblockq, chunk);
        } else
            pa_memblockq_seek(s->record_memblockq, offset+chunk->length, seek, TRUE);

        if (s->read_callback) {
            size_t l;

            if ((l = pa_memblockq_get_length(s->record_memblockq)) > 0)
                s->read_callback(s, l, s->read_userdata);
        }
    }

    pa_context_unref(c);
}
/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        max_rewrite = nbytes + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            unsigned c;

            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, TRUE);

            pa_log_debug("Resetting plugin");

            /* Reset the plugin */
            if (u->descriptor->deactivate)
                for (c = 0; c < (u->channels / u->max_ladspaport_count); c++)
                    u->descriptor->deactivate(u->handle[c]);
            if (u->descriptor->activate)
                for (c = 0; c < (u->channels / u->max_ladspaport_count); c++)
                    u->descriptor->activate(u->handle[c]);
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes);
}
/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        max_rewrite = nbytes * u->sink_fs / u->fs + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes * u->sink_fs / u->fs, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, true);

            /* Reset the input buffer */
            memset(u->input_buffer, 0, u->hrir_samples * u->sink_fs);
            u->input_buffer_offset = 0;
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes * u->sink_fs / u->fs);
}
/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        max_rewrite = nbytes + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, TRUE);

            /* (5) PUT YOUR CODE HERE TO RESET YOUR FILTER */
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes);
}
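The "(5)" placeholder above is where a module built from this template resets its own filter state, in the same spirit as the LADSPA deactivate/activate cycle and the virtual-surround input-buffer memset in the snippets before it. A minimal sketch of that body, assuming a hypothetical filter that keeps its history in u->filter_state of u->filter_state_bytes bytes (neither field exists in the template's struct userdata):

        if (amount > 0) {
            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, TRUE);

            /* The rewound audio will be rendered again, so drop any
             * history the filter has accumulated for it. */
            memset(u->filter_state, 0, u->filter_state_bytes);
        }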
/* Called from I/O thread context */
static int rtpoll_work_cb(pa_rtpoll_item *i) {
    pa_memchunk chunk;
    int64_t k, j, delta;
    struct timeval now = { 0, 0 };
    struct session *s;
    struct pollfd *p;

    pa_assert_se(s = pa_rtpoll_item_get_userdata(i));

    p = pa_rtpoll_item_get_pollfd(i, NULL);

    if (p->revents & (POLLERR|POLLNVAL|POLLHUP|POLLOUT)) {
        pa_log("poll() signalled bad revents.");
        return -1;
    }

    if ((p->revents & POLLIN) == 0)
        return 0;

    p->revents = 0;

    if (pa_rtp_recv(&s->rtp_context, &chunk, s->userdata->module->core->mempool, &now) < 0)
        return 0;

    if (s->sdp_info.payload != s->rtp_context.payload ||
        !PA_SINK_IS_OPENED(s->sink_input->sink->thread_info.state)) {
        pa_memblock_unref(chunk.memblock);
        return 0;
    }

    if (!s->first_packet) {
        s->first_packet = TRUE;

        s->ssrc = s->rtp_context.ssrc;
        s->offset = s->rtp_context.timestamp;

        if (s->ssrc == s->userdata->module->core->cookie)
            pa_log_warn("Detected RTP packet loop!");
    } else {
        if (s->ssrc != s->rtp_context.ssrc) {
            pa_memblock_unref(chunk.memblock);
            return 0;
        }
    }

    /* Check whether there was a timestamp overflow */
    k = (int64_t) s->rtp_context.timestamp - (int64_t) s->offset;
    j = (int64_t) 0x100000000LL - (int64_t) s->offset + (int64_t) s->rtp_context.timestamp;

    if ((k < 0 ? -k : k) < (j < 0 ? -j : j))
        delta = k;
    else
        delta = j;

    pa_memblockq_seek(s->memblockq, delta * (int64_t) s->rtp_context.frame_size, PA_SEEK_RELATIVE, TRUE);

    if (now.tv_sec == 0) {
        PA_ONCE_BEGIN {
            pa_log_warn("Using artificial time instead of timestamp");
        } PA_ONCE_END;
        pa_rtclock_get(&now);
    } else
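The overflow check in the snippet above computes two candidate deltas for the 32-bit RTP timestamp, one direct and one assuming the counter wrapped past 2^32, and keeps whichever has the smaller magnitude. A self-contained sketch of the same arithmetic with made-up timestamp values (not taken from the module):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t offset    = 0xFFFFFF00u;  /* timestamp of the first packet */
        uint32_t timestamp = 0x00000100u;  /* timestamp of the current packet */

        int64_t k = (int64_t) timestamp - (int64_t) offset;                            /* direct delta */
        int64_t j = (int64_t) 0x100000000LL - (int64_t) offset + (int64_t) timestamp;  /* wrapped delta */
        int64_t delta = (k < 0 ? -k : k) < (j < 0 ? -j : j) ? k : j;

        /* Prints k=-4294966784 j=512 delta=512: the wrapped reading wins, so
         * the queue would be seeked forward by 512 frames' worth of bytes. */
        printf("k=%lld j=%lld delta=%lld\n", (long long) k, (long long) j, (long long) delta);
        return 0;
    }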
/* Called from output thread context */
static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = data;

            pa_sink_input_assert_io_context(u->sink_input);

            *r = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
        }

        case SINK_INPUT_MESSAGE_POST:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_push_align(u->memblockq, chunk);
            else
                pa_memblockq_flush_write(u->memblockq, TRUE);

            update_min_memblockq_length(u);

            /* Is this the end of an underrun? Then let's start things
             * right-away */
            if (!u->in_pop &&
                u->sink_input->thread_info.underrun_for > 0 &&
                pa_memblockq_is_readable(u->memblockq)) {

                pa_log_debug("Requesting rewind due to end of underrun.");
                pa_sink_input_request_rewind(u->sink_input,
                                             (size_t) (u->sink_input->thread_info.underrun_for == (size_t) -1 ? 0 : u->sink_input->thread_info.underrun_for),
                                             FALSE, TRUE, FALSE);
            }

            u->recv_counter += (int64_t) chunk->length;

            return 0;

        case SINK_INPUT_MESSAGE_REWIND:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_seek(u->memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
            else
                pa_memblockq_flush_write(u->memblockq, TRUE);

            u->recv_counter -= offset;

            update_min_memblockq_length(u);

            return 0;

        case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t length;

            update_min_memblockq_length(u);

            length = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);

            u->latency_snapshot.recv_counter = u->recv_counter;
            u->latency_snapshot.sink_input_buffer =
                pa_memblockq_get_length(u->memblockq) +
                (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, length) : length);
            u->latency_snapshot.sink_latency = pa_sink_get_latency_within_thread(u->sink_input->sink);

            u->latency_snapshot.max_request = pa_sink_input_get_max_request(u->sink_input);

            u->latency_snapshot.min_memblockq_length = u->min_memblockq_length;
            u->min_memblockq_length = (size_t) -1;

            return 0;
        }

        case SINK_INPUT_MESSAGE_MAX_REQUEST_CHANGED: {
            /* This message is sent from the IO thread to the main
             * thread! So don't be confused. All the user cases above
             * are executed in thread context, but this one is not! */

            pa_assert_ctl_context();

            if (u->time_event)
                adjust_rates(u);
            return 0;
        }
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}
/* Called from output thread context */
static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = data;

            pa_sink_input_assert_io_context(u->sink_input);

            *r = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
        }

        case SINK_INPUT_MESSAGE_POST:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_push_align(u->memblockq, chunk);
            else
                pa_memblockq_flush_write(u->memblockq, true);

            /* Is this the end of an underrun? Then let's start things
             * right-away */
            if (!u->in_pop &&
                u->sink_input->thread_info.underrun_for > 0 &&
                pa_memblockq_is_readable(u->memblockq)) {

                pa_log_debug("Requesting rewind due to end of underrun.");
                pa_sink_input_request_rewind(u->sink_input,
                                             (size_t) (u->sink_input->thread_info.underrun_for == (size_t) -1 ? 0 : u->sink_input->thread_info.underrun_for),
                                             false, true, false);
            }

            u->recv_counter += (int64_t) chunk->length;

            return 0;

        case SINK_INPUT_MESSAGE_REWIND:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_seek(u->memblockq, -offset, PA_SEEK_RELATIVE, true);
            else
                pa_memblockq_flush_write(u->memblockq, true);

            u->recv_counter -= offset;

            return 0;

        case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t length;

            length = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);

            u->latency_snapshot.recv_counter = u->recv_counter;
            u->latency_snapshot.sink_input_buffer = pa_memblockq_get_length(u->memblockq);
            /* Add content of render memblockq to sink latency */
            u->latency_snapshot.sink_latency = pa_sink_get_latency_within_thread(u->sink_input->sink) +
                                               pa_bytes_to_usec(length, &u->sink_input->sink->sample_spec);
            u->latency_snapshot.sink_timestamp = pa_rtclock_now();

            return 0;
        }
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}
int main(int argc, char *argv[]) {
    int ret;

    pa_mempool *p;
    pa_memblockq *bq;
    pa_memchunk chunk1, chunk2, chunk3, chunk4;
    pa_memchunk silence;
    pa_sample_spec ss = {
        .format = PA_SAMPLE_S16LE,
        .rate = 48000,
        .channels = 1
    };

    pa_log_set_level(PA_LOG_DEBUG);

    p = pa_mempool_new(FALSE, 0);

    pa_assert_se(silence.memblock = pa_memblock_new_fixed(p, (char*) "__", 2, 1));
    silence.index = 0;
    silence.length = pa_memblock_get_length(silence.memblock);

    pa_assert_se(bq = pa_memblockq_new("test memblockq", 0, 200, 10, &ss, 4, 4, 40, &silence));

    pa_assert_se(chunk1.memblock = pa_memblock_new_fixed(p, (char*) "11", 2, 1));
    chunk1.index = 0;
    chunk1.length = 2;

    pa_assert_se(chunk2.memblock = pa_memblock_new_fixed(p, (char*) "XX22", 4, 1));
    chunk2.index = 2;
    chunk2.length = 2;

    pa_assert_se(chunk3.memblock = pa_memblock_new_fixed(p, (char*) "3333", 4, 1));
    chunk3.index = 0;
    chunk3.length = 4;

    pa_assert_se(chunk4.memblock = pa_memblock_new_fixed(p, (char*) "44444444", 8, 1));
    chunk4.index = 0;
    chunk4.length = 8;

    ret = pa_memblockq_push(bq, &chunk1);
    assert(ret == 0);

    ret = pa_memblockq_push(bq, &chunk2);
    assert(ret == 0);

    ret = pa_memblockq_push(bq, &chunk3);
    assert(ret == 0);

    ret = pa_memblockq_push(bq, &chunk4);
    assert(ret == 0);

    pa_memblockq_seek(bq, -6, 0, TRUE);
    ret = pa_memblockq_push(bq, &chunk3);
    assert(ret == 0);

    pa_memblockq_seek(bq, -2, 0, TRUE);
    ret = pa_memblockq_push(bq, &chunk1);
    assert(ret == 0);

    pa_memblockq_seek(bq, -10, 0, TRUE);
    ret = pa_memblockq_push(bq, &chunk4);
    assert(ret == 0);

    pa_memblockq_seek(bq, 10, 0, TRUE);
    ret = pa_memblockq_push(bq, &chunk1);
    assert(ret == 0);

    pa_memblockq_seek(bq, -6, 0, TRUE);
    ret = pa_memblockq_push(bq, &chunk2);
    assert(ret == 0);

    /* Test splitting */
    pa_memblockq_seek(bq, -12, 0, TRUE);
    ret = pa_memblockq_push(bq, &chunk1);
    assert(ret == 0);

    pa_memblockq_seek(bq, 20, 0, TRUE);

    /* Test merging */
    ret = pa_memblockq_push(bq, &chunk3);
    assert(ret == 0);
    pa_memblockq_seek(bq, -2, 0, TRUE);

    chunk3.index += 2;
    chunk3.length -= 2;

    ret = pa_memblockq_push(bq, &chunk3);
    assert(ret == 0);

    pa_memblockq_seek(bq, 30, PA_SEEK_RELATIVE, TRUE);

    dump(bq);

    pa_memblockq_rewind(bq, 52);

    dump(bq);

    pa_memblockq_free(bq);
    pa_memblock_unref(silence.memblock);
    pa_memblock_unref(chunk1.memblock);
    pa_memblock_unref(chunk2.memblock);
    pa_memblock_unref(chunk3.memblock);
    pa_memblock_unref(chunk4.memblock);

    pa_mempool_free(p);

    return 0;
}
/* Called from thread context */
void pa_source_output_push(pa_source_output *o, const pa_memchunk *chunk) {
    size_t length;
    size_t limit, mbs = 0;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert(PA_SOURCE_OUTPUT_IS_LINKED(o->thread_info.state));
    pa_assert(chunk);
    pa_assert(pa_frame_aligned(chunk->length, &o->source->sample_spec));

    if (!o->push || o->thread_info.state == PA_SOURCE_OUTPUT_CORKED)
        return;

    pa_assert(o->thread_info.state == PA_SOURCE_OUTPUT_RUNNING);

    if (pa_memblockq_push(o->thread_info.delay_memblockq, chunk) < 0) {
        pa_log_debug("Delay queue overflow!");
        pa_memblockq_seek(o->thread_info.delay_memblockq, (int64_t) chunk->length, PA_SEEK_RELATIVE, TRUE);
    }

    limit = o->process_rewind ? 0 : o->source->thread_info.max_rewind;

    if (limit > 0 && o->source->monitor_of) {
        pa_usec_t latency;
        size_t n;

        /* Hmm, check the latency for knowing how much of the buffered
         * data is actually still unplayed and might hence still
         * change. This is suboptimal. Ideally we'd have a call like
         * pa_sink_get_changeable_size() or so that tells us how much
         * of the queued data is actually still changeable. Hence
         * FIXME! */
        latency = pa_sink_get_latency_within_thread(o->source->monitor_of);

        n = pa_usec_to_bytes(latency, &o->source->sample_spec);

        if (n < limit)
            limit = n;
    }

    /* Implement the delay queue */
    while ((length = pa_memblockq_get_length(o->thread_info.delay_memblockq)) > limit) {
        pa_memchunk qchunk;

        length -= limit;

        pa_assert_se(pa_memblockq_peek(o->thread_info.delay_memblockq, &qchunk) >= 0);

        if (qchunk.length > length)
            qchunk.length = length;

        pa_assert(qchunk.length > 0);

        if (!o->thread_info.resampler)
            o->push(o, &qchunk);
        else {
            pa_memchunk rchunk;

            if (mbs == 0)
                mbs = pa_resampler_max_block_size(o->thread_info.resampler);

            if (qchunk.length > mbs)
                qchunk.length = mbs;

            pa_resampler_run(o->thread_info.resampler, &qchunk, &rchunk);

            if (rchunk.length > 0)
                o->push(o, &rchunk);

            if (rchunk.memblock)
                pa_memblock_unref(rchunk.memblock);
        }

        pa_memblock_unref(qchunk.memblock);
        pa_memblockq_drop(o->thread_info.delay_memblockq, qchunk.length);
    }
}