/* Called from output thread context.
 *
 * Push callback for our source output: whenever the master source
 * delivers audio, relay the chunk untouched to the virtual source. */
static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    if (PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
        /* Forward the captured data straight through */
        pa_source_post(u->source, chunk);
    else
        /* Data arriving while unlinked is unexpected; drop it */
        pa_log("push when no link?");
}
/* Called from output thread context.
 *
 * Attach callback: the source output has just been attached to the
 * master source's I/O thread, so hook our virtual source into the same
 * thread and mirror the master's latency/rewind parameters onto it.
 * NOTE(review): the rtpoll must be set before
 * pa_source_attach_within_thread() is called below — keep this order. */
static void source_output_attach_cb(pa_source_output *o) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    /* Run the virtual source on the master source's rtpoll */
    pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);

    /* Propagate the master's latency constraints to the virtual source */
    pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
    pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);

    /* Max rewind as seen through this output (accounts for resampling) */
    pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));

    pa_source_attach_within_thread(u->source);
}
/* Called from thread context.
 *
 * Record the latency this output requests from its source and tell the
 * source to recompute its effective latency. Returns the value actually
 * stored: on sources without dynamic latency the fixed latency is used
 * instead, and any concrete value (i.e. not the (pa_usec_t) -1 "no
 * preference" sentinel) is clamped to the source's allowed range. */
pa_usec_t pa_source_output_set_requested_latency_within_thread(pa_source_output *o, pa_usec_t usec) {
    pa_usec_t effective;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);

    effective = usec;

    /* Non-dynamic sources ignore the request and impose their fixed latency */
    if (!(o->source->flags & PA_SOURCE_DYNAMIC_LATENCY))
        effective = o->source->thread_info.fixed_latency;

    /* Clamp real values into the source's supported range; -1 means
     * "no preference" and passes through untouched */
    if (effective != (pa_usec_t) -1)
        effective = PA_CLAMP(effective, o->source->thread_info.min_latency, o->source->thread_info.max_latency);

    o->thread_info.requested_source_latency = effective;
    pa_source_invalidate_requested_latency(o->source, TRUE);

    return effective;
}
/* Called from output thread context.
 *
 * State-change callback: when this output transitions out of INIT into a
 * linked state, compute how many bytes of already-buffered master-source
 * data to skip so that capture starts at our configured latency. */
static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    /* Only the INIT -> linked transition is of interest */
    if (!PA_SOURCE_OUTPUT_IS_LINKED(state) || o->thread_info.state != PA_SOURCE_OUTPUT_INIT)
        return;

    /* Skip whatever master latency exceeds our own target latency
     * (PA_CLIP_SUB keeps the difference from going negative) */
    u->skip = pa_usec_to_bytes(PA_CLIP_SUB(pa_source_get_latency_within_thread(o->source), u->latency), &o->sample_spec);
    pa_log_info("Skipping %lu bytes", (unsigned long) u->skip);
}
/* Called from input thread context.
 *
 * Push callback for the virtual source's master-source output. If an
 * uplink sink exists and is running, render an equally sized chunk from
 * it, mix it with the captured chunk into a freshly allocated memblock,
 * and post the mix to the virtual source; otherwise the captured chunk
 * is forwarded unmodified. Note the ref-count choreography below: the
 * mix target is acquired/released around pa_mix(), and both the sink
 * chunk and the target are unreffed exactly once. */
static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
        pa_log("push when no link?");
        return;
    }

    /* PUT YOUR CODE HERE TO DO SOMETHING WITH THE SOURCE DATA */

    /* if uplink sink exists, pull data from there; simplify by using
     * same length as chunk provided by source */
    if (u->sink && (pa_sink_get_state(u->sink) == PA_SINK_RUNNING)) {
        pa_memchunk tchunk;
        size_t nbytes = chunk->length;
        pa_mix_info streams[2];
        pa_memchunk target_chunk;
        void *target;
        int ch;

        /* Hmm, process any rewind request that might be queued up */
        pa_sink_process_rewind(u->sink, 0);

        /* get data from the sink; loop until the memblockq can satisfy
         * a peek (each render pushes exactly nbytes into the queue) */
        while (pa_memblockq_peek(u->sink_memblockq, &tchunk) < 0) {
            pa_memchunk nchunk;

            /* make sure we get nbytes from the sink with render_full,
             * otherwise we cannot mix with the uplink */
            pa_sink_render_full(u->sink, nbytes, &nchunk);
            pa_memblockq_push(u->sink_memblockq, &nchunk);
            pa_memblock_unref(nchunk.memblock);
        }
        pa_assert(tchunk.length == chunk->length);

        /* move the read pointer for sink memblockq */
        pa_memblockq_drop(u->sink_memblockq, tchunk.length);

        /* allocate target chunk */
        /* this could probably be done in-place, but having chunk as both
         * the input and output creates issues with reference counts */
        target_chunk.index = 0;
        target_chunk.length = chunk->length;
        pa_assert(target_chunk.length == chunk->length);

        target_chunk.memblock = pa_memblock_new(o->source->core->mempool, target_chunk.length);
        pa_assert( target_chunk.memblock );

        /* get target pointer (index is 0, kept for symmetry with pa_memchunk usage) */
        target = (void*)((uint8_t*)pa_memblock_acquire(target_chunk.memblock) + target_chunk.index);

        /* set-up mixing structure
         * volume was taken care of in sink and source already, so mix at
         * PA_VOLUME_NORM for every channel of both streams */
        streams[0].chunk = *chunk;
        for(ch=0;ch<o->sample_spec.channels;ch++)
            streams[0].volume.values[ch] = PA_VOLUME_NORM; /* FIXME */
        streams[0].volume.channels = o->sample_spec.channels;

        streams[1].chunk = tchunk;
        for(ch=0;ch<o->sample_spec.channels;ch++)
            streams[1].volume.values[ch] = PA_VOLUME_NORM; /* FIXME */
        streams[1].volume.channels = o->sample_spec.channels;

        /* do mixing */
        pa_mix(streams,                /* 2 streams to be mixed */
               2,
               target,                 /* put result in target chunk */
               chunk->length,          /* same length as input */
               (const pa_sample_spec *)&o->sample_spec, /* same sample spec for input and output */
               NULL,                   /* no volume information */
               FALSE);                 /* no mute */

        pa_memblock_release(target_chunk.memblock);
        pa_memblock_unref(tchunk.memblock); /* clean-up */

        /* forward the data to the virtual source */
        pa_source_post(u->source, &target_chunk);

        pa_memblock_unref(target_chunk.memblock); /* clean-up */
    } else {
        /* no running uplink sink: forward the data to the virtual source */
        pa_source_post(u->source, chunk);
    }
}
/* Called from thread context.
 *
 * Return the source's max rewind expressed in this output's domain:
 * without a resampler that is simply the source-side value; with one,
 * it is translated through the resampler's size mapping. */
size_t pa_source_output_get_max_rewind(pa_source_output *o) {
    size_t source_max_rewind;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);

    source_max_rewind = o->source->thread_info.max_rewind;

    if (!o->thread_info.resampler)
        return source_max_rewind;

    return pa_resampler_request(o->thread_info.resampler, source_max_rewind);
}
/* Called from thread context.
 *
 * Feed a chunk captured by the source into this output. The chunk first
 * goes through the per-output delay queue; everything in the queue beyond
 * `limit` bytes is then drained, resampled if necessary, and handed to the
 * output's push() callback. `limit` holds back max_rewind bytes for
 * outputs that cannot process rewinds, so that a later source rewind can
 * still retract unplayed data. */
void pa_source_output_push(pa_source_output *o, const pa_memchunk *chunk) {
    size_t length;
    size_t limit, mbs = 0;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert(PA_SOURCE_OUTPUT_IS_LINKED(o->thread_info.state));
    pa_assert(chunk);
    pa_assert(pa_frame_aligned(chunk->length, &o->source->sample_spec));

    /* Nothing to deliver to a corked output or one without a push callback */
    if (!o->push || o->thread_info.state == PA_SOURCE_OUTPUT_CORKED)
        return;

    pa_assert(o->thread_info.state == PA_SOURCE_OUTPUT_RUNNING);

    if (pa_memblockq_push(o->thread_info.delay_memblockq, chunk) < 0) {
        /* Queue full: advance the write pointer past the chunk so the
         * stream stays in sync even though this data is lost */
        pa_log_debug("Delay queue overflow!");
        pa_memblockq_seek(o->thread_info.delay_memblockq, (int64_t) chunk->length, PA_SEEK_RELATIVE, TRUE);
    }

    /* Outputs that handle rewinds get everything immediately; others keep
     * max_rewind bytes queued so a rewind can still un-deliver them */
    limit = o->process_rewind ? 0 : o->source->thread_info.max_rewind;

    if (limit > 0 && o->source->monitor_of) {
        pa_usec_t latency;
        size_t n;

        /* Hmm, check the latency for knowing how much of the buffered
         * data is actually still unplayed and might hence still
         * change. This is suboptimal. Ideally we'd have a call like
         * pa_sink_get_changeable_size() or so that tells us how much
         * of the queued data is actually still changeable. Hence
         * FIXME! */

        latency = pa_sink_get_latency_within_thread(o->source->monitor_of);

        n = pa_usec_to_bytes(latency, &o->source->sample_spec);

        if (n < limit)
            limit = n;
    }

    /* Implement the delay queue: drain everything beyond `limit` bytes */
    while ((length = pa_memblockq_get_length(o->thread_info.delay_memblockq)) > limit) {
        pa_memchunk qchunk;

        /* length now holds the number of bytes we may deliver this pass */
        length -= limit;

        pa_assert_se(pa_memblockq_peek(o->thread_info.delay_memblockq, &qchunk) >= 0);

        /* Never deliver more than the deliverable budget */
        if (qchunk.length > length)
            qchunk.length = length;

        pa_assert(qchunk.length > 0);

        if (!o->thread_info.resampler)
            o->push(o, &qchunk);
        else {
            pa_memchunk rchunk;

            /* Lazily fetch the resampler's max block size, once */
            if (mbs == 0)
                mbs = pa_resampler_max_block_size(o->thread_info.resampler);

            if (qchunk.length > mbs)
                qchunk.length = mbs;

            pa_resampler_run(o->thread_info.resampler, &qchunk, &rchunk);

            if (rchunk.length > 0)
                o->push(o, &rchunk);

            if (rchunk.memblock)
                pa_memblock_unref(rchunk.memblock);
        }

        /* Drop only what we actually delivered (qchunk.length may have
         * been shrunk above); the remainder stays queued for next pass */
        pa_memblock_unref(qchunk.memblock);
        pa_memblockq_drop(o->thread_info.delay_memblockq, qchunk.length);
    }
}