/* Play a single memchunk on the given sink by wrapping it in a one-shot
 * memblockq and handing that queue to pa_play_memblockq().
 *
 * On success the queue's ownership passes to pa_play_memblockq() and 0 is
 * returned; on failure the queue is freed here and the negative error code
 * from pa_play_memblockq() is propagated. */
int pa_play_memchunk( pa_sink *sink, const pa_sample_spec *ss, const pa_channel_map *map, const pa_memchunk *chunk, pa_cvolume *volume, pa_proplist *p, uint32_t *sink_input_index) {
    pa_memblockq *bq;
    int ret;

    pa_assert(sink);
    pa_assert(ss);
    pa_assert(chunk);

    /* Queue sized exactly for this one chunk; no silence memblock needed
     * since we never seek past the written data. */
    bq = pa_memblockq_new(0, chunk->length, 0, pa_frame_size(ss), 1, 1, 0, NULL);
    pa_assert_se(pa_memblockq_push(bq, chunk) >= 0);

    ret = pa_play_memblockq(sink, ss, map, bq, volume, p, sink_input_index);
    if (ret >= 0)
        return 0;

    /* Playback setup failed — ownership of bq was not taken, free it. */
    pa_memblockq_free(bq);
    return ret;
}
/* Play a single memchunk on the given sink: wrap it in a one-shot memblockq
 * (backed by the core's cached silence block) and hand that queue to
 * pa_play_memblockq().
 *
 * Returns 0 on success (queue ownership passes to pa_play_memblockq());
 * on failure the queue is freed here and the negative error code is
 * propagated. */
int pa_play_memchunk( pa_sink *sink, const pa_sample_spec *ss, const pa_channel_map *map, const pa_memchunk *chunk, pa_cvolume *volume, pa_proplist *p, uint32_t *sink_input_index) {
    pa_memblockq *q;
    int r;
    pa_memchunk silence;

    pa_assert(sink);
    pa_assert(ss);
    pa_assert(chunk);

    /* Borrow a silence block matching the sample spec; the queue takes its
     * own reference, so we drop ours right after pa_memblockq_new(). */
    pa_silence_memchunk_get(&sink->core->silence_cache, sink->core->mempool, &silence, ss, 0);
    q = pa_memblockq_new(0, chunk->length, 0, pa_frame_size(ss), 1, 1, 0, &silence);
    pa_memblock_unref(silence.memblock);

    pa_assert_se(pa_memblockq_push(q, chunk) >= 0);

    if ((r = pa_play_memblockq(sink, ss, map, q, volume, p, sink_input_index)) < 0) {
        /* Failure: ownership of q was not taken, free it ourselves. */
        pa_memblockq_free(q);
        return r;
    }

    return 0;
}
/* Called from I/O thread context */
/* Render callback of the filter's sink input: pulls audio rendered by the
 * virtual sink through the memblockq, de-interleaves it into the LADSPA
 * plugin's per-port input buffers, runs one plugin instance per channel
 * group, and re-interleaves the plugin output into the returned chunk.
 * Assumes float32 native-endian samples throughout (PA_SAMPLE_FLOAT32NE). */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;
    float *src, *dst;
    size_t fs;
    unsigned n, h, c;
    pa_memchunk tchunk;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    /* Hmm, process any rewind request that might be queued up */
    pa_sink_process_rewind(u->sink, 0);

    /* Refill the queue from the virtual sink until a chunk can be peeked. */
    while (pa_memblockq_peek(u->memblockq, &tchunk) < 0) {
        pa_memchunk nchunk;
        pa_sink_render(u->sink, nbytes, &nchunk);
        pa_memblockq_push(u->memblockq, &nchunk);
        pa_memblock_unref(nchunk.memblock);
    }

    /* Never hand out more than was asked for, and never more frames than the
     * plugin's configured block size. */
    tchunk.length = PA_MIN(nbytes, tchunk.length);
    pa_assert(tchunk.length > 0);
    fs = pa_frame_size(&i->sample_spec);
    n = (unsigned) (PA_MIN(tchunk.length, u->block_size) / fs);
    pa_assert(n > 0);

    chunk->index = 0;
    chunk->length = n*fs;
    chunk->memblock = pa_memblock_new(i->sink->core->mempool, chunk->length);
    pa_memblockq_drop(u->memblockq, chunk->length);

    src = (float*) ((uint8_t*) pa_memblock_acquire(tchunk.memblock) + tchunk.index);
    dst = (float*) pa_memblock_acquire(chunk->memblock);

    /* One plugin instance per group of max_ladspaport_count channels.
     * pa_sample_clamp() is used as a strided copy here: stride sizeof(float)
     * on the plugin side, u->channels*sizeof(float) on the interleaved side. */
    for (h = 0; h < (u->channels / u->max_ladspaport_count); h++) {
        for (c = 0; c < u->input_count; c++)
            pa_sample_clamp(PA_SAMPLE_FLOAT32NE, u->input[c], sizeof(float), src+ h*u->max_ladspaport_count + c, u->channels*sizeof(float), n);
        u->descriptor->run(u->handle[h], n);
        for (c = 0; c < u->output_count; c++)
            pa_sample_clamp(PA_SAMPLE_FLOAT32NE, dst + h*u->max_ladspaport_count + c, u->channels*sizeof(float), u->output[c], sizeof(float), n);
    }

    pa_memblock_release(tchunk.memblock);
    pa_memblock_release(chunk->memblock);
    pa_memblock_unref(tchunk.memblock);

    return 0;
}
/* Called from I/O thread context */
/* Push callback of the RTP sender's source output: buffer the captured
 * chunk in the memblockq and let pa_rtp_send() drain the queue into
 * MTU-sized RTP packets. If the queue rejects the chunk, log and skip
 * sending this round. */
static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_assert_se(u = o->userdata);

    /* Only transmit when the chunk was actually queued. */
    if (pa_memblockq_push(u->memblockq, chunk) >= 0)
        pa_rtp_send(&u->rtp_context, u->mtu, u->memblockq);
    else
        pa_log_warn("Failed to push chunk into memblockq.");
}
/* Called from I/O thread context */
/* Render callback template for a filter sink: pulls rendered audio from the
 * virtual sink through the memblockq, copies (or, in a real filter,
 * processes) it per channel into a freshly allocated chunk, and returns it
 * to the master sink. The ALL-CAPS numbered comments mark the spots a
 * module author is expected to adapt. Assumes PA_SAMPLE_FLOAT32NE data. */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;
    float *src, *dst;
    size_t fs;
    unsigned n, c;
    pa_memchunk tchunk;
    pa_usec_t current_latency PA_GCC_UNUSED;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    /* Hmm, process any rewind request that might be queued up */
    pa_sink_process_rewind(u->sink, 0);

    /* (1) IF YOU NEED A FIXED BLOCK SIZE USE
     * pa_memblockq_peek_fixed_size() HERE INSTEAD. NOTE THAT FILTERS
     * WHICH CAN DEAL WITH DYNAMIC BLOCK SIZES ARE HIGHLY
     * PREFERRED. */
    while (pa_memblockq_peek(u->memblockq, &tchunk) < 0) {
        pa_memchunk nchunk;
        pa_sink_render(u->sink, nbytes, &nchunk);
        pa_memblockq_push(u->memblockq, &nchunk);
        pa_memblock_unref(nchunk.memblock);
    }

    /* (2) IF YOU NEED A FIXED BLOCK SIZE, THIS NEXT LINE IS NOT
     * NECESSARY */
    tchunk.length = PA_MIN(nbytes, tchunk.length);
    pa_assert(tchunk.length > 0);

    fs = pa_frame_size(&i->sample_spec);
    n = (unsigned) (tchunk.length / fs);
    pa_assert(n > 0);

    chunk->index = 0;
    chunk->length = n*fs;
    chunk->memblock = pa_memblock_new(i->sink->core->mempool, chunk->length);

    pa_memblockq_drop(u->memblockq, chunk->length);

    src = pa_memblock_acquire_chunk(&tchunk);
    dst = pa_memblock_acquire(chunk->memblock);

    /* (3) PUT YOUR CODE HERE TO DO SOMETHING WITH THE DATA */
    /* As an example, copy input to output */
    /* pa_sample_clamp() acts as a strided per-channel copy: both sides use an
     * interleaved stride of u->channels * sizeof(float). */
    for (c = 0; c < u->channels; c++) {
        pa_sample_clamp(PA_SAMPLE_FLOAT32NE, dst+c, u->channels * sizeof(float), src+c, u->channels * sizeof(float), n);
    }

    pa_memblock_release(tchunk.memblock);
    pa_memblock_release(chunk->memblock);
    pa_memblock_unref(tchunk.memblock);

    /* (4) IF YOU NEED THE LATENCY FOR SOMETHING ACQUIRE IT LIKE THIS: */
    current_latency =
        /* Get the latency of the master sink */
        pa_sink_get_latency_within_thread(i->sink) +

        /* Add the latency internal to our sink input on top */
        pa_bytes_to_usec(pa_memblockq_get_length(i->thread_info.render_memblockq), &i->sink->sample_spec);

    return 0;
}
/* Append one frame to the global mi6k output queue and drop the caller's
 * reference to the backing memblock (the queue holds its own reference
 * after a successful push).
 * NOTE(review): the return value of pa_memblockq_push() is ignored — if the
 * queue is full the frame is silently dropped; confirm that is intended. */
void mi6k_commit_frame(pa_memchunk *frame) {
    pa_memblockq_push(mi6k.queue, frame);
    pa_memblock_unref(frame->memblock);
}
/* Called from input thread context */
/* Push callback of the virtual source: forwards captured data to the
 * virtual source, optionally mixing in audio pulled from the companion
 * uplink sink (when it exists and is RUNNING). Mixing is done into a
 * separate target chunk to avoid refcount complications of in-place use. */
static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
    struct userdata *u;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert_se(u = o->userdata);

    if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
        pa_log("push when no link?");
        return;
    }

    /* PUT YOUR CODE HERE TO DO SOMETHING WITH THE SOURCE DATA */

    /* if uplink sink exists, pull data from there; simplify by using
       same length as chunk provided by source */
    if(u->sink && (pa_sink_get_state(u->sink) == PA_SINK_RUNNING)) {
        pa_memchunk tchunk;
        size_t nbytes = chunk->length;
        pa_mix_info streams[2];
        pa_memchunk target_chunk;
        void *target;
        int ch;

        /* Hmm, process any rewind request that might be queued up */
        pa_sink_process_rewind(u->sink, 0);

        /* get data from the sink */
        while (pa_memblockq_peek(u->sink_memblockq, &tchunk) < 0) {
            pa_memchunk nchunk;

            /* make sure we get nbytes from the sink with render_full,
               otherwise we cannot mix with the uplink */
            pa_sink_render_full(u->sink, nbytes, &nchunk);
            pa_memblockq_push(u->sink_memblockq, &nchunk);
            pa_memblock_unref(nchunk.memblock);
        }
        pa_assert(tchunk.length == chunk->length);

        /* move the read pointer for sink memblockq */
        pa_memblockq_drop(u->sink_memblockq, tchunk.length);

        /* allocate target chunk */
        /* this could probably be done in-place, but having chunk as both
           the input and output creates issues with reference counts */
        target_chunk.index = 0;
        target_chunk.length = chunk->length;
        pa_assert(target_chunk.length == chunk->length);

        target_chunk.memblock = pa_memblock_new(o->source->core->mempool, target_chunk.length);
        pa_assert( target_chunk.memblock );

        /* get target pointer */
        target = (void*)((uint8_t*)pa_memblock_acquire(target_chunk.memblock) + target_chunk.index);

        /* set-up mixing structure
           volume was taken care of in sink and source already */
        streams[0].chunk = *chunk;
        for(ch=0;ch<o->sample_spec.channels;ch++)
            streams[0].volume.values[ch] = PA_VOLUME_NORM; /* FIXME */
        streams[0].volume.channels = o->sample_spec.channels;

        streams[1].chunk = tchunk;
        for(ch=0;ch<o->sample_spec.channels;ch++)
            streams[1].volume.values[ch] = PA_VOLUME_NORM; /* FIXME */
        streams[1].volume.channels = o->sample_spec.channels;

        /* do mixing */
        pa_mix(streams,                /* 2 streams to be mixed */
               2,
               target,                 /* put result in target chunk */
               chunk->length,          /* same length as input */
               (const pa_sample_spec *)&o->sample_spec, /* same sample spec for input and output */
               NULL,                   /* no volume information */
               FALSE);                 /* no mute */

        pa_memblock_release(target_chunk.memblock);
        pa_memblock_unref(tchunk.memblock); /* clean-up */

        /* forward the data to the virtual source */
        pa_source_post(u->source, &target_chunk);

        pa_memblock_unref(target_chunk.memblock); /* clean-up */
    } else {
        /* forward the data to the virtual source */
        pa_source_post(u->source, chunk);
    }
}
int main(int argc, char *argv[]) { int ret; pa_mempool *p; pa_memblockq *bq; pa_memchunk chunk1, chunk2, chunk3, chunk4; pa_memchunk silence; pa_sample_spec ss = { .format = PA_SAMPLE_S16LE, .rate = 48000, .channels = 1 }; pa_log_set_level(PA_LOG_DEBUG); p = pa_mempool_new(FALSE, 0); pa_assert_se(silence.memblock = pa_memblock_new_fixed(p, (char*) "__", 2, 1)); silence.index = 0; silence.length = pa_memblock_get_length(silence.memblock); pa_assert_se(bq = pa_memblockq_new("test memblockq", 0, 200, 10, &ss, 4, 4, 40, &silence)); pa_assert_se(chunk1.memblock = pa_memblock_new_fixed(p, (char*) "11", 2, 1)); chunk1.index = 0; chunk1.length = 2; pa_assert_se(chunk2.memblock = pa_memblock_new_fixed(p, (char*) "XX22", 4, 1)); chunk2.index = 2; chunk2.length = 2; pa_assert_se(chunk3.memblock = pa_memblock_new_fixed(p, (char*) "3333", 4, 1)); chunk3.index = 0; chunk3.length = 4; pa_assert_se(chunk4.memblock = pa_memblock_new_fixed(p, (char*) "44444444", 8, 1)); chunk4.index = 0; chunk4.length = 8; ret = pa_memblockq_push(bq, &chunk1); assert(ret == 0); ret = pa_memblockq_push(bq, &chunk2); assert(ret == 0); ret = pa_memblockq_push(bq, &chunk3); assert(ret == 0); ret = pa_memblockq_push(bq, &chunk4); assert(ret == 0); pa_memblockq_seek(bq, -6, 0, TRUE); ret = pa_memblockq_push(bq, &chunk3); assert(ret == 0); pa_memblockq_seek(bq, -2, 0, TRUE); ret = pa_memblockq_push(bq, &chunk1); assert(ret == 0); pa_memblockq_seek(bq, -10, 0, TRUE); ret = pa_memblockq_push(bq, &chunk4); assert(ret == 0); pa_memblockq_seek(bq, 10, 0, TRUE); ret = pa_memblockq_push(bq, &chunk1); assert(ret == 0); pa_memblockq_seek(bq, -6, 0, TRUE); ret = pa_memblockq_push(bq, &chunk2); assert(ret == 0); /* Test splitting */ pa_memblockq_seek(bq, -12, 0, TRUE); ret = pa_memblockq_push(bq, &chunk1); assert(ret == 0); pa_memblockq_seek(bq, 20, 0, TRUE); /* Test merging */ ret = pa_memblockq_push(bq, &chunk3); assert(ret == 0); pa_memblockq_seek(bq, -2, 0, TRUE); chunk3.index += 2; chunk3.length -= 2; ret = 
pa_memblockq_push(bq, &chunk3); assert(ret == 0); pa_memblockq_seek(bq, 30, PA_SEEK_RELATIVE, TRUE); dump(bq); pa_memblockq_rewind(bq, 52); dump(bq); pa_memblockq_free(bq); pa_memblock_unref(silence.memblock); pa_memblock_unref(chunk1.memblock); pa_memblock_unref(chunk2.memblock); pa_memblock_unref(chunk3.memblock); pa_memblock_unref(chunk4.memblock); pa_mempool_free(p); return 0; }
/* Called from IO thread context */
/* Render callback of the file stream's sink input: serves audio from the
 * memblockq, refilling it from the sndfile as needed. Returns 0 with a
 * valid chunk, or -1 when no data is left; once drained (file closed and
 * queue empty) it tears down the queue and posts an UNLINK message to the
 * main thread if the stream may be removed. */
static int sink_input_pop_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
    file_stream *u;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    u = FILE_STREAM(i->userdata);
    file_stream_assert_ref(u);

    /* Queue already freed by a previous call — nothing more to play. */
    if (!u->memblockq)
        return -1;

    for (;;) {
        pa_memchunk tchunk;
        size_t fs;
        void *p;
        sf_count_t n;

        /* Serve buffered data first, capped at the requested length. */
        if (pa_memblockq_peek(u->memblockq, chunk) >= 0) {
            chunk->length = PA_MIN(chunk->length, length);
            pa_memblockq_drop(u->memblockq, chunk->length);
            return 0;
        }

        /* Queue empty and file already exhausted/closed: fall through to
         * the teardown check below. */
        if (!u->sndfile)
            break;

        /* Refill: read up to 'length' bytes (or the frame-count equivalent
         * when a typed readf_function is available) from the sound file. */
        tchunk.memblock = pa_memblock_new(i->sink->core->mempool, length);
        tchunk.index = 0;

        p = pa_memblock_acquire(tchunk.memblock);

        if (u->readf_function) {
            fs = pa_frame_size(&i->sample_spec);
            n = u->readf_function(u->sndfile, p, (sf_count_t) (length/fs));
        } else {
            /* Raw read: fs stays 1 so n counts bytes directly. */
            fs = 1;
            n = sf_read_raw(u->sndfile, p, (sf_count_t) length);
        }

        pa_memblock_release(tchunk.memblock);

        /* EOF or read error: close the file; the loop exits on the next
         * iteration via the !u->sndfile check if the queue stays empty. */
        if (n <= 0) {
            pa_memblock_unref(tchunk.memblock);
            sf_close(u->sndfile);
            u->sndfile = NULL;
            break;
        }

        tchunk.length = (size_t) n * fs;

        pa_memblockq_push(u->memblockq, &tchunk);
        pa_memblock_unref(tchunk.memblock);
    }

    if (pa_sink_input_safe_to_remove(i)) {
        pa_memblockq_free(u->memblockq);
        u->memblockq = NULL;
        pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(u), FILE_STREAM_MESSAGE_UNLINK, NULL, 0, NULL, NULL);
    }

    return -1;
}
/* Called from thread context */
/* Deliver a chunk of captured audio to a source output. The chunk first
 * goes through the per-output delay queue; data is then drained to the
 * o->push() callback (through the resampler, if any), holding back up to
 * 'limit' bytes so that a later rewind can still rewrite them. */
void pa_source_output_push(pa_source_output *o, const pa_memchunk *chunk) {
    size_t length;
    size_t limit, mbs = 0;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert(PA_SOURCE_OUTPUT_IS_LINKED(o->thread_info.state));
    pa_assert(chunk);
    pa_assert(pa_frame_aligned(chunk->length, &o->source->sample_spec));

    if (!o->push || o->thread_info.state == PA_SOURCE_OUTPUT_CORKED)
        return;

    pa_assert(o->thread_info.state == PA_SOURCE_OUTPUT_RUNNING);

    if (pa_memblockq_push(o->thread_info.delay_memblockq, chunk) < 0) {
        /* Queue full: skip the write index forward so subsequent pushes can
         * proceed; the overflowing data is lost. */
        pa_log_debug("Delay queue overflow!");
        pa_memblockq_seek(o->thread_info.delay_memblockq, (int64_t) chunk->length, PA_SEEK_RELATIVE, TRUE);
    }

    /* Outputs that can process rewinds don't need any hold-back. */
    limit = o->process_rewind ? 0 : o->source->thread_info.max_rewind;

    if (limit > 0 && o->source->monitor_of) {

        pa_usec_t latency;
        size_t n;

        /* Hmm, check the latency for knowing how much of the buffered
         * data is actually still unplayed and might hence still
         * change. This is suboptimal. Ideally we'd have a call like
         * pa_sink_get_changeable_size() or so that tells us how much
         * of the queued data is actually still changeable. Hence
         * FIXME! */
        latency = pa_sink_get_latency_within_thread(o->source->monitor_of);

        n = pa_usec_to_bytes(latency, &o->source->sample_spec);

        if (n < limit)
            limit = n;
    }

    /* Implement the delay queue */
    while ((length = pa_memblockq_get_length(o->thread_info.delay_memblockq)) > limit) {
        pa_memchunk qchunk;

        /* Only the portion above 'limit' is eligible to be pushed out. */
        length -= limit;

        pa_assert_se(pa_memblockq_peek(o->thread_info.delay_memblockq, &qchunk) >= 0);

        if (qchunk.length > length)
            qchunk.length = length;

        pa_assert(qchunk.length > 0);

        if (!o->thread_info.resampler)
            o->push(o, &qchunk);
        else {
            pa_memchunk rchunk;

            /* Cache the resampler's max block size lazily; chunks fed to the
             * resampler must not exceed it. */
            if (mbs == 0)
                mbs = pa_resampler_max_block_size(o->thread_info.resampler);

            if (qchunk.length > mbs)
                qchunk.length = mbs;

            pa_resampler_run(o->thread_info.resampler, &qchunk, &rchunk);

            if (rchunk.length > 0)
                o->push(o, &rchunk);

            if (rchunk.memblock)
                pa_memblock_unref(rchunk.memblock);
        }

        pa_memblock_unref(qchunk.memblock);
        pa_memblockq_drop(o->thread_info.delay_memblockq, qchunk.length);
    }
}
/* Called from I/O thread context */
/* Render callback of the virtual-surround sink input: pulls multichannel
 * audio from the virtual sink and convolves it, frame by frame, with the
 * per-channel HRIR impulse responses to produce a stereo (left/right)
 * output chunk. u->sink_fs / u->fs are the input/output frame sizes used
 * to convert byte counts between the two formats. */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;
    float *src, *dst;
    unsigned n;
    pa_memchunk tchunk;
    unsigned j, k, l;
    float sum_right, sum_left;
    float current_sample;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    /* Hmm, process any rewind request that might be queued up */
    pa_sink_process_rewind(u->sink, 0);

    /* Refill the queue; the render size is scaled from output bytes to
     * input bytes via the frame-size ratio. */
    while (pa_memblockq_peek(u->memblockq, &tchunk) < 0) {
        pa_memchunk nchunk;
        pa_sink_render(u->sink, nbytes * u->sink_fs / u->fs, &nchunk);
        pa_memblockq_push(u->memblockq, &nchunk);
        pa_memblock_unref(nchunk.memblock);
    }

    tchunk.length = PA_MIN(nbytes * u->sink_fs / u->fs, tchunk.length);
    pa_assert(tchunk.length > 0);

    /* n = number of whole input frames to process this round. */
    n = (unsigned) (tchunk.length / u->sink_fs);
    pa_assert(n > 0);

    chunk->index = 0;
    chunk->length = n * u->fs;
    chunk->memblock = pa_memblock_new(i->sink->core->mempool, chunk->length);
    pa_memblockq_drop(u->memblockq, n * u->sink_fs);

    src = pa_memblock_acquire_chunk(&tchunk);
    dst = pa_memblock_acquire(chunk->memblock);

    for (l = 0; l < n; l++) {
        /* Insert the newest input frame into the ring buffer at the current
         * write position. */
        memcpy(((char*) u->input_buffer) + u->input_buffer_offset * u->sink_fs, ((char *) src) + l * u->sink_fs, u->sink_fs);

        sum_right = 0;
        sum_left = 0;

        /* fold the input buffer with the impulse response */
        for (j = 0; j < u->hrir_samples; j++) {
            for (k = 0; k < u->channels; k++) {
                current_sample = u->input_buffer[((u->input_buffer_offset + j) % u->hrir_samples) * u->channels + k];

                sum_left += current_sample * u->hrir_data[j * u->hrir_channels + u->mapping_left[k]];
                sum_right += current_sample * u->hrir_data[j * u->hrir_channels + u->mapping_right[k]];
            }
        }

        dst[2 * l] = PA_CLAMP_UNLIKELY(sum_left, -1.0f, 1.0f);
        dst[2 * l + 1] = PA_CLAMP_UNLIKELY(sum_right, -1.0f, 1.0f);

        /* Ring buffer write position moves backwards with wrap-around.
         * NOTE(review): the '< 0' check requires u->input_buffer_offset to
         * be a signed type — verify its declaration in struct userdata. */
        u->input_buffer_offset--;
        if (u->input_buffer_offset < 0)
            u->input_buffer_offset += u->hrir_samples;
    }

    pa_memblock_release(tchunk.memblock);
    pa_memblock_release(chunk->memblock);

    pa_memblock_unref(tchunk.memblock);

    return 0;
}
/*** sink_input callbacks ***/

/* Render callback for the cmtspeech sink input: drains downlink (DL) voice
 * frames from the modem's async queue into dl_memblockq (with side-info
 * bookkeeping), bounds the queued backlog to 3 frames, and hands out one
 * DL frame per call — or silence when no DL audio is available yet. */
static int cmtspeech_sink_input_pop_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
    struct userdata *u;
    int queue_counter = 0;

    pa_assert_fp(i);
    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);
    pa_assert_fp(chunk);

    if (u->cmt_connection.dl_frame_queue) {
        cmtspeech_dl_buf_t *buf;

        /* Move every pending DL buffer from the async queue into the
         * memblockq; side info is tracked per successfully queued frame. */
        while ((buf = pa_asyncq_pop(u->cmt_connection.dl_frame_queue, FALSE))) {
            pa_memchunk cmtchunk;
            if (cmtspeech_buffer_to_memchunk(u, buf, &cmtchunk) < 0)
                continue;
            queue_counter++;
            if (pa_memblockq_push(u->dl_memblockq, &cmtchunk) < 0) {
                pa_log_debug("Failed to push DL frame to dl_memblockq (len %zu max %zu)", pa_memblockq_get_length(u->dl_memblockq), pa_memblockq_get_maxlength(u->dl_memblockq));
            } else {
                cmtspeech_dl_sideinfo_push(buf->spc_flags, cmtchunk.length, u);
            }
            pa_memblock_unref(cmtchunk.memblock);
        }
    }

    /* More than one DL frame in queue means that sink has not asked for more
     * data for over 20ms and something may be wrong. */
    if (queue_counter > 1) {
        pa_log_info("%d frames found from queue (dl buf size %zu)", queue_counter, pa_memblockq_get_length(u->dl_memblockq));
    }

    /* Latency bound: keep at most 3 frames buffered, dropping the oldest
     * data (and its side info) beyond that. */
    if (pa_memblockq_get_length(u->dl_memblockq) > 3*u->dl_frame_size) {
        size_t drop_bytes = pa_memblockq_get_length(u->dl_memblockq) - 3*u->dl_frame_size;
        pa_memblockq_drop(u->dl_memblockq, drop_bytes);
        cmtspeech_dl_sideinfo_drop(u, drop_bytes);
        pa_log_debug("Too much data in DL buffer dropped %zu bytes", drop_bytes);
    }

    pa_assert_fp((pa_memblockq_get_length(u->dl_memblockq) % u->dl_frame_size) == 0);

    if (util_memblockq_to_chunk(u->core->mempool, u->dl_memblockq, chunk, u->dl_frame_size)) {
        ONDEBUG_TOKENS(fprintf(stderr, "d"));
        cmtspeech_dl_sideinfo_forward(u);
    } else {
        /* No full DL frame available: emit silence and keep side-info
         * state consistent. */
        if (u->cmt_connection.first_dl_frame_received)
            pa_log_debug("No DL audio: %zu bytes in queue %zu needed", pa_memblockq_get_length(u->dl_memblockq), u->dl_frame_size);
        cmtspeech_dl_sideinfo_bogus(u);
        pa_silence_memchunk_get(&u->core->silence_cache, u->core->mempool, chunk, &u->ss, u->dl_frame_size);
    }

    return 0;
}