Example #1
/* Called from I/O thread context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY:

            /* The sink is _put() before the sink input is, so let's
             * make sure we don't access it yet */
            if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
                !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
                *((pa_usec_t*) data) = 0;
                return 0;
            }

            *((pa_usec_t*) data) =
                /* Get the latency of the master sink */
                pa_sink_get_latency_within_thread(u->sink_input->sink) +

                /* Add the latency internal to our sink input on top */
                pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);

            return 0;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
Example #2
/* Called from I/O thread context */
static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY:

            /* The source is _put() before the source output is, so let's
             * make sure we don't access it in that time. Also, the
             * source output is first shut down, the source second. */
            if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
                !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
                *((pa_usec_t*) data) = 0;
                return 0;
            }

            *((pa_usec_t*) data) =

                /* Get the latency of the master source */
                pa_source_get_latency_within_thread(u->source_output->source) +
                /* Add the latency internal to our source output on top */
                pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec);

            return 0;
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
Example #3
/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        max_rewrite = nbytes * u->sink_fs / u->fs + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes * u->sink_fs / u->fs, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, true);

            /* Reset the input buffer */
            memset(u->input_buffer, 0, u->hrir_samples * u->sink_fs);
            u->input_buffer_offset = 0;
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes * u->sink_fs / u->fs);
}
Example #4
/* Called from I/O thread context */
static int voip_source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case VOICE_SOURCE_SET_UL_DEADLINE: {
            u->ul_deadline = offset;
            pa_log_debug("Uplink deadline set to %lld (%lld usec from now)",
                         u->ul_deadline, u->ul_deadline - pa_rtclock_now());
            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t usec = 0;

            if (PA_MSGOBJECT(u->master_source)->process_msg(
                    PA_MSGOBJECT(u->master_source), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
                usec = 0;

            usec += pa_bytes_to_usec(pa_memblockq_get_length(u->ul_memblockq),
                                     &u->aep_sample_spec);
            *((pa_usec_t*) data) = usec;
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
Example #5
static void pstream_memblock_callback(pa_pstream *p, uint32_t channel, int64_t offset, pa_seek_mode_t seek, const pa_memchunk *chunk, void *userdata) {
    pa_context *c = userdata;
    pa_stream *s;

    pa_assert(p);
    pa_assert(chunk);
    pa_assert(chunk->length > 0);
    pa_assert(c);
    pa_assert(PA_REFCNT_VALUE(c) >= 1);

    pa_context_ref(c);

    if ((s = pa_hashmap_get(c->record_streams, PA_UINT32_TO_PTR(channel)))) {

        if (chunk->memblock) {
            pa_memblockq_seek(s->record_memblockq, offset, seek, TRUE);
            pa_memblockq_push_align(s->record_memblockq, chunk);
        } else
            pa_memblockq_seek(s->record_memblockq, offset+chunk->length, seek, TRUE);

        if (s->read_callback) {
            size_t l;

            if ((l = pa_memblockq_get_length(s->record_memblockq)) > 0)
                s->read_callback(s, l, s->read_userdata);
        }
    }

    pa_context_unref(c);
}
Example #6
/* Called from thread context */
void pa_source_output_process_rewind(pa_source_output *o, size_t nbytes /* in source sample spec */) {

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert(PA_SOURCE_OUTPUT_IS_LINKED(o->thread_info.state));
    pa_assert(pa_frame_aligned(nbytes, &o->source->sample_spec));

    if (nbytes <= 0)
        return;

    if (o->process_rewind) {
        pa_assert(pa_memblockq_get_length(o->thread_info.delay_memblockq) == 0);

        if (o->thread_info.resampler)
            nbytes = pa_resampler_result(o->thread_info.resampler, nbytes);

        pa_log_debug("Have to rewind %lu bytes on implementor.", (unsigned long) nbytes);

        if (nbytes > 0)
            o->process_rewind(o, nbytes);

        if (o->thread_info.resampler)
            pa_resampler_reset(o->thread_info.resampler);

    } else
        pa_memblockq_rewind(o->thread_info.delay_memblockq, nbytes);
}
Example #7
/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        max_rewrite = nbytes + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            unsigned c;

            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, TRUE);

            pa_log_debug("Resetting plugin");

            /* Reset the plugin */
            if (u->descriptor->deactivate)
                for (c = 0; c < (u->channels / u->max_ladspaport_count); c++)
                    u->descriptor->deactivate(u->handle[c]);
            if (u->descriptor->activate)
                for (c = 0; c < (u->channels / u->max_ladspaport_count); c++)
                    u->descriptor->activate(u->handle[c]);
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes);
}
Example #8
/* Called from main context */
static pa_usec_t source_output_get_latency_cb(pa_source_output *o) {
    connection *c;

    pa_source_output_assert_ref(o);
    c = CONNECTION(o->userdata);
    pa_assert(c);

    return pa_bytes_to_usec(pa_memblockq_get_length(c->output_memblockq), &c->source_output->sample_spec);
}
Example #9
/* Called from output thread context */
static void update_min_memblockq_length(struct userdata *u) {
    size_t length;

    pa_assert(u);
    pa_sink_input_assert_io_context(u->sink_input);

    length = pa_memblockq_get_length(u->memblockq);

    if (u->min_memblockq_length == (size_t) -1 ||
        length < u->min_memblockq_length)
        u->min_memblockq_length = length;
}
Example #10
/* Called from I/O thread context */
static void sink_request_rewind_cb(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
        !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
        return;

    /* Just hand this one over to the master sink */
    pa_sink_input_request_rewind(u->sink_input, s->thread_info.rewind_nbytes + pa_memblockq_get_length(u->memblockq), TRUE, FALSE, FALSE);
}
Example #11
/* Called from I/O thread context */
static int sink_input_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct session *s = PA_SINK_INPUT(o)->userdata;

    switch (code) {
        case PA_SINK_INPUT_MESSAGE_GET_LATENCY:
            *((pa_usec_t*) data) = pa_bytes_to_usec(pa_memblockq_get_length(s->memblockq), &s->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
    }

    return pa_sink_input_process_msg(o, code, data, offset, chunk);
}
Example #12
/* Called from I/O thread context */
static int source_output_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u;
    pa_assert_se(u = PA_SOURCE_OUTPUT(o)->userdata);

    switch (code) {
        case PA_SOURCE_OUTPUT_MESSAGE_GET_LATENCY:
            *((pa_usec_t*) data) = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->source_output->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
    }

    return pa_source_output_process_msg(o, code, data, offset, chunk);
}
Example #13
/* Called from IO thread context, except when it is not */
int pa_source_output_process_msg(pa_msgobject *mo, int code, void *userdata, int64_t offset, pa_memchunk* chunk) {
    pa_source_output *o = PA_SOURCE_OUTPUT(mo);
    pa_source_output_assert_ref(o);

    switch (code) {

        case PA_SOURCE_OUTPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = userdata;

            r[0] += pa_bytes_to_usec(pa_memblockq_get_length(o->thread_info.delay_memblockq), &o->source->sample_spec);
            r[1] += pa_source_get_latency_within_thread(o->source);

            return 0;
        }

        case PA_SOURCE_OUTPUT_MESSAGE_SET_RATE:

            o->thread_info.sample_spec.rate = PA_PTR_TO_UINT(userdata);
            pa_resampler_set_output_rate(o->thread_info.resampler, PA_PTR_TO_UINT(userdata));
            return 0;

        case PA_SOURCE_OUTPUT_MESSAGE_SET_STATE:

            pa_source_output_set_state_within_thread(o, PA_PTR_TO_UINT(userdata));
            return 0;

        case PA_SOURCE_OUTPUT_MESSAGE_SET_REQUESTED_LATENCY: {
            pa_usec_t *usec = userdata;

            *usec = pa_source_output_set_requested_latency_within_thread(o, *usec);

            return 0;
        }

        case PA_SOURCE_OUTPUT_MESSAGE_GET_REQUESTED_LATENCY: {
            pa_usec_t *r = userdata;

            *r = o->thread_info.requested_source_latency;
            return 0;
        }
    }

    return -PA_ERR_NOTIMPLEMENTED;
}
Example #14
/* Called from thread context */
static int sink_input_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink_input *i = PA_SINK_INPUT(o);
    connection *c;

    pa_sink_input_assert_ref(i);
    c = CONNECTION(i->userdata);
    connection_assert_ref(c);

    switch (code) {

        case SINK_INPUT_MESSAGE_POST_DATA: {
            pa_assert(chunk);

            /* New data from the main loop */
            pa_memblockq_push_align(c->input_memblockq, chunk);

            if (pa_memblockq_is_readable(c->input_memblockq) && c->playback.underrun) {
                pa_log_debug("Requesting rewind due to end of underrun.");
                pa_sink_input_request_rewind(c->sink_input, 0, false, true, false);
            }

/*             pa_log("got data, %u", pa_memblockq_get_length(c->input_memblockq)); */

            return 0;
        }

        case SINK_INPUT_MESSAGE_DISABLE_PREBUF:
            pa_memblockq_prebuf_disable(c->input_memblockq);
            return 0;

        case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = userdata;

            *r = pa_bytes_to_usec(pa_memblockq_get_length(c->input_memblockq), &c->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
        }

        default:
            return pa_sink_input_process_msg(o, code, userdata, offset, chunk);
    }
}
Example #15
/* Called from output thread context */
static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;

    switch (code) {

        case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t length;

            length = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);

            u->latency_snapshot.send_counter = u->send_counter;
            u->latency_snapshot.source_output_buffer = u->source_output->thread_info.resampler ? pa_resampler_result(u->source_output->thread_info.resampler, length) : length;
            u->latency_snapshot.source_latency = pa_source_get_latency_within_thread(u->source_output->source);

            return 0;
        }
    }

    return pa_source_output_process_msg(obj, code, data, offset, chunk);
}
Example #16
/* Called from I/O thread context */
static int raw_source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            pa_usec_t usec = 0;

            if (PA_MSGOBJECT(u->master_source)->process_msg(
                    PA_MSGOBJECT(u->master_source), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
                usec = 0;

            *((pa_usec_t*) data) = usec + pa_bytes_to_usec(pa_memblockq_get_length(u->hw_source_memblockq),
                                                           &u->raw_source->sample_spec);
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}
Example #17
/* Called from output thread context */
static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;

    switch (code) {

        case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t length;

            length = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);

            u->latency_snapshot.send_counter = u->send_counter;
            /* Add content of delay memblockq to the source latency */
            u->latency_snapshot.source_latency = pa_source_get_latency_within_thread(u->source_output->source) +
                                                 pa_bytes_to_usec(length, &u->source_output->source->sample_spec);
            u->latency_snapshot.source_timestamp = pa_rtclock_now();

            return 0;
        }
    }

    return pa_source_output_process_msg(obj, code, data, offset, chunk);
}
Example #18
/* Called from I/O thread context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY:

            /* The sink is _put() before the sink input is, so let's
             * make sure we don't access it yet */
            if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
                !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
                *((int64_t*) data) = 0;
                return 0;
            }

            *((int64_t*) data) =
                /* Get the latency of the master sink */
                pa_sink_get_latency_within_thread(u->sink_input->sink, true) +

                /* Add the latency internal to our sink input on top */
                pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {
            pa_sink_state_t new_state = (pa_sink_state_t) PA_PTR_TO_UINT(data);

            /* When set to running or idle for the first time, request a rewind
             * of the master sink to make sure we are heard immediately */
            if ((new_state == PA_SINK_IDLE || new_state == PA_SINK_RUNNING) && u->sink->thread_info.state == PA_SINK_INIT) {
                pa_log_debug("Requesting rewind due to state change.");
                pa_sink_input_request_rewind(u->sink_input, 0, false, true, true);
            }
            break;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}
Example #19
/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct userdata *u;
    size_t amount = 0;

    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);

    if (u->sink->thread_info.rewind_nbytes > 0) {
        size_t max_rewrite;

        max_rewrite = nbytes + pa_memblockq_get_length(u->memblockq);
        amount = PA_MIN(u->sink->thread_info.rewind_nbytes, max_rewrite);
        u->sink->thread_info.rewind_nbytes = 0;

        if (amount > 0) {
            pa_memblockq_seek(u->memblockq, - (int64_t) amount, PA_SEEK_RELATIVE, TRUE);

            /* (5) PUT YOUR CODE HERE TO RESET YOUR FILTER  */
        }
    }

    pa_sink_process_rewind(u->sink, amount);
    pa_memblockq_rewind(u->memblockq, nbytes);
}
Example #20
/*** sink_input callbacks ***/
static int cmtspeech_sink_input_pop_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
    struct userdata *u;
    int queue_counter = 0;

    pa_assert_fp(i);
    pa_sink_input_assert_ref(i);
    pa_assert_se(u = i->userdata);
    pa_assert_fp(chunk);

    if (u->cmt_connection.dl_frame_queue) {
        cmtspeech_dl_buf_t *buf;
        while ((buf = pa_asyncq_pop(u->cmt_connection.dl_frame_queue, FALSE))) {
            pa_memchunk cmtchunk;
            if (cmtspeech_buffer_to_memchunk(u, buf, &cmtchunk) < 0)
                continue;
            queue_counter++;
            if (pa_memblockq_push(u->dl_memblockq, &cmtchunk) < 0) {
                pa_log_debug("Failed to push DL frame to dl_memblockq (len %zu max %zu)",
                             pa_memblockq_get_length(u->dl_memblockq),
                             pa_memblockq_get_maxlength(u->dl_memblockq));
            }
            else {
                cmtspeech_dl_sideinfo_push(buf->spc_flags, cmtchunk.length, u);
            }
            pa_memblock_unref(cmtchunk.memblock);
        }
    }

    /* More than one DL frame in queue means that sink has not asked for more
     * data for over 20ms and something may be wrong. */
    if (queue_counter > 1) {
        pa_log_info("%d frames found from queue (dl buf size %zu)", queue_counter,
                    pa_memblockq_get_length(u->dl_memblockq));
    }

    if (pa_memblockq_get_length(u->dl_memblockq) > 3*u->dl_frame_size) {
        size_t drop_bytes =
            pa_memblockq_get_length(u->dl_memblockq) - 3*u->dl_frame_size;
        pa_memblockq_drop(u->dl_memblockq, drop_bytes);
        cmtspeech_dl_sideinfo_drop(u, drop_bytes);
        pa_log_debug("Too much data in DL buffer dropped %zu bytes",
                     drop_bytes);
    }

    pa_assert_fp((pa_memblockq_get_length(u->dl_memblockq) % u->dl_frame_size) == 0);

    if (util_memblockq_to_chunk(u->core->mempool, u->dl_memblockq, chunk, u->dl_frame_size)) {
        ONDEBUG_TOKENS(fprintf(stderr, "d"));
        cmtspeech_dl_sideinfo_forward(u);
    }
    else {
        if (u->cmt_connection.first_dl_frame_received)
            pa_log_debug("No DL audio: %zu bytes in queue %zu needed",
                         pa_memblockq_get_length(u->dl_memblockq), u->dl_frame_size);
        cmtspeech_dl_sideinfo_bogus(u);
        pa_silence_memchunk_get(&u->core->silence_cache,
                                u->core->mempool,
                                chunk,
                                &u->ss,
                                u->dl_frame_size);
    }

    return 0;
}
Example #21
/* Called from output thread context */
static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = data;

            pa_sink_input_assert_io_context(u->sink_input);

            *r = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
        }

        case SINK_INPUT_MESSAGE_POST:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_push_align(u->memblockq, chunk);
            else
                pa_memblockq_flush_write(u->memblockq, TRUE);

            update_min_memblockq_length(u);

            /* Is this the end of an underrun? Then let's start things
             * right-away */
            if (!u->in_pop &&
                u->sink_input->thread_info.underrun_for > 0 &&
                pa_memblockq_is_readable(u->memblockq)) {

                pa_log_debug("Requesting rewind due to end of underrun.");
                pa_sink_input_request_rewind(u->sink_input,
                                             (size_t) (u->sink_input->thread_info.underrun_for == (size_t) -1 ? 0 : u->sink_input->thread_info.underrun_for),
                                             FALSE, TRUE, FALSE);
            }

            u->recv_counter += (int64_t) chunk->length;

            return 0;

        case SINK_INPUT_MESSAGE_REWIND:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_seek(u->memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
            else
                pa_memblockq_flush_write(u->memblockq, TRUE);

            u->recv_counter -= offset;

            update_min_memblockq_length(u);

            return 0;

        case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t length;

            update_min_memblockq_length(u);

            length = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);

            u->latency_snapshot.recv_counter = u->recv_counter;
            u->latency_snapshot.sink_input_buffer =
                pa_memblockq_get_length(u->memblockq) +
                (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, length) : length);
            u->latency_snapshot.sink_latency = pa_sink_get_latency_within_thread(u->sink_input->sink);

            u->latency_snapshot.max_request = pa_sink_input_get_max_request(u->sink_input);

            u->latency_snapshot.min_memblockq_length = u->min_memblockq_length;
            u->min_memblockq_length = (size_t) -1;

            return 0;
        }

        case SINK_INPUT_MESSAGE_MAX_REQUEST_CHANGED: {
            /* This message is sent from the IO thread to the main
             * thread! So don't be confused. All the user cases above
             * are executed in thread context, but this one is not! */

            pa_assert_ctl_context();

            if (u->time_event)
                adjust_rates(u);
            return 0;
        }
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}
Example #22
/* Called from thread context */
void pa_source_output_push(pa_source_output *o, const pa_memchunk *chunk) {
    size_t length;
    size_t limit, mbs = 0;

    pa_source_output_assert_ref(o);
    pa_source_output_assert_io_context(o);
    pa_assert(PA_SOURCE_OUTPUT_IS_LINKED(o->thread_info.state));
    pa_assert(chunk);
    pa_assert(pa_frame_aligned(chunk->length, &o->source->sample_spec));

    if (!o->push || o->thread_info.state == PA_SOURCE_OUTPUT_CORKED)
        return;

    pa_assert(o->thread_info.state == PA_SOURCE_OUTPUT_RUNNING);

    if (pa_memblockq_push(o->thread_info.delay_memblockq, chunk) < 0) {
        pa_log_debug("Delay queue overflow!");
        pa_memblockq_seek(o->thread_info.delay_memblockq, (int64_t) chunk->length, PA_SEEK_RELATIVE, TRUE);
    }

    limit = o->process_rewind ? 0 : o->source->thread_info.max_rewind;

    if (limit > 0 && o->source->monitor_of) {
        pa_usec_t latency;
        size_t n;

        /* Hmm, check the latency for knowing how much of the buffered
         * data is actually still unplayed and might hence still
         * change. This is suboptimal. Ideally we'd have a call like
         * pa_sink_get_changeable_size() or so that tells us how much
         * of the queued data is actually still changeable. Hence
         * FIXME! */

        latency = pa_sink_get_latency_within_thread(o->source->monitor_of);

        n = pa_usec_to_bytes(latency, &o->source->sample_spec);

        if (n < limit)
            limit = n;
    }

    /* Implement the delay queue */
    while ((length = pa_memblockq_get_length(o->thread_info.delay_memblockq)) > limit) {
        pa_memchunk qchunk;

        length -= limit;

        pa_assert_se(pa_memblockq_peek(o->thread_info.delay_memblockq, &qchunk) >= 0);

        if (qchunk.length > length)
            qchunk.length = length;

        pa_assert(qchunk.length > 0);

        if (!o->thread_info.resampler)
            o->push(o, &qchunk);
        else {
            pa_memchunk rchunk;

            if (mbs == 0)
                mbs = pa_resampler_max_block_size(o->thread_info.resampler);

            if (qchunk.length > mbs)
                qchunk.length = mbs;

            pa_resampler_run(o->thread_info.resampler, &qchunk, &rchunk);

            if (rchunk.length > 0)
                o->push(o, &rchunk);

            if (rchunk.memblock)
                pa_memblock_unref(rchunk.memblock);
        }

        pa_memblock_unref(qchunk.memblock);
        pa_memblockq_drop(o->thread_info.delay_memblockq, qchunk.length);
    }
}
Example #23
/* Called from output thread context */
static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = data;

            pa_sink_input_assert_io_context(u->sink_input);

            *r = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
        }

        case SINK_INPUT_MESSAGE_POST:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_push_align(u->memblockq, chunk);
            else
                pa_memblockq_flush_write(u->memblockq, true);

            /* Is this the end of an underrun? Then let's start things
             * right-away */
            if (!u->in_pop &&
                u->sink_input->thread_info.underrun_for > 0 &&
                pa_memblockq_is_readable(u->memblockq)) {

                pa_log_debug("Requesting rewind due to end of underrun.");
                pa_sink_input_request_rewind(u->sink_input,
                                             (size_t) (u->sink_input->thread_info.underrun_for == (size_t) -1 ? 0 : u->sink_input->thread_info.underrun_for),
                                             false, true, false);
            }

            u->recv_counter += (int64_t) chunk->length;

            return 0;

        case SINK_INPUT_MESSAGE_REWIND:

            pa_sink_input_assert_io_context(u->sink_input);

            if (PA_SINK_IS_OPENED(u->sink_input->sink->thread_info.state))
                pa_memblockq_seek(u->memblockq, -offset, PA_SEEK_RELATIVE, true);
            else
                pa_memblockq_flush_write(u->memblockq, true);

            u->recv_counter -= offset;

            return 0;

        case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
            size_t length;

            length = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);

            u->latency_snapshot.recv_counter = u->recv_counter;
            u->latency_snapshot.sink_input_buffer = pa_memblockq_get_length(u->memblockq);
            /* Add content of render memblockq to sink latency */
            u->latency_snapshot.sink_latency = pa_sink_get_latency_within_thread(u->sink_input->sink) +
                                               pa_bytes_to_usec(length, &u->sink_input->sink->sample_spec);
            u->latency_snapshot.sink_timestamp = pa_rtclock_now();

            return 0;
        }
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}
Example #24
/* Called from I/O thread context */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct userdata *u;
    float *src, *dst;
    size_t fs;
    unsigned n, c;
    pa_memchunk tchunk;
    pa_usec_t current_latency PA_GCC_UNUSED;

    pa_sink_input_assert_ref(i);
    pa_assert(chunk);
    pa_assert_se(u = i->userdata);

    /* Hmm, process any rewind request that might be queued up */
    pa_sink_process_rewind(u->sink, 0);

    /* (1) IF YOU NEED A FIXED BLOCK SIZE USE
     * pa_memblockq_peek_fixed_size() HERE INSTEAD. NOTE THAT FILTERS
     * WHICH CAN DEAL WITH DYNAMIC BLOCK SIZES ARE HIGHLY
     * PREFERRED. */
    while (pa_memblockq_peek(u->memblockq, &tchunk) < 0) {
        pa_memchunk nchunk;

        pa_sink_render(u->sink, nbytes, &nchunk);
        pa_memblockq_push(u->memblockq, &nchunk);
        pa_memblock_unref(nchunk.memblock);
    }

    /* (2) IF YOU NEED A FIXED BLOCK SIZE, THIS NEXT LINE IS NOT
     * NECESSARY */
    tchunk.length = PA_MIN(nbytes, tchunk.length);
    pa_assert(tchunk.length > 0);

    fs = pa_frame_size(&i->sample_spec);
    n = (unsigned) (tchunk.length / fs);

    pa_assert(n > 0);

    chunk->index = 0;
    chunk->length = n*fs;
    chunk->memblock = pa_memblock_new(i->sink->core->mempool, chunk->length);

    pa_memblockq_drop(u->memblockq, chunk->length);

    src = pa_memblock_acquire_chunk(&tchunk);
    dst = pa_memblock_acquire(chunk->memblock);

    /* (3) PUT YOUR CODE HERE TO DO SOMETHING WITH THE DATA */

    /* As an example, copy input to output */
    for (c = 0; c < u->channels; c++) {
        pa_sample_clamp(PA_SAMPLE_FLOAT32NE,
                        dst+c, u->channels * sizeof(float),
                        src+c, u->channels * sizeof(float),
                        n);
    }

    pa_memblock_release(tchunk.memblock);
    pa_memblock_release(chunk->memblock);

    pa_memblock_unref(tchunk.memblock);

    /* (4) IF YOU NEED THE LATENCY FOR SOMETHING ACQUIRE IT LIKE THIS: */
    current_latency =
        /* Get the latency of the master sink */
        pa_sink_get_latency_within_thread(i->sink) +

        /* Add the latency internal to our sink input on top */
        pa_bytes_to_usec(pa_memblockq_get_length(i->thread_info.render_memblockq), &i->sink->sample_spec);

    return 0;
}
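The callback above is the stock filter-sink template, so step (3) only copies input to output. A minimal sketch of what a real step (3) could look like, assuming the same interleaved float32 layout and reusing the callback's names (src, dst, n, u->channels); the helper and its fixed 0.5f gain are purely illustrative, not part of the example:

static void apply_gain(float *dst, const float *src, unsigned n_frames, unsigned channels) {
    unsigned s, c;

    /* Walk the interleaved float32 frames and copy each sample with a fixed
     * gain applied; a loop like this could stand in for the pa_sample_clamp()
     * copy in step (3). */
    for (s = 0; s < n_frames; s++)
        for (c = 0; c < channels; c++)
            dst[s * channels + c] = src[s * channels + c] * 0.5f;
}

It would be called as apply_gain(dst, src, n, u->channels) between acquiring and releasing the memblocks.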
Example #25
/* Returns nonzero when less than two frames' worth of data is queued,
 * i.e. when another frame should be produced. */
int mi6k_needs_frame()
{
  return pa_memblockq_get_length(mi6k.queue) < FRAME_SIZE*2;
}
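Nearly every example above follows the same pattern: read the queue's current fill level with pa_memblockq_get_length() and convert it to a latency with pa_bytes_to_usec(). A minimal, hypothetical helper capturing that pattern (not taken from any example above; it assumes PulseAudio's internal pulsecore headers are on the include path):

#include <pulse/sample.h>          /* pa_usec_t, pa_sample_spec, pa_bytes_to_usec() */
#include <pulsecore/memblockq.h>   /* pa_memblockq, pa_memblockq_get_length() */

static pa_usec_t memblockq_latency(pa_memblockq *q, const pa_sample_spec *ss) {
    /* Bytes currently sitting in the queue... */
    size_t length = pa_memblockq_get_length(q);

    /* ...expressed as playback/capture time for the given sample spec. */
    return pa_bytes_to_usec(length, ss);
}

A GET_LATENCY handler would then add this value to the latency reported by the master sink or source, as Examples #1 and #16 do.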