/* Render audio from the sink and transmit it until the render clock
 * catches up with 'now' plus one block. Runs in the I/O thread.
 *
 * Does nothing when got_max_latency is set: in that mode rendering is
 * driven elsewhere. Each iteration renders at most 16 KiB and forwards
 * it via data_send(), advancing u->timestamp by the rendered duration.
 *
 * Fix: removed the block of commented-out dead code (old index-based
 * loop limiter superseded by got_max_latency, stale debug log) and made
 * request_bytes a size_t to match thread_info.max_request. */
static void process_render(struct userdata *u, pa_usec_t now) {
    pa_memchunk chunk;
    size_t request_bytes;

    pa_assert(u);

    if (u->got_max_latency)
        return;

    while (u->timestamp < now + u->block_usec) {
        /* Cap each request so a huge max_request cannot produce
         * excessively large chunks in one go. */
        request_bytes = u->sink->thread_info.max_request;
        request_bytes = MIN(request_bytes, (size_t) (16 * 1024));

        pa_sink_render(u->sink, request_bytes, &chunk);

        data_send(u, &chunk);
        pa_memblock_unref(chunk.memblock);

        u->timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);
    }
}
/* Module teardown (RAOP sink).
 *
 * Safe to call even if pa__init() failed partway: every resource is
 * checked before release. The order matters: unlink the sink first so
 * no new work reaches it, then shut the I/O thread down, and only then
 * release the objects the thread was using. */
void pa__done(pa_module*m) {
    struct userdata *u;
    pa_assert(m);

    if (!(u = m->userdata))
        return;

    /* Detach the sink from the core before stopping the thread. */
    if (u->sink)
        pa_sink_unlink(u->sink);

    /* Synchronously ask the I/O thread to exit, then reap it. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->rtpoll_item)
        pa_rtpoll_item_free(u->rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Staging chunks for raw and encoded audio. */
    if (u->raw_memchunk.memblock)
        pa_memblock_unref(u->raw_memchunk.memblock);

    if (u->encoded_memchunk.memblock)
        pa_memblock_unref(u->encoded_memchunk.memblock);

    if (u->raop)
        pa_raop_client_free(u->raop);

    pa_xfree(u->read_data);
    pa_xfree(u->write_data);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->fd >= 0)
        pa_close(u->fd);

    pa_xfree(u);
}
static void *mi6k_render_thread(void *userdata) { while (1) { pa_memchunk chunk; int i; int is_empty = 1; static int sleeptimer = 0; i = pa_memblockq_peek(mi6k.queue, &chunk); /* * Before writing this chunk, let's see if there's anything * printable in it. For our purposes, these include * special characters but not control characters: thus, anything * above the space. */ for (i=0; i<chunk.length; i++) { int c = ((unsigned char*)chunk.memblock->data)[chunk.index + i]; if (c > ' ') { is_empty = 0; } } if (is_empty) { if (mi6k.power) { /* * We're rendering blank frames. Our sleep timer counts down... * If we stay blank for quite a while, power down the VFD. */ if (++sleeptimer > AUTO_OFF_FRAMES) { mi6k_set_power(0); } } } else { sleeptimer = 0; if (!mi6k.power) { /* * We're rendering non-blank frames, but the display is off. * Turn on the juice, wait for it to start up, then do our initialization. */ mi6k_set_power(1); usleep(200000); mi6k_init_hardware(); } } fwrite(chunk.memblock->data + chunk.index, 1, chunk.length, mi6k.device); fflush(mi6k.device); pa_memblock_unref(chunk.memblock); pa_memblockq_drop(mi6k.queue, &chunk, chunk.length); } return NULL; }
/* Pull pending playback data from the client's iochannel into the
 * current staging memblock and forward it to the sink input.
 *
 * Returns 0 on success or a transient condition (EAGAIN/EINTR, nothing
 * missing), -1 on EOF or a hard read error.
 *
 * Fix: 'playback.missing' is a signed atomic, but the original cast it
 * to size_t *before* the '<= 0' test, so a negative value wrapped to a
 * huge unsigned number and slipped past the guard. Load it as signed
 * and test first. Also, 'space' is a size_t, so 'space <= 0' could only
 * ever mean equality — written as '== 0' now. */
static int do_read(connection *c) {
    pa_memchunk chunk;
    ssize_t r;
    size_t l;
    void *p;
    size_t space = 0;
    int missing;

    connection_assert_ref(c);

    if (!c->sink_input)
        return 0;

    missing = pa_atomic_load(&c->playback.missing);
    if (missing <= 0)
        return 0;
    l = (size_t) missing;

    /* Recycle the staging block while it has room; replace it when full. */
    if (c->playback.current_memblock) {
        space = pa_memblock_get_length(c->playback.current_memblock) - c->playback.memblock_index;
        if (space == 0) {
            pa_memblock_unref(c->playback.current_memblock);
            c->playback.current_memblock = NULL;
        }
    }

    if (!c->playback.current_memblock) {
        /* (size_t) -1 requests the pool's default tile size. */
        pa_assert_se(c->playback.current_memblock = pa_memblock_new(c->protocol->core->mempool, (size_t) -1));
        c->playback.memblock_index = 0;
        space = pa_memblock_get_length(c->playback.current_memblock);
    }

    if (l > space)
        l = space;

    p = pa_memblock_acquire(c->playback.current_memblock);
    r = pa_iochannel_read(c->io, (uint8_t*) p + c->playback.memblock_index, l);
    pa_memblock_release(c->playback.current_memblock);

    if (r <= 0) {
        if (r < 0 && (errno == EINTR || errno == EAGAIN))
            return 0;

        pa_log_debug("read(): %s", r == 0 ? "EOF" : pa_cstrerror(errno));
        return -1;
    }

    chunk.memblock = c->playback.current_memblock;
    chunk.index = c->playback.memblock_index;
    chunk.length = (size_t) r;

    c->playback.memblock_index += (size_t) r;

    /* Hand the chunk to the sink input's thread asynchronously. */
    pa_asyncmsgq_post(c->sink_input->sink->asyncmsgq, PA_MSGOBJECT(c->sink_input), SINK_INPUT_MESSAGE_POST_DATA, NULL, 0, &chunk, NULL);
    pa_atomic_sub(&c->playback.missing, (int) r);

    return 0;
}
/* No lock necessary, but not safe against concurrent callers.
 * Drops one reference to a FIXED-type memblock whose backing storage is
 * owned by the caller. */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) == 1) {
        /* We hold the last reference: a plain unref frees it. */
        pa_memblock_unref(b);
    } else {
        /* Others still reference the block; copy the data out of the
         * caller-owned buffer before releasing our reference. */
        memblock_make_local(b);
        pa_memblock_unref(b);
    }
}
/* Release every cached silence memblock and zero out the cache struct. */
void pa_silence_cache_done(pa_silence_cache *cache) {
    pa_sample_format_t f;

    pa_assert(cache);

    for (f = 0; f < PA_SAMPLE_MAX; f++) {
        if (!cache->blocks[f])
            continue;
        pa_memblock_unref(cache->blocks[f]);
    }

    memset(cache, 0, sizeof(*cache));
}
/* Drain completed waveIn capture fragments and post them to the source.
 *
 * Runs outside the waveIn callback: the callback only bumps
 * u->free_ifrags under the critical section; this function consumes
 * that count, copies each recorded fragment into a fresh memblock,
 * posts it, and immediately re-queues the fragment header with waveIn
 * so capture never stalls. */
static void do_read(struct userdata *u) {
    uint32_t free_frags;
    pa_memchunk memchunk;
    WAVEHDR *hdr;
    MMRESULT res;
    void *p;

    if (!u->source)
        return;

    if (!PA_SOURCE_IS_LINKED(u->source->state))
        return;

    /* Atomically take ownership of the completed-fragment count. */
    EnterCriticalSection(&u->crit);
    free_frags = u->free_ifrags;
    u->free_ifrags = 0;
    LeaveCriticalSection(&u->crit);

    /* All fragments completed means the hardware ran out of buffers. */
    if (free_frags == u->fragments)
        pa_log_debug("WaveIn overflow!");

    while (free_frags) {
        hdr = &u->ihdrs[u->cur_ihdr];
        /* Unprepare before touching/reusing the header. */
        if (hdr->dwFlags & WHDR_PREPARED)
            waveInUnprepareHeader(u->hwi, hdr, sizeof(WAVEHDR));

        if (hdr->dwBytesRecorded) {
            /* Copy the recorded bytes into a memblock and hand them to
             * the source; the driver buffer is reused right after. */
            memchunk.memblock = pa_memblock_new(u->core->mempool, hdr->dwBytesRecorded);
            pa_assert(memchunk.memblock);

            p = pa_memblock_acquire(memchunk.memblock);
            memcpy((char*) p, hdr->lpData, hdr->dwBytesRecorded);
            pa_memblock_release(memchunk.memblock);

            memchunk.length = hdr->dwBytesRecorded;
            memchunk.index = 0;

            pa_source_post(u->source, &memchunk);
            pa_memblock_unref(memchunk.memblock);
        }

        /* Give the fragment back to waveIn for the next capture round. */
        res = waveInPrepareHeader(u->hwi, hdr, sizeof(WAVEHDR));
        if (res != MMSYSERR_NOERROR)
            pa_log_error("Unable to prepare waveIn block: %d", res);

        res = waveInAddBuffer(u->hwi, hdr, sizeof(WAVEHDR));
        if (res != MMSYSERR_NOERROR)
            pa_log_error("Unable to add waveIn block: %d", res);

        free_frags--;
        u->cur_ihdr++;
        u->cur_ihdr %= u->fragments;  /* ring of fragment headers */
    }
}
static void thread_func(void *userdata) { struct userdata *u = userdata; pa_assert(u); pa_log_debug("Thread starting up"); pa_thread_mq_install(&u->thread_mq); u->timestamp = pa_rtclock_now(); for (;;) { int ret; /* Generate some null data */ if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) { pa_usec_t now; pa_memchunk chunk; now = pa_rtclock_now(); if ((chunk.length = pa_usec_to_bytes(now - u->timestamp, &u->source->sample_spec)) > 0) { chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1); /* or chunk.length? */ chunk.index = 0; pa_source_post(u->source, &chunk); pa_memblock_unref(chunk.memblock); u->timestamp = now; } pa_rtpoll_set_timer_absolute(u->rtpoll, u->timestamp + u->latency_time * PA_USEC_PER_MSEC); } else pa_rtpoll_set_timer_disabled(u->rtpoll); /* Hmm, nothing to do. Let's sleep */ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0) goto fail; if (ret == 0) goto finish; } fail: /* If this was no regular exit from the loop we have to continue * processing messages until we received PA_MESSAGE_SHUTDOWN */ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL); pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN); finish: pa_log_debug("Thread shutting down"); }
/* Destroy a sample-cache entry: withdraw it from the name registry,
 * notify subscribers of the removal, then release its storage. */
static void free_entry(pa_scache_entry *e) {
    pa_assert(e);

    /* Unregister and announce removal while the name is still valid. */
    pa_namereg_unregister(e->core, e->name);
    pa_subscription_post(e->core, PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE|PA_SUBSCRIPTION_EVENT_REMOVE, e->index);

    if (e->memchunk.memblock)
        pa_memblock_unref(e->memchunk.memblock);
    if (e->proplist)
        pa_proplist_free(e->proplist);

    pa_xfree(e->name);
    pa_xfree(e->filename);
    pa_xfree(e);
}
/* Called from I/O thread context */ static void cmtspeech_sink_input_reset_dl_stream(struct userdata *u) { cmtspeech_dl_buf_t *buf; pa_assert(u); /* Flush all DL buffers */ pa_memblockq_flush_read(u->dl_memblockq); cmtspeech_dl_sideinfo_flush(u); while ((buf = pa_asyncq_pop(u->cmt_connection.dl_frame_queue, FALSE))) { pa_memchunk cmtchunk; if (0 == cmtspeech_buffer_to_memchunk(u, buf, &cmtchunk)) pa_memblock_unref(cmtchunk.memblock); } }
/* Module teardown.
 *
 * Safe on partial initialization: each resource is checked before it is
 * released. Order matters: disable the fd signal, unlink sink/source so
 * no new work arrives, stop the I/O thread, then free everything the
 * thread used. */
void pa__done(pa_module *m) {
    struct userdata *u;
    pa_assert(m);

    if (!(u = m->userdata))
        return;

    /* Stop async notifications on the device fd before anything else. */
    if (u->sig) {
        ioctl(u->fd, I_SETSIG, 0);
        pa_signal_free(u->sig);
    }

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->source)
        pa_source_unlink(u->source);

    /* Synchronously shut down and reap the I/O thread. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->source)
        pa_source_unref(u->source);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->rtpoll_item)
        pa_rtpoll_item_free(u->rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->fd >= 0)
        close(u->fd);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    pa_xfree(u->device_name);
    pa_xfree(u);
}
/* Tear down a memblockq: drop all queued chunks, then release the
 * silence template, the alignment helper, the name, and the queue. */
void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    /* Empties the queue of all list items. */
    pa_memblockq_silence(bq);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    pa_xfree(bq->name);
    pa_xfree(bq);
}
/* Destructor for a connection object: frees both memblock queues and
 * the in-flight playback staging block, then the struct itself. */
static void connection_free(pa_object *o) {
    connection *conn = CONNECTION(o);

    pa_assert(conn);

    if (conn->input_memblockq)
        pa_memblockq_free(conn->input_memblockq);
    if (conn->output_memblockq)
        pa_memblockq_free(conn->output_memblockq);

    if (conn->playback.current_memblock)
        pa_memblock_unref(conn->playback.current_memblock);

    pa_xfree(conn);
}
/* Free a RAOP retransmission packet buffer and all packets it holds. */
void pa_raop_packet_buffer_free(pa_raop_packet_buffer *pb) {
    pa_assert(pb);

    /* Hoisted the NULL test out of the loop condition: if the packet
     * array was never allocated there is nothing to walk. */
    if (pb->packets) {
        size_t i;

        for (i = 0; i < pb->size; i++) {
            if (pb->packets[i].memblock)
                pa_memblock_unref(pb->packets[i].memblock);
            pa_memchunk_reset(&pb->packets[i]);
        }
    }

    pa_xfree(pb->packets);
    pb->packets = NULL;
    pa_xfree(pb);
}
/* Reset the packet buffer to an empty state positioned just before
 * sequence number 'seq', releasing every stored packet. */
void pa_raop_packet_buffer_reset(pa_raop_packet_buffer *pb, uint16_t seq) {
    size_t i;

    pa_assert(pb);
    pa_assert(pb->packets);

    pb->pos = 0;
    pb->count = 0;

    /* One before the first expected packet; sequence 0 wraps back to
     * UINT16_MAX. */
    if (seq == 0)
        pb->seq = UINT16_MAX;
    else
        pb->seq = seq - 1;

    for (i = 0; i < pb->size; i++) {
        pa_memchunk *slot = &pb->packets[i];

        if (slot->memblock)
            pa_memblock_unref(slot->memblock);
        pa_memchunk_reset(slot);
    }
}
/* Look up or create the sample-cache entry named 'name'.
 *
 * If an entry already exists it is recycled: its payload is dropped and
 * subscribers receive a CHANGE event. Otherwise a new entry is created,
 * registered in the name registry and announced with a NEW event.
 * Returns NULL only if name registration fails. Either way, all payload
 * fields are reset to their defaults before returning.
 *
 * Fix: removed the duplicated 'e->last_used_time = 0;' assignment. */
static pa_scache_entry* scache_add_item(pa_core *c, const char *name) {
    pa_scache_entry *e;

    pa_assert(c);
    pa_assert(name);

    if ((e = pa_namereg_get(c, name, PA_NAMEREG_SAMPLE))) {
        /* Recycle the existing entry: drop the old payload first. */
        if (e->memchunk.memblock)
            pa_memblock_unref(e->memchunk.memblock);

        pa_xfree(e->filename);
        pa_proplist_clear(e->proplist);

        pa_assert(e->core == c);

        pa_subscription_post(c, PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE|PA_SUBSCRIPTION_EVENT_CHANGE, e->index);
    } else {
        e = pa_xnew(pa_scache_entry, 1);

        if (!pa_namereg_register(c, name, PA_NAMEREG_SAMPLE, e, TRUE)) {
            pa_xfree(e);
            return NULL;
        }

        e->name = pa_xstrdup(name);
        e->core = c;
        e->proplist = pa_proplist_new();

        pa_idxset_put(c->scache, e, &e->index);

        pa_subscription_post(c, PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE|PA_SUBSCRIPTION_EVENT_NEW, e->index);
    }

    /* (Re)initialize all payload fields to their defaults. */
    e->last_used_time = 0;
    pa_memchunk_reset(&e->memchunk);
    e->filename = NULL;
    e->lazy = FALSE;

    pa_sample_spec_init(&e->sample_spec);
    pa_channel_map_init(&e->channel_map);
    pa_cvolume_init(&e->volume);
    e->volume_is_set = FALSE;

    pa_proplist_sets(e->proplist, PA_PROP_MEDIA_ROLE, "event");

    return e;
}
/* Push rendered sink audio into the Xen shared ring buffer.
 *
 * Returns 0 when the write completed or would block (EAGAIN), -1 on a
 * hard error. Keeps partial writes in u->memchunk so the next call
 * resumes where this one stopped.
 * NOTE(review): the event-channel notify is issued *before* the ring
 * write below — confirm this ordering is intentional (it may be meant
 * to wake the backend for the previous batch). 'ioring', 'xce' and
 * 'xen_evtchn_port' are file-scope globals not visible here. */
static int process_render(struct userdata *u) {
    pa_assert(u);

    /* Refill the staging chunk from the sink once it is drained. */
    if (u->memchunk.length <= 0)
        pa_sink_render(u->sink, ioring->usable_buffer_space, &u->memchunk);

    pa_assert(u->memchunk.length > 0);

    xc_evtchn_notify(xce, xen_evtchn_port);

    for (;;) {
        ssize_t l;
        void *p;

        p = pa_memblock_acquire(u->memchunk.memblock);
        /* xen: write data to ring buffer & notify backend */
        l = ring_write(ioring, (uint8_t*)p + u->memchunk.index, u->memchunk.length);
        pa_memblock_release(u->memchunk.memblock);

        pa_assert(l != 0);

        if (l < 0) {
            if (errno == EINTR)
                continue;           /* interrupted: retry the write */
            else if (errno == EAGAIN)
                return 0;           /* ring full: try again later */
            else {
                pa_log("Failed to write data to FIFO: %s", pa_cstrerror(errno));
                return -1;
            }
        } else {
            /* Consume the bytes the ring accepted; release the block
             * once the chunk is fully written. */
            u->memchunk.index += (size_t) l;
            u->memchunk.length -= (size_t) l;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }
        }

        return 0;
    }
}
static int process_sink_render(struct userdata *u) { pa_assert(u); if (u->memchunk_sink.length <= 0) pa_sink_render(u->sink, libvchan_buffer_space(u->play_ctrl), &u->memchunk_sink); pa_assert(u->memchunk_sink.length > 0); for (;;) { ssize_t l; void *p; p = pa_memblock_acquire(u->memchunk_sink.memblock); l = write_to_vchan(u->play_ctrl, (char *) p + u->memchunk_sink.index, u->memchunk_sink.length); pa_memblock_release(u->memchunk_sink.memblock); pa_assert(l != 0); if (l < 0) { if (errno == EINTR) continue; else if (errno == EAGAIN) return 0; else { pa_log ("Failed to write data to VCHAN: %s", pa_cstrerror(errno)); return -1; } } else { u->memchunk_sink.index += (size_t) l; u->memchunk_sink.length -= (size_t) l; if (u->memchunk_sink.length <= 0) { pa_memblock_unref(u->memchunk_sink.memblock); pa_memchunk_reset(&u->memchunk_sink); } } return 0; } }
/* Render audio from the sink and transmit it until the render clock
 * catches up with 'now' plus one block; no-op in got_max_latency mode.
 *
 * Fix: u->block_usec is a pa_usec_t (64-bit unsigned), but the debug
 * log printed it with %d — a format/argument mismatch that is undefined
 * behavior in the varargs call. Cast to unsigned long long and use
 * %llu. */
static void process_render(struct userdata *u, pa_usec_t now) {
    pa_memchunk chunk;
    int request_bytes;

    pa_assert(u);

    if (u->got_max_latency) {
        return;
    }

    pa_log_debug("process_render: u->block_usec %llu", (unsigned long long) u->block_usec);

    while (u->timestamp < now + u->block_usec) {
        /* Cap each render request at 16 KiB. */
        request_bytes = u->sink->thread_info.max_request;
        request_bytes = MIN(request_bytes, 16 * 1024);

        pa_sink_render(u->sink, request_bytes, &chunk);

        data_send(u, &chunk);
        pa_memblock_unref(chunk.memblock);

        u->timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);
    }
}
/* Module teardown: unlink and release the sink input, drop the staging
 * memblock, free the userdata. Safe on partial initialization. */
void pa__done(pa_module*m) {
    struct userdata *u;

    pa_assert(m);

    u = m->userdata;
    if (!u)
        return;

    /* Detach the stream from its sink before dropping our reference. */
    if (u->sink_input) {
        pa_sink_input_unlink(u->sink_input);
        pa_sink_input_unref(u->sink_input);
    }

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    pa_xfree(u);
}
static int process_render(struct userdata *u) { pa_assert(u); if (u->memchunk.length <= 0) pa_sink_render(u->sink, u->buffer_size, &u->memchunk); pa_assert(u->memchunk.length > 0); for (;;) { ssize_t l; void *p; p = pa_memblock_acquire(u->memchunk.memblock); l = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, u->memchunk.length, &u->write_type); pa_memblock_release(u->memchunk.memblock); pa_assert(l != 0); if (l < 0) { if (errno == EINTR) continue; else if (errno == EAGAIN) return 0; else { pa_log("Failed to write data to FIFO: %s", pa_cstrerror(errno)); return -1; } } else { u->memchunk.index += (size_t) l; u->memchunk.length -= (size_t) l; if (u->memchunk.length <= 0) { pa_memblock_unref(u->memchunk.memblock); pa_memchunk_reset(&u->memchunk); } } return 0; } }
/* Module teardown (FIFO/pipe source).
 *
 * Safe on partial initialization. Order matters: unlink the source so
 * no new work reaches it, stop the I/O thread, then release everything
 * the thread used; finally remove the FIFO file from disk. */
void pa__done(pa_module *m) {
    struct userdata *u;
    pa_assert(m);

    if (!(u = m->userdata))
        return;

    if (u->source)
        pa_source_unlink(u->source);

    /* Synchronously shut down and reap the I/O thread. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->rtpoll_item)
        pa_rtpoll_item_free(u->rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* We created the FIFO node; remove it from the filesystem too. */
    if (u->filename) {
        unlink(u->filename);
        pa_xfree(u->filename);
    }

    if (u->fd >= 0)
        pa_assert_se(pa_close(u->fd) == 0);

    pa_xfree(u);
}
/* Set up the shm ringbuffer channel ("srbchannel") from a memblock the
 * server sent us, acknowledge the enable command, and switch the
 * pstream over to it. */
static void handle_srbchannel_memblock(pa_context *c, pa_memblock *memblock) {
    pa_srbchannel *sr;
    pa_tagstruct *reply;

    pa_assert(c);

    /* Memblock sanity check: it must exist, be writable, and not be one
     * of our own blocks. Any violation is a protocol error. */
    if (!memblock || pa_memblock_is_read_only(memblock) || pa_memblock_is_ours(memblock)) {
        pa_context_fail(c, PA_ERR_PROTOCOL);
        return;
    }

    /* Create the srbchannel */
    c->srb_template.memblock = memblock;
    pa_memblock_ref(memblock);
    sr = pa_srbchannel_new_from_template(c->mainloop, &c->srb_template);
    if (!sr) {
        pa_log_warn("Failed to create srbchannel from template");
        c->srb_template.readfd = -1;
        c->srb_template.writefd = -1;
        pa_memblock_unref(c->srb_template.memblock);
        c->srb_template.memblock = NULL;
        return;
    }

    /* Ack the enable command */
    reply = pa_tagstruct_new();
    pa_tagstruct_putu32(reply, PA_COMMAND_ENABLE_SRBCHANNEL);
    pa_tagstruct_putu32(reply, c->srb_setup_tag);
    pa_pstream_send_tagstruct(c->pstream, reply);

    /* ...and switch over */
    pa_pstream_set_srbchannel(c->pstream, sr);
}
/* Detach the context from everything it owns when it fails or
 * terminates: fail/terminate all streams, cancel pending operations,
 * and drop the dispatcher, pstream, srbchannel template and socket
 * client. */
static void context_unlink(pa_context *c) {
    pa_stream *s;

    pa_assert(c);

    /* Walk the stream list holding a temporary reference to the current
     * and next stream, because pa_stream_set_state() may drop the
     * stream's last reference and unlink it from the list under us. */
    s = c->streams ? pa_stream_ref(c->streams) : NULL;
    while (s) {
        pa_stream *n = s->next ? pa_stream_ref(s->next) : NULL;
        pa_stream_set_state(s, c->state == PA_CONTEXT_FAILED ? PA_STREAM_FAILED : PA_STREAM_TERMINATED);
        pa_stream_unref(s);
        s = n;
    }

    /* Cancelling an operation removes it from the list, so this drains
     * the whole list. */
    while (c->operations)
        pa_operation_cancel(c->operations);

    if (c->pdispatch) {
        pa_pdispatch_unref(c->pdispatch);
        c->pdispatch = NULL;
    }

    if (c->pstream) {
        pa_pstream_unlink(c->pstream);
        pa_pstream_unref(c->pstream);
        c->pstream = NULL;
    }

    if (c->srb_template.memblock) {
        pa_memblock_unref(c->srb_template.memblock);
        c->srb_template.memblock = NULL;
    }

    if (c->client) {
        pa_socket_client_unref(c->client);
        c->client = NULL;
    }

    reset_callbacks(c);
}
/* Self-locked.
 * Handle the peer's release of exported block 'id': move the slot from
 * the used list back to the free list, update export statistics and
 * drop our reference to the block. Returns 0 on success, -1 if 'id' is
 * out of range or not currently exported. */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    /* Take the block out of the slot while still holding the lock. */
    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    /* Statistics are atomics; updated outside the mutex. */
    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
/* Debug helper: print the entire readable content of 'bq' between '>'
 * and '<' markers, consuming the queue as it goes.
 *
 * Fix: the inner print loop initialized 'e' to the start of the chunk
 * but never advanced it, so the first byte was printed out.length times
 * instead of the whole chunk. 'e' is now incremented with 'n'. */
static void dump(pa_memblockq *bq) {
    printf(">");

    for (;;) {
        pa_memchunk out;
        char *e;
        size_t n;
        void *q;

        if (pa_memblockq_peek(bq, &out) < 0)
            break;

        q = pa_memblock_acquire(out.memblock);
        for (e = (char*) q + out.index, n = 0; n < out.length; n++, e++)
            printf("%c", *e);
        pa_memblock_release(out.memblock);
        pa_memblock_unref(out.memblock);

        pa_memblockq_drop(bq, out.length);
    }

    printf("<\n");
}
/* Flush one chunk of recorded data from the connection's output queue
 * to the client's iochannel. Returns 0 on success or a transient
 * condition, -1 on a hard write error. */
static int do_write(connection *c) {
    pa_memchunk chunk;
    ssize_t written;
    void *data;

    connection_assert_ref(c);

    if (!c->source_output)
        return 0;

    /* Empty queue is not an error; just nothing to send yet. */
    if (pa_memblockq_peek(c->output_memblockq, &chunk) < 0)
        return 0;

    pa_assert(chunk.memblock);
    pa_assert(chunk.length);

    data = pa_memblock_acquire(chunk.memblock);
    written = pa_iochannel_write(c->io, (uint8_t*) data + chunk.index, chunk.length);
    pa_memblock_release(chunk.memblock);
    pa_memblock_unref(chunk.memblock);

    if (written < 0) {
        if (errno == EINTR || errno == EAGAIN)
            return 0;

        pa_log("write(): %s", pa_cstrerror(errno));
        return -1;
    }

    /* Only drop from the queue what the channel actually accepted. */
    pa_memblockq_drop(c->output_memblockq, (size_t) written);
    return 0;
}
/* Destroy an srbchannel: tear down its event sources, the two fdsem
 * semaphores, and finally the shared memblock (which must be released
 * before being unreffed). */
void pa_srbchannel_free(pa_srbchannel *sr) {
#ifdef DEBUG_SRBCHANNEL
    pa_log("Freeing srbchannel");
#endif
    pa_assert(sr);

    if (sr->defer_event)
        sr->mainloop->defer_free(sr->defer_event);
    if (sr->read_event)
        sr->mainloop->io_free(sr->read_event);

    if (sr->sem_read)
        pa_fdsem_free(sr->sem_read);
    if (sr->sem_write)
        pa_fdsem_free(sr->sem_write);

    if (sr->memblock) {
        pa_memblock_release(sr->memblock);
        pa_memblock_unref(sr->memblock);
    }

    pa_xfree(sr);
}
/* Render audio from the sink and send it on until the render clock
 * catches up with 'now' plus one block; no-op in got_max_latency mode.
 *
 * Fix: block_usec is a 64-bit pa_usec_t and chunk.length is a size_t;
 * printing them with %lu is only correct on LP64 platforms and is UB
 * elsewhere. Use a portable (unsigned long long) cast with %llu for the
 * time value and %zu for the size_t. */
static void process_render(struct context *context, pa_usec_t now) {
    pa_memchunk chunk;
    int request_bytes;

    pa_assert(context);

    if (context->got_max_latency) {
        return;
    }

    pa_log_debug("process_render: u->block_usec %llu", (unsigned long long) context->block_usec);

    while (context->timestamp < now + context->block_usec) {
        /* Cap each render request at 16 KiB. */
        request_bytes = context->sink->thread_info.max_request;
        request_bytes = MIN(request_bytes, 16 * 1024);

        pa_sink_render(context->sink, request_bytes, &chunk);
        pa_log("process_render: %zu bytes", chunk.length);

        data_send(context, &chunk);
        pa_memblock_unref(chunk.memblock);

        context->timestamp += pa_bytes_to_usec(chunk.length, &context->sink->sample_spec);
    }
}
/* I/O thread of the FIFO source: polls the pipe fd, reads available
 * data into a staging memblock and posts it to the source, then sleeps
 * in the rtpoll until the fd is readable again. Exits via the fail/
 * finish paths on error or shutdown. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    int read_type = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        struct pollfd *pollfd;

        pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

        /* Try to read some data and pass it on to the source driver */
        if (u->source->thread_info.state == PA_SOURCE_RUNNING && pollfd->revents) {
            ssize_t l;
            void *p;

            /* Allocate a staging block sized to the pipe buffer; it is
             * reused across reads until completely filled. */
            if (!u->memchunk.memblock) {
                u->memchunk.memblock = pa_memblock_new(u->core->mempool, pa_pipe_buf(u->fd));
                u->memchunk.index = u->memchunk.length = 0;
            }

            pa_assert(pa_memblock_get_length(u->memchunk.memblock) > u->memchunk.index);

            p = pa_memblock_acquire(u->memchunk.memblock);
            l = pa_read(u->fd, (uint8_t*) p + u->memchunk.index, pa_memblock_get_length(u->memchunk.memblock) - u->memchunk.index, &read_type);
            pa_memblock_release(u->memchunk.memblock);

            pa_assert(l != 0); /* EOF cannot happen, since we opened the fifo for both reading and writing */

            if (l < 0) {

                if (errno == EINTR)
                    continue;            /* interrupted: retry immediately */
                else if (errno != EAGAIN) {
                    pa_log("Failed to read data from FIFO: %s", pa_cstrerror(errno));
                    goto fail;
                }

            } else {

                /* Post just the bytes read this time, then advance the
                 * write offset; recycle the block once it is full. */
                u->memchunk.length = (size_t) l;
                pa_source_post(u->source, &u->memchunk);
                u->memchunk.index += (size_t) l;

                if (u->memchunk.index >= pa_memblock_get_length(u->memchunk.memblock)) {
                    pa_memblock_unref(u->memchunk.memblock);
                    pa_memchunk_reset(&u->memchunk);
                }

                pollfd->revents = 0;
            }
        }

        /* Hmm, nothing to do. Let's sleep */
        pollfd->events = (short) (u->source->thread_info.state == PA_SOURCE_RUNNING ? POLLIN : 0);

        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (ret == 0)
            goto finish;

        /* Any event other than readable means the FIFO went away. */
        pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);

        if (pollfd->revents & ~POLLIN) {
            pa_log("FIFO shutdown.");
            goto fail;
        }
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}